author     Frederick Mayle <fmayle@google.com>  2023-10-16 22:20:15 +0000
committer  Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>  2023-10-16 22:20:15 +0000
commit     8355c2656b4127ebb187f6a77036ab86ba3331a3 (patch)
tree       487673684b83e84a687e6b73541a2e1290b9befa
parent     3837597c0a52ad7ddebffe9e06615f912002bb39 (diff)
parent     d5d504124c15bff10b72b52914ee5a6019a03052 (diff)
download   zerocopy-8355c2656b4127ebb187f6a77036ab86ba3331a3.tar.gz
Revert^2 "Upgrade zerocopy to 0.7.5" am: 7d80d53b47 am: 10d2be98ef am: 05382c99ab am: 08ef739821 am: d5d504124c
Original change: https://android-review.googlesource.com/c/platform/external/rust/crates/zerocopy/+/2787226

Change-Id: I3d841ee222f6641a16df2ecc4c15213564784faa
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
-rw-r--r--  .cargo_vcs_info.json                                   6
-rw-r--r--  Android.bp                                            49
-rw-r--r--  CONTRIBUTING.md                                      211
-rw-r--r--  Cargo.toml                                            54
-rw-r--r--  Cargo.toml.orig                                       51
-rw-r--r--  INTERNAL.md                                           33
-rw-r--r--  METADATA                                              14
-rw-r--r--  README.md                                             91
-rwxr-xr-x  cargo.sh                                              82
-rw-r--r--  cargo2android.json                                     6
-rwxr-xr-x  generate-readme.sh                                    42
-rw-r--r--  rustfmt.toml                                          15
-rw-r--r--  src/byteorder.rs                                     420
-rw-r--r--  src/derive_util.rs                                   127
-rw-r--r--  src/lib.rs                                          3883
-rw-r--r--  src/macros.rs                                        250
-rw-r--r--  src/util.rs                                          105
-rw-r--r--  src/wrappers.rs                                      497
-rw-r--r--  tests/trybuild.rs                                     45
-rw-r--r--  tests/ui-msrv/invalid-impls/invalid-impls.rs          25
-rw-r--r--  tests/ui-msrv/invalid-impls/invalid-impls.stderr     127
-rw-r--r--  tests/ui-msrv/transmute-illegal.rs                    10
-rw-r--r--  tests/ui-msrv/transmute-illegal.stderr                18
-rw-r--r--  tests/ui-nightly/invalid-impls/invalid-impls.rs       25
-rw-r--r--  tests/ui-nightly/invalid-impls/invalid-impls.stderr  107
-rw-r--r--  tests/ui-nightly/transmute-illegal.rs                 10
-rw-r--r--  tests/ui-nightly/transmute-illegal.stderr             16
-rw-r--r--  tests/ui-stable/invalid-impls/invalid-impls.rs        25
-rw-r--r--  tests/ui-stable/invalid-impls/invalid-impls.stderr   107
-rw-r--r--  tests/ui-stable/transmute-illegal.rs                  10
-rw-r--r--  tests/ui-stable/transmute-illegal.stderr              16
31 files changed, 4999 insertions(+), 1478 deletions(-)
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
new file mode 100644
index 0000000..e74a0f5
--- /dev/null
+++ b/.cargo_vcs_info.json
@@ -0,0 +1,6 @@
+{
+ "git": {
+ "sha1": "c150d4f1b75fc21240574b6b7dbbcdc236d388b0"
+ },
+ "path_in_vcs": ""
+}
\ No newline at end of file
diff --git a/Android.bp b/Android.bp
index 06af1ed..63ead71 100644
--- a/Android.bp
+++ b/Android.bp
@@ -23,9 +23,14 @@ rust_library {
host_supported: true,
crate_name: "zerocopy",
cargo_env_compat: true,
- cargo_pkg_version: "0.6.1",
+ cargo_pkg_version: "0.7.5",
srcs: ["src/lib.rs"],
- edition: "2018",
+ edition: "2021",
+ features: [
+ "byteorder",
+ "derive",
+ "zerocopy-derive",
+ ],
rustlibs: [
"libbyteorder",
],
@@ -42,10 +47,15 @@ rust_library_rlib {
name: "libzerocopy_nostd",
crate_name: "zerocopy",
cargo_env_compat: true,
- cargo_pkg_version: "0.6.1",
+ cargo_pkg_version: "0.7.5",
srcs: ["src/lib.rs"],
- edition: "2018",
- features: ["alloc"],
+ edition: "2021",
+ features: [
+ "alloc",
+ "byteorder",
+ "derive",
+ "zerocopy-derive",
+ ],
rustlibs: [
"libbyteorder_nostd",
],
@@ -69,9 +79,14 @@ rust_library_rlib {
name: "libzerocopy_nostd_noalloc",
crate_name: "zerocopy",
cargo_env_compat: true,
- cargo_pkg_version: "0.6.1",
+ cargo_pkg_version: "0.7.5",
srcs: ["src/lib.rs"],
- edition: "2018",
+ edition: "2021",
+ features: [
+ "byteorder",
+ "derive",
+ "zerocopy-derive",
+ ],
rustlibs: [
"libbyteorder_nostd",
],
@@ -89,23 +104,3 @@ rust_library_rlib {
product_available: true,
vendor_available: true,
}
-
-rust_test {
- name: "zerocopy_test_src_lib",
- host_supported: true,
- crate_name: "zerocopy",
- cargo_env_compat: true,
- cargo_pkg_version: "0.6.1",
- srcs: ["src/lib.rs"],
- test_suites: ["general-tests"],
- auto_gen_config: true,
- test_options: {
- unit_test: true,
- },
- edition: "2018",
- rustlibs: [
- "libbyteorder",
- "librand",
- ],
- proc_macros: ["libzerocopy_derive"],
-}
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..5d79e93
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,211 @@
+<!-- Copyright 2022 The Fuchsia Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+
+# How to Contribute
+
+We'd love to accept your patches and contributions to zerocopy. There are just a
+few small guidelines you need to follow.
+
+Once you've read the rest of this doc, check out our [good-first-issue
+label][good-first-issue] for some good issues you can use to get your toes wet!
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution;
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to <https://cla.developers.google.com/> to see
+your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
+
+## Code Reviews
+
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose. Consult [GitHub
+Help][about_pull_requests] for more information on using pull requests.
+
+## Code Guidelines
+
+### Philosophy
+
+This section is inspired by [Flutter's style guide][flutter_philosophy], which
+contains many general principles that you should apply to all your programming
+work. Read it. The below calls out specific aspects that we feel are
+particularly important.
+
+#### Dogfood Your Features
+
+In non-library code, it's often advised to only implement features you need.
+After all, it's hard to correctly design code without a concrete use case to
+guide its design. Since zerocopy is a library, this advice is not as applicable;
+we want our API surface to be featureful and complete even if not every feature
+or method has a known use case. However, the observation that unused code is
+hard to design still holds.
+
+Thus, when designing external-facing features, try to make use of them somehow.
+This could be by using them to implement other features, or it could be by
+writing prototype code which won't actually be checked in anywhere. If you're
+feeling ambitious, you could even add (and check in) a [Cargo
+example][cargo_example] that exercises the new feature.
+
+#### Go Down the Rabbit Hole
+
+You will occasionally encounter behavior that surprises you or seems wrong. It
+probably is! Invest the time to find the root cause - you will either learn
+something, or fix something, and both are worth your time. Do not work around
+behavior you don't understand.
+
+### Avoid Duplication
+
+Avoid duplicating code whenever possible. In cases where existing code is not
+exposed in a manner suitable to your needs, prefer to extract the necessary
+parts into a common dependency.
+
+### Comments
+
+When writing comments, take a moment to consider the future reader of your
+comment. Ensure that your comments are complete sentences with proper grammar
+and punctuation. Note that adding more comments or more verbose comments is not
+always better; for example, avoid comments that repeat the code they're anchored
+on.
+
+Documentation comments should be self-contained; in other words, do not assume
+that the reader is aware of documentation in adjacent files or on adjacent
+structures. Avoid documentation comments on types which describe _instances_ of
+the type; for example, `AddressSet is a set of client addresses.` is a comment
+that describes a field of type `AddressSet`, but the type may be used to hold
+any kind of `Address`, not just a client's.
+
+Phrase your comments to avoid references that might become stale; for example:
+do not mention a variable or type by name when possible (certain doc comments
+are necessary exceptions). Also avoid references to past or future versions of
+or past or future work surrounding the item being documented; explain things
+from first principles rather than making external references (including past
+revisions).
+
+When writing TODOs:
+
+1. Include an issue reference using the format `TODO(#123):`
+1. Phrase the text as an action that is to be taken; it should be possible for
+ another contributor to pick up the TODO without consulting any external
+ sources, including the referenced issue.
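+
+For example, a TODO following this format might look like (the issue number
+here is purely illustrative):
+
+```text
+// TODO(#123): Remove this workaround once the upstream fix is released.
+```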
+
+### Tests
+
+Much of the code in zerocopy has the property that, if it is buggy, those bugs
+may not cause user code to fail. This makes it extra important to write thorough
+tests, but it also makes it harder to write those tests correctly. Here are some
+guidelines on how to test code in zerocopy:
+1. All code added to zerocopy must include tests that exercise it completely.
+1. Tests must be deterministic. Threaded or time-dependent code, random number
+ generators (RNGs), and communication with external processes are common
+ sources of nondeterminism. See [Write reproducible, deterministic
+ tests][determinism] for tips.
+1. Avoid [change detector tests][change_detector_tests]; tests that are
+ unnecessarily sensitive to changes, especially ones external to the code
+ under test, can hamper feature development and refactoring.
+1. Since we run tests in [Miri][miri], make sure that tests exist which exercise
+ any potential [undefined behavior][undefined_behavior] so that Miri can catch
+ it.
+1. If there's some user code that should be impossible to compile, add a
+ [trybuild test][trybuild] to ensure that it's properly rejected.
+
+### Source Control Best Practices
+
+Commits should be arranged for ease of reading; that is, incidental changes
+such as code movement or formatting changes should be committed separately from
+actual code changes.
+
+Commits should always be focused. For example, a commit could add a feature,
+fix a bug, or refactor code, but not a mixture.
+
+Commits should be thoughtfully sized; avoid overly large or complex commits
+which can be logically separated, but also avoid overly separated commits that
+require code reviewers to load multiple commits into their mental working memory
+in order to properly understand how the various pieces fit together.
+
+#### Commit Messages
+
+Commit messages should be _concise_ but self-contained (avoid relying on issue
+references as explanations for changes) and written such that they are helpful
+to people reading in the future (include rationale and any necessary context).
+
+Avoid superfluous details or narrative.
+
+Commit messages should consist of a brief subject line and a separate
+explanatory paragraph in accordance with the following:
+
+1. [Separate subject from body with a blank line](https://chris.beams.io/posts/git-commit/#separate)
+1. [Limit the subject line to 50 characters](https://chris.beams.io/posts/git-commit/#limit-50)
+1. [Capitalize the subject line](https://chris.beams.io/posts/git-commit/#capitalize)
+1. [Do not end the subject line with a period](https://chris.beams.io/posts/git-commit/#end)
+1. [Use the imperative mood in the subject line](https://chris.beams.io/posts/git-commit/#imperative)
+1. [Wrap the body at 72 characters](https://chris.beams.io/posts/git-commit/#wrap-72)
+1. [Use the body to explain what and why vs. how](https://chris.beams.io/posts/git-commit/#why-not-how)
+
+If the code affects a particular subsystem, prefix the subject line with the
+name of that subsystem in square brackets, omitting any "zerocopy" prefix
+(that's implicit). For example, for a commit adding a feature to the
+zerocopy-derive crate:
+
+```text
+[derive] Support AsBytes on types with parameters
+```
+
+The body may be omitted if the subject is self-explanatory; e.g. when fixing a
+typo. The git book contains a [Commit Guidelines][commit_guidelines] section
+with much of the same advice, and the list above is part of a [blog
+post][beams_git_commit] by [Chris Beams][chris_beams].
+
+Commit messages should make use of issue integration. Including an issue
+reference like `#123` will cause the GitHub UI to link the text of that
+reference to the referenced issue, and will also make it so that the referenced
+issue back-links to the commit. Use "Closes", "Fixes", or "Resolves" on its own
+line to automatically close an issue when your commit is merged:
+
+```text
+Closes #123
+Fixes #123
+Resolves #123
+```
+
+When using issue integration, don't omit necessary context that may also be
+included in the relevant issue (see "Commit messages should be _concise_ but
+self-contained" above). Git history is more likely to be retained indefinitely
+than issue history (for example, if this repository is migrated away from GitHub
+at some point in the future).
+
+Commit messages should never contain references to any of:
+
+1. Relative moments in time
+1. Non-public URLs
+1. Individuals
+1. Hosted code reviews (such as on https://github.com/google/zerocopy/pulls)
+ + Refer to commits in this repository by their SHA-1 hash
+ + Refer to commits in other repositories by public web address (such as
+ https://github.com/google/zerocopy/commit/789b3deb)
+1. Other entities which may not make sense to arbitrary future readers
+
+## Community Guidelines
+
+This project follows [Google's Open Source Community
+Guidelines][google_open_source_guidelines].
+
+[about_pull_requests]: https://help.github.com/articles/about-pull-requests/
+[beams_git_commit]: https://chris.beams.io/posts/git-commit/
+[cargo_example]: http://xion.io/post/code/rust-examples.html
+[change_detector_tests]: https://testing.googleblog.com/2015/01/testing-on-toilet-change-detector-tests.html
+[chris_beams]: https://chris.beams.io/
+[commit_guidelines]: https://www.git-scm.com/book/en/v2/Distributed-Git-Contributing-to-a-Project#_commit_guidelines
+[determinism]: https://fuchsia.dev/fuchsia-src/contribute/testing/best-practices#write_reproducible_deterministic_tests
+[flutter_philosophy]: https://github.com/flutter/flutter/wiki/Style-guide-for-Flutter-repo#philosophy
+[good-first-issue]: https://github.com/google/zerocopy/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22
+[google_open_source_guidelines]: https://opensource.google/conduct/
+[magic_number]: https://en.wikipedia.org/wiki/Magic_number_(programming)
+[miri]: https://github.com/rust-lang/miri
+[trybuild]: https://crates.io/crates/trybuild
+[undefined_behavior]: https://raphlinus.github.io/programming/rust/2018/08/17/undefined-behavior.html
diff --git a/Cargo.toml b/Cargo.toml
index 271d552..802ec0b 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -10,26 +10,66 @@
# See Cargo.toml.orig for the original contents.
[package]
-edition = "2018"
+edition = "2021"
+rust-version = "1.61.0"
name = "zerocopy"
-version = "0.6.1"
+version = "0.7.5"
authors = ["Joshua Liebow-Feeser <joshlf@google.com>"]
-include = ["src/*", "Cargo.toml"]
+exclude = [".*"]
description = "Utilities for zero-copy parsing and serialization"
-license-file = "LICENSE"
-repository = "https://fuchsia.googlesource.com/fuchsia/+/HEAD/src/lib/zerocopy"
+readme = "README.md"
+license = "BSD-2-Clause"
+repository = "https://github.com/google/zerocopy"
+
+[package.metadata.ci]
+pinned-nightly = "nightly-2023-05-25"
+pinned-stable = "1.69.0"
+
[package.metadata.docs.rs]
all-features = true
+
[dependencies.byteorder]
version = "1.3"
+optional = true
default-features = false
[dependencies.zerocopy-derive]
-version = "0.3.1"
+version = "=0.7.5"
+optional = true
+
+[dev-dependencies.assert_matches]
+version = "1.5"
+
+[dev-dependencies.itertools]
+version = "0.11"
+
[dev-dependencies.rand]
-version = "0.6"
+version = "0.8.5"
+features = ["small_rng"]
+
+[dev-dependencies.rustversion]
+version = "1.0"
+
+[dev-dependencies.static_assertions]
+version = "1.1"
+
+[dev-dependencies.trybuild]
+version = "=1.0.80"
+
+[dev-dependencies.zerocopy-derive]
+version = "=0.7.5"
[features]
+__internal_use_only_features_that_work_on_stable = [
+ "alloc",
+ "derive",
+ "simd",
+]
alloc = []
+default = ["byteorder"]
+derive = ["zerocopy-derive"]
simd = []
simd-nightly = ["simd"]
+
+[target."cfg(any())".dependencies.zerocopy-derive]
+version = "=0.7.5"
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
index dfe576c..aa05c44 100644
--- a/Cargo.toml.orig
+++ b/Cargo.toml.orig
@@ -2,33 +2,68 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-# This file is used when publishing to crates.io
+# Put both crates in a single workspace so that `trybuild` compiler errors have
+# paths that are stable regardless of the path to the repository root. This
+# avoids issues like:
+# https://github.com/dtolnay/trybuild/issues/207#issuecomment-131227.594
+[workspace]
[package]
-edition = "2018"
+edition = "2021"
name = "zerocopy"
-version = "0.6.1"
+version = "0.7.5"
authors = ["Joshua Liebow-Feeser <joshlf@google.com>"]
description = "Utilities for zero-copy parsing and serialization"
-license-file = "../../../LICENSE"
-repository = "https://fuchsia.googlesource.com/fuchsia/+/HEAD/src/lib/zerocopy"
+license = "BSD-2-Clause"
+repository = "https://github.com/google/zerocopy"
+rust-version = "1.61.0"
-include = ["src/*", "Cargo.toml"]
+exclude = [".*"]
[package.metadata.docs.rs]
all-features = true
+[package.metadata.ci]
+# The versions of the stable and nightly compiler toolchains to use in CI.
+pinned-stable = "1.69.0"
+pinned-nightly = "nightly-2023-05-25"
+
[features]
+default = ["byteorder"]
+
alloc = []
+derive = ["zerocopy-derive"]
simd = []
simd-nightly = ["simd"]
+# This feature depends on all other features that work on the stable compiler.
+# We make no stability guarantees about this feature; it may be modified or
+# removed at any time.
+__internal_use_only_features_that_work_on_stable = ["alloc", "derive", "simd"]
[dependencies]
-zerocopy-derive = "0.3.1"
+zerocopy-derive = { version = "=0.7.5", path = "zerocopy-derive", optional = true }
[dependencies.byteorder]
version = "1.3"
default-features = false
+optional = true
+
+# The "associated proc macro pattern" ensures that the versions of zerocopy and
+# zerocopy-derive remain equal, even if the 'derive' feature isn't used.
+# See: https://github.com/matklad/macro-dep-test
+[target.'cfg(any())'.dependencies]
+zerocopy-derive = { version = "=0.7.5", path = "zerocopy-derive" }
[dev-dependencies]
-rand = "0.6"
+assert_matches = "1.5"
+itertools = "0.11"
+rand = { version = "0.8.5", features = ["small_rng"] }
+rustversion = "1.0"
+static_assertions = "1.1"
+# Pinned to a specific version so that the version used for local development
+# and the version used in CI are guaranteed to be the same. Future versions
+# sometimes change the output format slightly, so a version mismatch can cause
+# CI test failures.
+trybuild = "=1.0.80"
+# In tests, unlike in production, zerocopy-derive is not optional
+zerocopy-derive = { version = "=0.7.5", path = "zerocopy-derive" }
diff --git a/INTERNAL.md b/INTERNAL.md
new file mode 100644
index 0000000..4c9ed6a
--- /dev/null
+++ b/INTERNAL.md
@@ -0,0 +1,33 @@
+# Internal details
+
+This file documents various internal details of zerocopy and its infrastructure
+that consumers don't need to be concerned about. It focuses on details that
+affect multiple files, and allows each affected code location to reference this
+document rather than requiring us to repeat the same explanation in multiple
+locations.
+
+## CI and toolchain versions
+
+In CI (`.github/workflows/ci.yml`), we pin to specific versions or dates of the
+stable and nightly toolchains. The reason is twofold: First, our UI tests (see
+`tests/trybuild.rs` and `zerocopy-derive/tests/trybuild.rs`) depend on the
+format of rustc's error messages, and that format can change between toolchain
+versions (we also maintain multiple copies of our UI tests - one for each
+toolchain version pinned in CI - for this reason). Second, not all nightlies
+have a working Miri, so we need to pin to one that does (see
+https://rust-lang.github.io/rustup-components-history/).
+
+Updating the versions pinned in CI may cause the UI tests to break. In order to
+fix UI tests after a version update, set the environment variable
+`TRYBUILD=overwrite` while running `cargo test`.
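+
+For example (this uses the `cargo.sh` wrapper added in this change; plain
+`cargo test` works as well):
+
+```text
+TRYBUILD=overwrite ./cargo.sh +all test --workspace
+```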
+
+## Crate versions
+
+We ensure that the crate versions of zerocopy and zerocopy-derive are always the
+same in-tree, and that zerocopy depends upon zerocopy-derive using an exact
+version match to the current version in-tree. This has the result that, even
+when published on crates.io, both crates effectively constitute a single atomic
+version. So long as the code in zerocopy is compatible with the code in
+zerocopy-derive in the same Git commit, then publishing them both is fine. This
+frees us from the normal task of reasoning about compatibility with a range of
+semver-compatible versions of different crates.
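+
+As a concrete illustration, this commit's `Cargo.toml` expresses the pinning
+described above as an exact-version requirement:
+
+```toml
+[dependencies.zerocopy-derive]
+version = "=0.7.5"
+optional = true
+```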
diff --git a/METADATA b/METADATA
index 0094c01..2b18dc0 100644
--- a/METADATA
+++ b/METADATA
@@ -1,3 +1,7 @@
+# This project was upgraded with external_updater.
+# Usage: tools/external_updater/updater.sh update rust/crates/zerocopy
+# For more info, check https://cs.android.com/android/platform/superproject/+/main:tools/external_updater/README.md
+
name: "zerocopy"
description: "Utilities for zero-copy parsing and serialization"
third_party {
@@ -7,13 +11,13 @@ third_party {
}
url {
type: ARCHIVE
- value: "https://static.crates.io/crates/zerocopy/zerocopy-0.6.1.crate"
+ value: "https://static.crates.io/crates/zerocopy/zerocopy-0.7.5.crate"
}
- version: "0.6.1"
+ version: "0.7.5"
license_type: NOTICE
last_upgrade_date {
- year: 2022
- month: 11
- day: 18
+ year: 2023
+ month: 9
+ day: 28
}
}
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..d9d3d90
--- /dev/null
+++ b/README.md
@@ -0,0 +1,91 @@
+<!-- Copyright 2022 The Fuchsia Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+
+WARNING: DO NOT EDIT THIS FILE. It is generated automatically. Edits should be
+made in the doc comment on `src/lib.rs` or in `generate-readme.sh`.
+-->
+
+# zerocopy
+
+*<span style="font-size: 100%; color:grey;">Want to help improve zerocopy?
+Fill out our [user survey][user-survey]!</span>*
+
+***<span style="font-size: 140%">Fast, safe, <span
+style="color:red;">compile error</span>. Pick two.</span>***
+
+Zerocopy makes zero-cost memory manipulation effortless. We write `unsafe`
+so you don't have to.
+
+## Overview
+
+Zerocopy provides four core marker traits, each of which can be derived
+(e.g., `#[derive(FromZeroes)]`):
+- `FromZeroes` indicates that a sequence of zero bytes represents a valid
+ instance of a type
+- `FromBytes` indicates that a type may safely be converted from an
+ arbitrary byte sequence
+- `AsBytes` indicates that a type may safely be converted *to* a byte
+ sequence
+- `Unaligned` indicates that a type's alignment requirement is 1
+
+Types which implement a subset of these traits can then be converted to/from
+byte sequences with little to no runtime overhead.
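+
+For example, here is a minimal sketch (it requires the `derive` feature; the
+`Point` type is illustrative):
+
+```rust
+use zerocopy::{AsBytes, FromBytes, FromZeroes};
+
+#[derive(FromZeroes, FromBytes, AsBytes)]
+#[repr(C)]
+struct Point {
+    x: u32,
+    y: u32,
+}
+
+let p = Point { x: 1, y: 2 };
+let bytes: &[u8] = p.as_bytes(); // zero-copy view of `p`'s memory
+let q: Option<Point> = Point::read_from(bytes); // parse back, no allocation
+```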
+
+Zerocopy also provides byte-order aware integer types that support these
+conversions; see the `byteorder` module. These types are especially useful
+for network parsing.
+
+[user-survey]: https://docs.google.com/forms/d/e/1FAIpQLSdzBNTN9tzwsmtyZxRFNL02K36IWCdHWW2ZBckyQS2xiO3i8Q/viewform?usp=published_options
+
+## Cargo Features
+
+- **`alloc`**
+ By default, `zerocopy` is `no_std`. When the `alloc` feature is enabled,
+ the `alloc` crate is added as a dependency, and some allocation-related
+ functionality is added.
+
+- **`byteorder`** (enabled by default)
+ Adds the `byteorder` module and a dependency on the `byteorder` crate.
+ The `byteorder` module provides byte order-aware equivalents of the
+ multi-byte primitive numerical types. Unlike their primitive equivalents,
+ the types in this module have no alignment requirement and support byte
+ order conversions. This can be useful in handling file formats, network
+ packet layouts, etc. which don't provide alignment guarantees and which may
+ use a byte order different from that of the execution platform.
+
+- **`derive`**
+ Provides derives for the core marker traits via the `zerocopy-derive`
+ crate. These derives are re-exported from `zerocopy`, so it is not
+ necessary to depend on `zerocopy-derive` directly.
+
+ However, you may experience better compile times if you instead directly
+ depend on both `zerocopy` and `zerocopy-derive` in your `Cargo.toml`,
+ since doing so will allow Rust to compile these crates in parallel. To do
+ so, do *not* enable the `derive` feature, and list both dependencies in
+ your `Cargo.toml` with the same leading non-zero version number; e.g.:
+
+ ```toml
+ [dependencies]
+ zerocopy = "0.X"
+ zerocopy-derive = "0.X"
+ ```
+
+- **`simd`**
+ When the `simd` feature is enabled, `FromZeroes`, `FromBytes`, and
+ `AsBytes` impls are emitted for all stable SIMD types which exist on the
+ target platform. Note that the layout of SIMD types is not yet stabilized,
+ so these impls may be removed in the future if layout changes make them
+ invalid. For more information, see the Unsafe Code Guidelines Reference
+ page on the [layout of packed SIMD vectors][simd-layout].
+
+- **`simd-nightly`**
+ Enables the `simd` feature and adds support for SIMD types which are only
+ available on nightly. Since these types are unstable, support for any type
+ may be removed at any point in the future.
+
+[simd-layout]: https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html
+
+## Disclaimer
+
+Disclaimer: Zerocopy is not an officially supported Google product.
diff --git a/cargo.sh b/cargo.sh
new file mode 100755
index 0000000..7dacf84
--- /dev/null
+++ b/cargo.sh
@@ -0,0 +1,82 @@
+#!/bin/bash
+#
+# Copyright 2023 The Fuchsia Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This script is a thin wrapper around Cargo that provides human-friendly
+# toolchain names which are automatically translated to the toolchain versions
+# we have pinned in CI.
+#
+# cargo.sh --version <toolchain-name> # looks up the version for the named toolchain
+# cargo.sh +<toolchain-name> [...] # runs cargo commands with the named toolchain
+# cargo.sh +all [...] # runs cargo commands with each toolchain
+#
+# The meta-toolchain "all" instructs this script to run the provided command
+# once for each toolchain (msrv, stable, nightly).
+#
+# A common task that is especially annoying to perform by hand is to update
+# trybuild's stderr files. Using this script:
+#
+# TRYBUILD=overwrite ./cargo.sh +all test --workspace
+
+set -eo pipefail
+
+function print-usage-and-exit {
+ echo "Usage:" >&2
+ echo " $0 --version <toolchain-name>" >&2
+ echo " $0 +<toolchain-name> [...]" >&2
+ echo " $0 +all [...]" >&2
+ exit 1
+}
+
+[[ $# -gt 0 ]] || print-usage-and-exit
+
+function pkg-meta {
+ cargo metadata --format-version 1 | jq -r ".packages[] | select(.name == \"zerocopy\").$1"
+}
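+
+# Example: `pkg-meta rust_version` prints the crate's `rust-version` field
+# ("1.61.0" at this commit).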
+
+function lookup-version {
+ VERSION="$1"
+ case "$VERSION" in
+ msrv)
+ pkg-meta rust_version
+ ;;
+ stable)
+ pkg-meta 'metadata.ci."pinned-stable"'
+ ;;
+ nightly)
+ pkg-meta 'metadata.ci."pinned-nightly"'
+ ;;
+ *)
+ echo "Unrecognized toolchain name: '$VERSION' (options are 'msrv', 'stable', 'nightly')" >&2
+ return 1
+ ;;
+ esac
+}
+
+case "$1" in
+ # cargo.sh --version <toolchain-name>
+ --version)
+ [[ $# -eq 2 ]] || print-usage-and-exit
+ lookup-version "$2"
+ ;;
+ # cargo.sh +all [...]
+ +all)
+ echo "[cargo.sh] warning: running the same command for each toolchain (msrv, stable, nightly)" >&2
+ for toolchain in msrv stable nightly; do
+ echo "[cargo.sh] running with toolchain: $toolchain" >&2
+ TOOLCHAIN="$(lookup-version $toolchain)"
+ cargo "+$TOOLCHAIN" ${@:2}
+ done
+ exit 0
+ ;;
+ # cargo.sh +<toolchain-name> [...]
+ +*)
+ TOOLCHAIN="$(lookup-version ${1:1})"
+ cargo "+$TOOLCHAIN" ${@:2}
+ ;;
+ *)
+ print-usage-and-exit
+ ;;
+esac
diff --git a/cargo2android.json b/cargo2android.json
index 92fc80c..d665005 100644
--- a/cargo2android.json
+++ b/cargo2android.json
@@ -1,14 +1,15 @@
{
"device": true,
"run": true,
+ "features": "derive,byteorder",
"variants": [
{
- "tests": true
+ "tests": false
},
{
"alloc": true,
"dependency-suffix": "_nostd",
- "features": "alloc",
+ "features": "alloc,derive,byteorder",
"force-rlib": true,
"no-host": true,
"suffix": "_nostd",
@@ -16,6 +17,7 @@
},
{
"dependency-suffix": "_nostd",
+ "features": "derive,byteorder",
"force-rlib": true,
"no-host": true,
"suffix": "_nostd_noalloc",
diff --git a/generate-readme.sh b/generate-readme.sh
new file mode 100755
index 0000000..b900737
--- /dev/null
+++ b/generate-readme.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+#
+# Copyright 2022 The Fuchsia Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+set -eo pipefail
+
+COPYRIGHT_HEADER=$(mktemp)
+BODY=$(mktemp)
+DISCLAIMER_FOOTER=$(mktemp)
+
+cat > $COPYRIGHT_HEADER <<'EOF'
+<!-- Copyright 2022 The Fuchsia Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+
+WARNING: DO NOT EDIT THIS FILE. It is generated automatically. Edits should be
+made in the doc comment on `src/lib.rs` or in `generate-readme.sh`.
+-->
+
+EOF
+
+# This uses the `cargo readme` tool, which you can install via `cargo install
+# cargo-readme --version 3.2.0`.
+#
+# The `sed` command is used to strip code links like:
+#
+# /// Here is a link to [`Vec`].
+#
+# These links don't work in a Markdown file, and so we remove the `[` and `]`
+# characters to convert them to non-link code snippets.
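+#
+# For example, the doc comment line shown above would be rewritten to:
+#
+#   /// Here is a link to `Vec`.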
+cargo readme --no-license | sed 's/\[\(`[^`]*`\)]/\1/g' > $BODY
+
+cat > $DISCLAIMER_FOOTER <<'EOF'
+
+## Disclaimer
+
+Disclaimer: Zerocopy is not an officially supported Google product.
+EOF
+
+cat $COPYRIGHT_HEADER $BODY $DISCLAIMER_FOOTER
diff --git a/rustfmt.toml b/rustfmt.toml
new file mode 100644
index 0000000..0b0d02c
--- /dev/null
+++ b/rustfmt.toml
@@ -0,0 +1,15 @@
+# Copyright 2022 The Fuchsia Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+edition = "2021"
+
+# The "Default" setting has a heuristic which splits lines too aggresively.
+# We are willing to revisit this setting in future versions of rustfmt.
+# Bugs:
+# * https://github.com/rust-lang/rustfmt/issues/3119
+# * https://github.com/rust-lang/rustfmt/issues/3120
+use_small_heuristics = "Max"
+
+# Prevent carriage returns
+newline_style = "Unix"
diff --git a/src/byteorder.rs b/src/byteorder.rs
index e42d3a1..ecee7a0 100644
--- a/src/byteorder.rs
+++ b/src/byteorder.rs
@@ -7,46 +7,50 @@
//! This module contains equivalents of the native multi-byte integer types with
//! no alignment requirement and supporting byte order conversions.
//!
-//! For each native multi-byte integer type - `u16`, `i16`, `u32`, etc - an
-//! equivalent type is defined by this module - [`U16`], [`I16`], [`U32`], etc.
-//! Unlike their native counterparts, these types have alignment 1, and take a
-//! type parameter specifying the byte order in which the bytes are stored in
-//! memory. Each type implements the [`FromBytes`], [`AsBytes`], and
-//! [`Unaligned`] traits.
+//! For each native multi-byte integer type - `u16`, `i16`, `u32`, etc - and
+//! floating point type - `f32` and `f64` - an equivalent type is defined by
+//! this module - [`U16`], [`I16`], [`U32`], [`F64`], etc. Unlike their native
+//! counterparts, these types have alignment 1, and take a type parameter
+//! specifying the byte order in which the bytes are stored in memory. Each type
+//! implements the [`FromBytes`], [`AsBytes`], and [`Unaligned`] traits.
//!
-//! These two properties, taken together, make these types very useful for
-//! defining data structures whose memory layout matches a wire format such as
-//! that of a network protocol or a file format. Such formats often have
-//! multi-byte values at offsets that do not respect the alignment requirements
-//! of the equivalent native types, and stored in a byte order not necessarily
-//! the same as that of the target platform.
+//! These two properties, taken together, make these types useful for defining
+//! data structures whose memory layout matches a wire format such as that of a
+//! network protocol or a file format. Such formats often have multi-byte values
+//! at offsets that do not respect the alignment requirements of the equivalent
+//! native types, and stored in a byte order not necessarily the same as that of
+//! the target platform.
+//!
+//! Type aliases are provided for common byte orders in the [`big_endian`],
+//! [`little_endian`], [`network_endian`], and [`native_endian`] submodules.
//!
//! # Example
//!
//! One use of these types is for representing network packet formats, such as
//! UDP:
//!
-//! ```edition2018
-//! # use zerocopy::*;
-//! use ::byteorder::NetworkEndian;
+//! ```rust,edition2021
+//! # #[cfg(feature = "derive")] { // This example uses derives, and won't compile without them
+//! use zerocopy::{AsBytes, ByteSlice, FromBytes, FromZeroes, Ref, Unaligned};
+//! use zerocopy::byteorder::network_endian::U16;
//!
-//! #[derive(FromBytes, AsBytes, Unaligned)]
+//! #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)]
//! #[repr(C)]
//! struct UdpHeader {
-//! src_port: U16<NetworkEndian>,
-//! dst_port: U16<NetworkEndian>,
-//! length: U16<NetworkEndian>,
-//! checksum: U16<NetworkEndian>,
+//! src_port: U16,
+//! dst_port: U16,
+//! length: U16,
+//! checksum: U16,
//! }
//!
//! struct UdpPacket<B: ByteSlice> {
-//! header: LayoutVerified<B, UdpHeader>,
+//! header: Ref<B, UdpHeader>,
//! body: B,
//! }
//!
//! impl<B: ByteSlice> UdpPacket<B> {
//! fn parse(bytes: B) -> Option<UdpPacket<B>> {
-//! let (header, body) = LayoutVerified::new_from_prefix(bytes)?;
+//! let (header, body) = Ref::new_from_prefix(bytes)?;
//! Some(UdpPacket { header, body })
//! }
//!
@@ -56,28 +60,27 @@
//!
//! // more getters...
//! }
+//! # }
//! ```
-use core::convert::{TryFrom, TryInto};
-use core::fmt::{self, Binary, Debug, Display, Formatter, LowerHex, Octal, UpperHex};
-use core::marker::PhantomData;
-use core::num::TryFromIntError;
-
-use zerocopy_derive::*;
+use core::{
+ convert::{TryFrom, TryInto},
+ fmt::{self, Binary, Debug, Display, Formatter, LowerHex, Octal, UpperHex},
+ marker::PhantomData,
+ num::TryFromIntError,
+};
-use crate::AsBytes;
-// This allows the custom derives to work. See the comment on this module for an
-// explanation.
-use crate::zerocopy;
-
-// NOTE: We don't reexport `WriteBytesExt` or `ReadBytesExt` because those are
-// only available with the `std` feature enabled, and zerocopy is `no_std` by
+// We don't reexport `WriteBytesExt` or `ReadBytesExt` because those are only
+// available with the `std` feature enabled, and zerocopy is `no_std` by
// default.
-pub use byteorder::{BigEndian, ByteOrder, LittleEndian, NativeEndian, NetworkEndian, BE, LE};
+pub use ::byteorder::{BigEndian, ByteOrder, LittleEndian, NativeEndian, NetworkEndian, BE, LE};
+
+use super::*;
macro_rules! impl_fmt_trait {
($name:ident, $native:ident, $trait:ident) => {
impl<O: ByteOrder> $trait for $name<O> {
+ #[inline(always)]
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
$trait::fmt(&self.get(), f)
}
@@ -85,6 +88,26 @@ macro_rules! impl_fmt_trait {
};
}
+macro_rules! impl_fmt_traits {
+ ($name:ident, $native:ident, "floating point number") => {
+ impl_fmt_trait!($name, $native, Display);
+ };
+ ($name:ident, $native:ident, "unsigned integer") => {
+ impl_fmt_traits!($name, $native, @all_traits);
+ };
+ ($name:ident, $native:ident, "signed integer") => {
+ impl_fmt_traits!($name, $native, @all_traits);
+ };
+
+ ($name:ident, $native:ident, @all_traits) => {
+ impl_fmt_trait!($name, $native, Display);
+ impl_fmt_trait!($name, $native, Octal);
+ impl_fmt_trait!($name, $native, LowerHex);
+ impl_fmt_trait!($name, $native, UpperHex);
+ impl_fmt_trait!($name, $native, Binary);
+ };
+}
+
macro_rules! doc_comment {
($x:expr, $($tt:tt)*) => {
#[doc = $x]
@@ -93,7 +116,7 @@ macro_rules! doc_comment {
}
macro_rules! define_max_value_constant {
- ($name:ident, $bytes:expr, unsigned) => {
+ ($name:ident, $bytes:expr, "unsigned integer") => {
/// The maximum value.
///
/// This constant should be preferred to constructing a new value using
@@ -101,15 +124,15 @@ macro_rules! define_max_value_constant {
/// endianness `O` and the endianness of the platform.
pub const MAX_VALUE: $name<O> = $name([0xFFu8; $bytes], PhantomData);
};
- ($name:ident, $bytes:expr, signed) => {
- // We don't provide maximum and minimum value constants for signed
- // values because there's no way to do it generically - it would require
- // a different value depending on the value of the ByteOrder type
- // parameter. Currently, one workaround would be to provide
- // implementations for concrete implementations of that trait. In the
- // long term, if we are ever able to make the `new` constructor a const
- // fn, we could use that instead.
- };
+ // We don't provide maximum and minimum value constants for signed values
+ // and floats because there's no way to do it generically - it would require
+ // a different value depending on the value of the `ByteOrder` type
+ // parameter. Currently, one workaround would be to provide implementations
+ // for concrete implementations of that trait. In the long term, if we are
+ // ever able to make the `new` constructor a const fn, we could use that
+ // instead.
+ ($name:ident, $bytes:expr, "signed integer") => {};
+ ($name:ident, $bytes:expr, "floating point number") => {};
}
macro_rules! define_type {
@@ -120,12 +143,14 @@ macro_rules! define_type {
$bytes:expr,
$read_method:ident,
$write_method:ident,
- $sign:ident,
+ $number_kind:tt,
[$($larger_native:ty),*],
- [$($larger_byteorder:ident),*]) => {
+ [$($larger_native_try:ty),*],
+ [$($larger_byteorder:ident),*],
+ [$($larger_byteorder_try:ident),*]) => {
doc_comment! {
- concat!("A ", stringify!($bits), "-bit ", stringify!($sign), " integer
-stored in `O` byte order.
+ concat!("A ", stringify!($bits), "-bit ", $number_kind,
+ " stored in `O` byte order.
`", stringify!($name), "` is like the native `", stringify!($native), "` type with
two major differences: First, it has no alignment requirement (its alignment is 1).
@@ -150,27 +175,30 @@ example of how it can be used for parsing UDP packets.
[`FromBytes`]: crate::FromBytes
[`AsBytes`]: crate::AsBytes
[`Unaligned`]: crate::Unaligned"),
- #[derive(FromBytes, Unaligned, Copy, Clone, Eq, PartialEq, Hash)]
+ #[derive(Copy, Clone, Eq, PartialEq, Hash)]
+ #[cfg_attr(any(feature = "derive", test), derive(FromZeroes, FromBytes, AsBytes, Unaligned))]
#[repr(transparent)]
pub struct $name<O>([u8; $bytes], PhantomData<O>);
}
+ safety_comment! {
+ /// SAFETY:
+ /// `$name<O>` is `repr(transparent)`, and so it has the same layout
+ /// as its only non-zero field, which is a `u8` array. `u8` arrays
+ /// are `FromZeroes`, `FromBytes`, `AsBytes`, and `Unaligned`.
+ impl_or_verify!(O => FromZeroes for $name<O>);
+ impl_or_verify!(O => FromBytes for $name<O>);
+ impl_or_verify!(O => AsBytes for $name<O>);
+ impl_or_verify!(O => Unaligned for $name<O>);
+ }
+
impl<O> Default for $name<O> {
+ #[inline(always)]
fn default() -> $name<O> {
$name::ZERO
}
}
- // TODO(joshlf): Replace this with #[derive(AsBytes)] once that derive
- // supports type parameters.
- unsafe impl<O: ByteOrder> AsBytes for $name<O> {
- fn only_derive_is_allowed_to_implement_this_trait()
- where
- Self: Sized,
- {
- }
- }
-
impl<O> $name<O> {
/// The value zero.
///
@@ -179,21 +207,23 @@ example of how it can be used for parsing UDP packets.
/// on the endianness and platform.
pub const ZERO: $name<O> = $name([0u8; $bytes], PhantomData);
- define_max_value_constant!($name, $bytes, $sign);
+ define_max_value_constant!($name, $bytes, $number_kind);
/// Constructs a new value from bytes which are already in the
/// endianness `O`.
+ #[inline(always)]
pub const fn from_bytes(bytes: [u8; $bytes]) -> $name<O> {
$name(bytes, PhantomData)
}
}
impl<O: ByteOrder> $name<O> {
- // TODO(joshlf): Make these const fns if the ByteOrder methods ever
- // become const fns.
+ // TODO(joshlf): Make these const fns if the `ByteOrder` methods
+ // ever become const fns.
/// Constructs a new value, possibly performing an endianness swap
/// to guarantee that the returned value has endianness `O`.
+ #[inline(always)]
pub fn new(n: $native) -> $name<O> {
let mut out = $name::default();
O::$write_method(&mut out.0[..], n);
@@ -203,6 +233,7 @@ example of how it can be used for parsing UDP packets.
/// Returns the value as a primitive type, possibly performing an
/// endianness swap to guarantee that the return value has the
/// endianness of the native platform.
+ #[inline(always)]
pub fn get(self) -> $native {
O::$read_method(&self.0[..])
}
@@ -210,35 +241,40 @@ example of how it can be used for parsing UDP packets.
/// Updates the value in place as a primitive type, possibly
/// performing an endianness swap to guarantee that the stored value
/// has the endianness `O`.
+ #[inline(always)]
pub fn set(&mut self, n: $native) {
O::$write_method(&mut self.0[..], n);
}
}
- // NOTE: The reasoning behind which traits to implement here is to only
+ // The reasoning behind which traits to implement here is to only
// implement traits which won't cause inference issues. Notably,
// comparison traits like PartialEq and PartialOrd tend to cause
// inference issues.
impl<O: ByteOrder> From<$name<O>> for [u8; $bytes] {
+ #[inline(always)]
fn from(x: $name<O>) -> [u8; $bytes] {
x.0
}
}
impl<O: ByteOrder> From<[u8; $bytes]> for $name<O> {
+ #[inline(always)]
fn from(bytes: [u8; $bytes]) -> $name<O> {
$name(bytes, PhantomData)
}
}
impl<O: ByteOrder> From<$name<O>> for $native {
+ #[inline(always)]
fn from(x: $name<O>) -> $native {
x.get()
}
}
impl<O: ByteOrder> From<$native> for $name<O> {
+ #[inline(always)]
fn from(x: $native) -> $name<O> {
$name::new(x)
}
@@ -246,14 +282,18 @@ example of how it can be used for parsing UDP packets.
$(
impl<O: ByteOrder> From<$name<O>> for $larger_native {
+ #[inline(always)]
fn from(x: $name<O>) -> $larger_native {
x.get().into()
}
}
+ )*
- impl<O: ByteOrder> TryFrom<$larger_native> for $name<O> {
+ $(
+ impl<O: ByteOrder> TryFrom<$larger_native_try> for $name<O> {
type Error = TryFromIntError;
- fn try_from(x: $larger_native) -> Result<$name<O>, TryFromIntError> {
+ #[inline(always)]
+ fn try_from(x: $larger_native_try) -> Result<$name<O>, TryFromIntError> {
$native::try_from(x).map($name::new)
}
}
@@ -261,53 +301,58 @@ example of how it can be used for parsing UDP packets.
$(
impl<O: ByteOrder, P: ByteOrder> From<$name<O>> for $larger_byteorder<P> {
+ #[inline(always)]
fn from(x: $name<O>) -> $larger_byteorder<P> {
$larger_byteorder::new(x.get().into())
}
}
+ )*
- impl<O: ByteOrder, P: ByteOrder> TryFrom<$larger_byteorder<P>> for $name<O> {
+ $(
+ impl<O: ByteOrder, P: ByteOrder> TryFrom<$larger_byteorder_try<P>> for $name<O> {
type Error = TryFromIntError;
- fn try_from(x: $larger_byteorder<P>) -> Result<$name<O>, TryFromIntError> {
+ #[inline(always)]
+ fn try_from(x: $larger_byteorder_try<P>) -> Result<$name<O>, TryFromIntError> {
x.get().try_into().map($name::new)
}
}
)*
impl<O: ByteOrder> AsRef<[u8; $bytes]> for $name<O> {
+ #[inline(always)]
fn as_ref(&self) -> &[u8; $bytes] {
&self.0
}
}
impl<O: ByteOrder> AsMut<[u8; $bytes]> for $name<O> {
+ #[inline(always)]
fn as_mut(&mut self) -> &mut [u8; $bytes] {
&mut self.0
}
}
impl<O: ByteOrder> PartialEq<$name<O>> for [u8; $bytes] {
+ #[inline(always)]
fn eq(&self, other: &$name<O>) -> bool {
self.eq(&other.0)
}
}
impl<O: ByteOrder> PartialEq<[u8; $bytes]> for $name<O> {
+ #[inline(always)]
fn eq(&self, other: &[u8; $bytes]) -> bool {
self.0.eq(other)
}
}
- impl_fmt_trait!($name, $native, Display);
- impl_fmt_trait!($name, $native, Octal);
- impl_fmt_trait!($name, $native, LowerHex);
- impl_fmt_trait!($name, $native, UpperHex);
- impl_fmt_trait!($name, $native, Binary);
+ impl_fmt_traits!($name, $native, $number_kind);
impl<O: ByteOrder> Debug for $name<O> {
+ #[inline]
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
- // This results in a format like "U16(42)"
- write!(f, concat!(stringify!($name), "({})"), self.get())
+ // This results in a format like "U16(42)".
+ f.debug_tuple(stringify!($name)).field(&self.get()).finish()
}
}
};
@@ -321,13 +366,41 @@ define_type!(
2,
read_u16,
write_u16,
- unsigned,
+ "unsigned integer",
[u32, u64, u128, usize],
+ [u32, u64, u128, usize],
+ [U32, U64, U128],
[U32, U64, U128]
);
-define_type!(A, U32, u32, 32, 4, read_u32, write_u32, unsigned, [u64, u128], [U64, U128]);
-define_type!(A, U64, u64, 64, 8, read_u64, write_u64, unsigned, [u128], [U128]);
-define_type!(A, U128, u128, 128, 16, read_u128, write_u128, unsigned, [], []);
+define_type!(
+ A,
+ U32,
+ u32,
+ 32,
+ 4,
+ read_u32,
+ write_u32,
+ "unsigned integer",
+ [u64, u128],
+ [u64, u128],
+ [U64, U128],
+ [U64, U128]
+);
+define_type!(
+ A,
+ U64,
+ u64,
+ 64,
+ 8,
+ read_u64,
+ write_u64,
+ "unsigned integer",
+ [u128],
+ [u128],
+ [U128],
+ [U128]
+);
+define_type!(A, U128, u128, 128, 16, read_u128, write_u128, "unsigned integer", [], [], [], []);
define_type!(
An,
I16,
@@ -336,27 +409,117 @@ define_type!(
2,
read_i16,
write_i16,
- signed,
+ "signed integer",
+ [i32, i64, i128, isize],
[i32, i64, i128, isize],
+ [I32, I64, I128],
[I32, I64, I128]
);
-define_type!(An, I32, i32, 32, 4, read_i32, write_i32, signed, [i64, i128], [I64, I128]);
-define_type!(An, I64, i64, 64, 8, read_i64, write_i64, signed, [i128], [I128]);
-define_type!(An, I128, i128, 128, 16, read_i128, write_i128, signed, [], []);
+define_type!(
+ An,
+ I32,
+ i32,
+ 32,
+ 4,
+ read_i32,
+ write_i32,
+ "signed integer",
+ [i64, i128],
+ [i64, i128],
+ [I64, I128],
+ [I64, I128]
+);
+define_type!(
+ An,
+ I64,
+ i64,
+ 64,
+ 8,
+ read_i64,
+ write_i64,
+ "signed integer",
+ [i128],
+ [i128],
+ [I128],
+ [I128]
+);
+define_type!(An, I128, i128, 128, 16, read_i128, write_i128, "signed integer", [], [], [], []);
+define_type!(
+ An,
+ F32,
+ f32,
+ 32,
+ 4,
+ read_f32,
+ write_f32,
+ "floating point number",
+ [f64],
+ [],
+ [F64],
+ []
+);
+define_type!(An, F64, f64, 64, 8, read_f64, write_f64, "floating point number", [], [], [], []);
+
+macro_rules! module {
+ ($name:ident, $trait:ident, $endianness_str:expr) => {
+ /// Numeric primitives stored in
+ #[doc = $endianness_str]
+ /// byte order.
+ pub mod $name {
+ use byteorder::$trait;
+
+ module!(@ty U16, $trait, "16-bit unsigned integer", $endianness_str);
+ module!(@ty U32, $trait, "32-bit unsigned integer", $endianness_str);
+ module!(@ty U64, $trait, "64-bit unsigned integer", $endianness_str);
+ module!(@ty U128, $trait, "128-bit unsigned integer", $endianness_str);
+ module!(@ty I16, $trait, "16-bit signed integer", $endianness_str);
+ module!(@ty I32, $trait, "32-bit signed integer", $endianness_str);
+ module!(@ty I64, $trait, "64-bit signed integer", $endianness_str);
+ module!(@ty I128, $trait, "128-bit signed integer", $endianness_str);
+ module!(@ty F32, $trait, "32-bit floating point number", $endianness_str);
+ module!(@ty F64, $trait, "64-bit floating point number", $endianness_str);
+ }
+ };
+ (@ty $ty:ident, $trait:ident, $desc_str:expr, $endianness_str:expr) => {
+ /// A
+ #[doc = $desc_str]
+ /// stored in
+ #[doc = $endianness_str]
+ /// byte order.
+ pub type $ty = crate::byteorder::$ty<$trait>;
+ };
+}
+
+module!(big_endian, BigEndian, "big-endian");
+module!(little_endian, LittleEndian, "little-endian");
+module!(network_endian, NetworkEndian, "network-endian");
+module!(native_endian, NativeEndian, "native-endian");
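+
+// For example, `big_endian::U16` is an alias for `U16<BigEndian>`, so callers
+// can write `zerocopy::byteorder::big_endian::U16::new(443)` without spelling
+// out the byte order type parameter.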
#[cfg(test)]
mod tests {
- use byteorder::NativeEndian;
+ use ::byteorder::NativeEndian;
+ use rand::{
+ distributions::{Distribution, Standard},
+ rngs::SmallRng,
+ Rng, SeedableRng,
+ };
- use super::*;
- use crate::{AsBytes, FromBytes, Unaligned};
+ use {
+ super::*,
+ crate::{AsBytes, FromBytes, Unaligned},
+ };
- // A native integer type (u16, i32, etc)
- trait Native: FromBytes + AsBytes + Copy + Eq + Debug {
+ // A native integer type (u16, i32, etc).
+ trait Native: FromBytes + AsBytes + Copy + PartialEq + Debug {
const ZERO: Self;
const MAX_VALUE: Self;
- fn rand() -> Self;
+ type Distribution: Distribution<Self>;
+ const DIST: Self::Distribution;
+
+ fn rand<R: Rng>(rng: &mut R) -> Self {
+ rng.sample(Self::DIST)
+ }
}
trait ByteArray:
@@ -411,12 +574,16 @@ mod tests {
macro_rules! impl_traits {
($name:ident, $native:ident, $bytes:expr, $sign:ident) => {
impl Native for $native {
- const ZERO: $native = 0;
- const MAX_VALUE: $native = ::core::$native::MAX;
-
- fn rand() -> $native {
- rand::random()
- }
+ // For some types, `0 as $native` is required (for example, when
+ // `$native` is a floating-point type; `0` is an integer), but
+ // for other types, it's a trivial cast. In all cases, Clippy
+ // thinks it's dangerous.
+ #[allow(trivial_numeric_casts, clippy::as_conversions)]
+ const ZERO: $native = 0 as $native;
+ const MAX_VALUE: $native = $native::MAX;
+
+ type Distribution = Standard;
+ const DIST: Standard = Standard;
}
impl<O: ByteOrder> ByteOrderType for $name<O> {
@@ -458,6 +625,8 @@ mod tests {
impl_traits!(I32, i32, 4, signed);
impl_traits!(I64, i64, 8, signed);
impl_traits!(I128, i128, 16, signed);
+ impl_traits!(F32, f32, 4, signed);
+ impl_traits!(F64, f64, 8, signed);
macro_rules! call_for_all_types {
($fn:ident, $byteorder:ident) => {
@@ -469,6 +638,8 @@ mod tests {
$fn::<I32<$byteorder>>();
$fn::<I64<$byteorder>>();
$fn::<I128<$byteorder>>();
+ $fn::<F32<$byteorder>>();
+ $fn::<F64<$byteorder>>();
};
}
@@ -482,9 +653,39 @@ mod tests {
}
#[cfg(target_endian = "big")]
- type NonNativeEndian = byteorder::LittleEndian;
+ type NonNativeEndian = LittleEndian;
#[cfg(target_endian = "little")]
- type NonNativeEndian = byteorder::BigEndian;
+ type NonNativeEndian = BigEndian;
+
+ // We use a `u64` seed so that we can use `SeedableRng::seed_from_u64`.
+ // `SmallRng`'s `SeedableRng::Seed` differs by platform, so if we wanted to
+ // call `SeedableRng::from_seed`, which takes a `Seed`, we would need
+ // conditional compilation by `target_pointer_width`.
+ const RNG_SEED: u64 = 0x7A03CAE2F32B5B8F;
+
+ const RAND_ITERS: usize = if cfg!(miri) {
+ // The tests below which use this constant used to take a very long time
+ // on Miri, which slows down local development and CI jobs. We're not
+ // using Miri to check for the correctness of our code, but rather its
+ // soundness, and at least in the context of these particular tests, a
+ // single loop iteration is just as good for surfacing UB as multiple
+ // iterations are.
+ //
+ // As of the writing of this comment, here's one set of measurements:
+ //
+ // $ # RAND_ITERS == 1
+ // $ cargo miri test -- -Z unstable-options --report-time endian
+ // test byteorder::tests::test_native_endian ... ok <0.049s>
+ // test byteorder::tests::test_non_native_endian ... ok <0.061s>
+ //
+ // $ # RAND_ITERS == 1024
+ // $ cargo miri test -- -Z unstable-options --report-time endian
+ // test byteorder::tests::test_native_endian ... ok <25.716s>
+ // test byteorder::tests::test_non_native_endian ... ok <38.127s>
+ 1
+ } else {
+ 1024
+ };
#[test]
fn test_zero() {
@@ -509,8 +710,9 @@ mod tests {
#[test]
fn test_native_endian() {
fn test_native_endian<T: ByteOrderType>() {
- for _ in 0..1024 {
- let native = T::Native::rand();
+ let mut r = SmallRng::seed_from_u64(RNG_SEED);
+ for _ in 0..RAND_ITERS {
+ let native = T::Native::rand(&mut r);
let mut bytes = T::ByteArray::default();
bytes.as_bytes_mut().copy_from_slice(native.as_bytes());
let mut from_native = T::new(native);
@@ -521,7 +723,7 @@ mod tests {
assert_eq!(from_native.into_bytes(), bytes);
assert_eq!(from_bytes.into_bytes(), bytes);
- let updated = T::Native::rand();
+ let updated = T::Native::rand(&mut r);
from_native.set(updated);
assert_eq!(from_native.get(), updated);
}
@@ -533,8 +735,9 @@ mod tests {
#[test]
fn test_non_native_endian() {
fn test_non_native_endian<T: ByteOrderType>() {
- for _ in 0..1024 {
- let native = T::Native::rand();
+ let mut r = SmallRng::seed_from_u64(RNG_SEED);
+ for _ in 0..RAND_ITERS {
+ let native = T::Native::rand(&mut r);
let mut bytes = T::ByteArray::default();
bytes.as_bytes_mut().copy_from_slice(native.as_bytes());
bytes = bytes.invert();
@@ -546,7 +749,7 @@ mod tests {
assert_eq!(from_native.into_bytes(), bytes);
assert_eq!(from_bytes.into_bytes(), bytes);
- let updated = T::Native::rand();
+ let updated = T::Native::rand(&mut r);
from_native.set(updated);
assert_eq!(from_native.get(), updated);
}
@@ -554,4 +757,13 @@ mod tests {
call_for_all_types!(test_non_native_endian, NonNativeEndian);
}
+
+ #[test]
+ fn test_debug_impl() {
+ // Ensure that Debug applies format options to the inner value.
+ let val = U16::<LE>::new(10);
+ assert_eq!(format!("{:?}", val), "U16(10)");
+ assert_eq!(format!("{:03?}", val), "U16(010)");
+ assert_eq!(format!("{:x?}", val), "U16(a)");
+ }
}
diff --git a/src/derive_util.rs b/src/derive_util.rs
new file mode 100644
index 0000000..edf88e3
--- /dev/null
+++ b/src/derive_util.rs
@@ -0,0 +1,127 @@
+// Copyright 2022 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//! Utilities used by `zerocopy-derive`.
+//!
+//! These are defined in `zerocopy` rather than in code generated by
+//! `zerocopy-derive` so that they can be compiled once rather than recompiled
+//! for every pair of type and trait (in other words, if they were defined in
+//! generated code, then deriving `AsBytes` and `FromBytes` on three different
+//! types would result in the code in question being emitted and compiled six
+//! different times).
+
+#![allow(missing_debug_implementations)]
+
+use core::marker::PhantomData;
+
+/// A compile-time check that should be one particular value.
+pub trait ShouldBe<const VALUE: bool> {}
+
+/// A struct for checking whether `T` contains padding.
+pub struct HasPadding<T: ?Sized, const VALUE: bool>(PhantomData<T>);
+
+impl<T: ?Sized, const VALUE: bool> ShouldBe<VALUE> for HasPadding<T, VALUE> {}
+
+/// Does the struct type `$t` have padding?
+///
+/// `$ts` is the list of the type of every field in `$t`. `$t` must be a
+/// struct type, or else `struct_has_padding!`'s result may be meaningless.
+///
+/// Note that `struct_has_padding!`'s results are independent of `repr` since
+/// they only consider the size of the type and the sizes of the fields.
+/// Whatever the repr, the size of the type already takes into account any
+/// padding that the compiler has decided to add. Structs with well-defined
+/// representations (such as `repr(C)`) can use this macro to check for padding.
+/// Note that while this may yield some consistent value for some `repr(Rust)`
+/// structs, it is not guaranteed across platforms or compilations.
+#[doc(hidden)] // `#[macro_export]` bypasses this module's `#[doc(hidden)]`.
+#[macro_export]
+macro_rules! struct_has_padding {
+ ($t:ty, $($ts:ty),*) => {
+ core::mem::size_of::<$t>() > 0 $(+ core::mem::size_of::<$ts>())*
+ };
+}
+
+/// Does the union type `$t` have padding?
+///
+/// `$ts` is the list of the type of every field in `$t`. `$t` must be a
+/// union type, or else `union_has_padding!`'s result may be meaningless.
+///
+/// Note that `union_has_padding!`'s results are independent of `repr` since
+/// they only consider the size of the type and the sizes of the fields.
+/// Whatever the repr, the size of the type already takes into account any
+/// padding that the compiler has decided to add. Unions with well-defined
+/// representations (such as `repr(C)`) can use this macro to check for padding.
+/// Note that while this may yield some consistent value for some `repr(Rust)`
+/// unions, it is not guaranteed across platforms or compilations.
+#[doc(hidden)] // `#[macro_export]` bypasses this module's `#[doc(hidden)]`.
+#[macro_export]
+macro_rules! union_has_padding {
+ ($t:ty, $($ts:ty),*) => {
+ false $(|| core::mem::size_of::<$t>() != core::mem::size_of::<$ts>())*
+ };
+}
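+
+// A sketch of how these macros evaluate (this assumes a target where `u64`
+// has size 8 and alignment 8, so `Padded` below has size 16):
+//
+//     #[repr(C)]
+//     struct Padded(u8, u64);
+//
+//     // 16 > size_of::<u8>() + size_of::<u64>() == 9, so this is `true`:
+//     assert!(struct_has_padding!(Padded, u8, u64));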
+
+#[cfg(test)]
+mod tests {
+ use crate::util::testutil::*;
+
+ #[test]
+ fn test_struct_has_padding() {
+ // Test that, for each provided repr, `struct_has_padding!` reports the
+ // expected value.
+ macro_rules! test {
+ (#[$cfg:meta] ($($ts:ty),*) => $expect:expr) => {{
+ #[$cfg]
+ struct Test($($ts),*);
+ assert_eq!(struct_has_padding!(Test, $($ts),*), $expect);
+ }};
+ (#[$cfg:meta] $(#[$cfgs:meta])* ($($ts:ty),*) => $expect:expr) => {
+ test!(#[$cfg] ($($ts),*) => $expect);
+ test!($(#[$cfgs])* ($($ts),*) => $expect);
+ };
+ }
+
+ test!(#[repr(C)] #[repr(transparent)] #[repr(packed)] () => false);
+ test!(#[repr(C)] #[repr(transparent)] #[repr(packed)] (u8) => false);
+ test!(#[repr(C)] #[repr(transparent)] #[repr(packed)] (u8, ()) => false);
+ test!(#[repr(C)] #[repr(packed)] (u8, u8) => false);
+
+ test!(#[repr(C)] (u8, AU64) => true);
+ // Rust won't let you put `#[repr(packed)]` on a type which contains a
+ // `#[repr(align(n > 1))]` type (`AU64`), so we have to use `u64` here.
+ // It's not ideal, but it definitely has align > 1 on /some/ of our CI
+ // targets, and this isn't a particularly complex macro we're testing
+ // anyway.
+ test!(#[repr(packed)] (u8, u64) => false);
+ }
+
+ #[test]
+ fn test_union_has_padding() {
+ // Test that, for each provided repr, `union_has_padding!` reports the
+ // expected value.
+ macro_rules! test {
+ (#[$cfg:meta] {$($fs:ident: $ts:ty),*} => $expect:expr) => {{
+ #[$cfg]
+ #[allow(unused)] // fields are never read
+ union Test{ $($fs: $ts),* }
+ assert_eq!(union_has_padding!(Test, $($ts),*), $expect);
+ }};
+ (#[$cfg:meta] $(#[$cfgs:meta])* {$($fs:ident: $ts:ty),*} => $expect:expr) => {
+ test!(#[$cfg] {$($fs: $ts),*} => $expect);
+ test!($(#[$cfgs])* {$($fs: $ts),*} => $expect);
+ };
+ }
+
+ test!(#[repr(C)] #[repr(packed)] {a: u8} => false);
+ test!(#[repr(C)] #[repr(packed)] {a: u8, b: u8} => false);
+
+ // Rust won't let you put `#[repr(packed)]` on a type which contains a
+ // `#[repr(align(n > 1))]` type (`AU64`), so we have to use `u64` here.
+ // It's not ideal, but it definitely has align > 1 on /some/ of our CI
+ // targets, and this isn't a particularly complex macro we're testing
+ // anyway.
+ test!(#[repr(C)] #[repr(packed)] {a: u8, b: u64} => true);
+ }
+}
diff --git a/src/lib.rs b/src/lib.rs
index 8241db7..3937911 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -2,14 +2,26 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//! Utilities for safe zero-copy parsing and serialization.
+// After updating the following doc comment, make sure to run the following
+// command to update `README.md` based on its contents:
+//
+// ./generate-readme.sh > README.md
+
+//! *<span style="font-size: 100%; color:grey;">Want to help improve zerocopy?
+//! Fill out our [user survey][user-survey]!</span>*
+//!
+//! ***<span style="font-size: 140%">Fast, safe, <span
+//! style="color:red;">compile error</span>. Pick two.</span>***
//!
-//! This crate provides utilities which make it easy to perform zero-copy
-//! parsing and serialization by allowing zero-copy conversion to/from byte
-//! slices.
+//! Zerocopy makes zero-cost memory manipulation effortless. We write `unsafe`
+//! so you don't have to.
//!
-//! This is enabled by three core marker traits, each of which can be derived
-//! (e.g., `#[derive(FromBytes)]`):
+//! # Overview
+//!
+//! Zerocopy provides four core marker traits, each of which can be derived
+//! (e.g., `#[derive(FromZeroes)]`):
+//! - [`FromZeroes`] indicates that a sequence of zero bytes represents a valid
+//! instance of a type
//! - [`FromBytes`] indicates that a type may safely be converted from an
//! arbitrary byte sequence
//! - [`AsBytes`] indicates that a type may safely be converted *to* a byte
@@ -19,156 +31,574 @@
//! Types which implement a subset of these traits can then be converted to/from
//! byte sequences with little to no runtime overhead.
//!
-//! Note that these traits are ignorant of byte order. For byte order-aware
-//! types, see the [`byteorder`] module.
+//! Zerocopy also provides byte-order aware integer types that support these
+//! conversions; see the `byteorder` module. These types are especially useful
+//! for network parsing.
+//!
+//! [user-survey]: https://docs.google.com/forms/d/e/1FAIpQLSdzBNTN9tzwsmtyZxRFNL02K36IWCdHWW2ZBckyQS2xiO3i8Q/viewform?usp=published_options
//!
-//! # Features
+//! # Cargo Features
//!
-//! `alloc`: By default, `zerocopy` is `no_std`. When the `alloc` feature is
-//! enabled, the `alloc` crate is added as a dependency, and some
-//! allocation-related functionality is added.
+//! - **`alloc`**
+//! By default, `zerocopy` is `no_std`. When the `alloc` feature is enabled,
+//! the `alloc` crate is added as a dependency, and some allocation-related
+//! functionality is added.
//!
-//! `simd`: When the `simd` feature is enabled, `FromBytes` and `AsBytes` impls
-//! are emitted for all stable SIMD types which exist on the target platform.
-//! Note that the layout of SIMD types is not yet stabilized, so these impls may
-//! be removed in the future if layout changes make them invalid. For more
-//! information, see the Unsafe Code Guidelines Reference page on the [Layout of
-//! packed SIMD vectors][simd-layout].
+//! - **`byteorder`** (enabled by default)
+//! Adds the [`byteorder`] module and a dependency on the `byteorder` crate.
+//! The `byteorder` module provides byte order-aware equivalents of the
+//! multi-byte primitive numerical types. Unlike their primitive equivalents,
+//! the types in this module have no alignment requirement and support byte
+//! order conversions. This can be useful in handling file formats, network
+//! packet layouts, etc which don't provide alignment guarantees and which may
+//! use a byte order different from that of the execution platform.
//!
-//! `simd-nightly`: Enables the `simd` feature and adds support for SIMD types
-//! which are only available on nightly. Since these types are unstable, support
-//! for any type may be removed at any point in the future.
+//! - **`derive`**
+//! Provides derives for the core marker traits via the `zerocopy-derive`
+//! crate. These derives are re-exported from `zerocopy`, so it is not
+//! necessary to depend on `zerocopy-derive` directly.
+//!
+//! However, you may experience better compile times if you instead directly
+//! depend on both `zerocopy` and `zerocopy-derive` in your `Cargo.toml`,
+//! since doing so will allow Rust to compile these crates in parallel. To do
+//! so, do *not* enable the `derive` feature, and list both dependencies in
+//! your `Cargo.toml` with the same leading non-zero version number; e.g:
+//!
+//! ```toml
+//! [dependencies]
+//! zerocopy = "0.X"
+//! zerocopy-derive = "0.X"
+//! ```
+//!
+//! - **`simd`**
+//! When the `simd` feature is enabled, `FromZeroes`, `FromBytes`, and
+//! `AsBytes` impls are emitted for all stable SIMD types which exist on the
+//! target platform. Note that the layout of SIMD types is not yet stabilized,
+//! so these impls may be removed in the future if layout changes make them
+//! invalid. For more information, see the Unsafe Code Guidelines Reference
+//! page on the [layout of packed SIMD vectors][simd-layout].
+//!
+//! - **`simd-nightly`**
+//! Enables the `simd` feature and adds support for SIMD types which are only
+//! available on nightly. Since these types are unstable, support for any type
+//! may be removed at any point in the future.
//!
//! [simd-layout]: https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html
-#![deny(missing_docs)]
+// Sometimes we want to use lints which were added after our MSRV.
+// `unknown_lints` is `warn` by default and we deny warnings in CI, so without
+// this attribute, any unknown lint would cause a CI failure when testing with
+// our MSRV.
+#![allow(unknown_lints)]
+#![deny(renamed_and_removed_lints)]
+#![deny(
+ anonymous_parameters,
+ deprecated_in_future,
+ illegal_floating_point_literal_pattern,
+ late_bound_lifetime_arguments,
+ missing_copy_implementations,
+ missing_debug_implementations,
+ missing_docs,
+ path_statements,
+ patterns_in_fns_without_body,
+ rust_2018_idioms,
+ trivial_numeric_casts,
+ unreachable_pub,
+ unsafe_op_in_unsafe_fn,
+ unused_extern_crates,
+ unused_qualifications,
+ variant_size_differences
+)]
+#![deny(
+ clippy::all,
+ clippy::alloc_instead_of_core,
+ clippy::arithmetic_side_effects,
+ clippy::as_underscore,
+ clippy::assertions_on_result_states,
+ clippy::as_conversions,
+ clippy::correctness,
+ clippy::dbg_macro,
+ clippy::decimal_literal_representation,
+ clippy::get_unwrap,
+ clippy::indexing_slicing,
+ clippy::missing_inline_in_public_items,
+ clippy::missing_safety_doc,
+ clippy::obfuscated_if_else,
+ clippy::perf,
+ clippy::print_stdout,
+ clippy::std_instead_of_core,
+ clippy::style,
+ clippy::suspicious,
+ clippy::todo,
+ clippy::undocumented_unsafe_blocks,
+ clippy::unimplemented,
+ clippy::unnested_or_patterns,
+ clippy::unwrap_used,
+ clippy::use_debug
+)]
+#![deny(
+ rustdoc::bare_urls,
+ rustdoc::broken_intra_doc_links,
+ rustdoc::invalid_codeblock_attributes,
+ rustdoc::invalid_html_tags,
+ rustdoc::invalid_rust_codeblocks,
+ rustdoc::missing_crate_level_docs,
+ rustdoc::private_intra_doc_links
+)]
+// In test code, it makes sense to weight more heavily towards concise, readable
+// code over correct or debuggable code.
+#![cfg_attr(test, allow(
+ // In tests, you get line numbers and have access to source code, so panic
+ // messages are less important. You also often unwrap a lot, which would
+ // make expect'ing instead very verbose.
+ clippy::unwrap_used,
+ // In tests, there's no harm to "panic risks" - the worst that can happen is
+ // that your test will fail, and you'll fix it. By contrast, panic risks in
+    // production code introduce the possibility of code panicking unexpectedly "in
+ // the field".
+ clippy::arithmetic_side_effects,
+ clippy::indexing_slicing,
+))]
#![cfg_attr(not(test), no_std)]
-#![recursion_limit = "2048"]
+#![cfg_attr(feature = "simd-nightly", feature(stdsimd))]
+
+#[macro_use]
+mod macros;
+#[cfg(feature = "byteorder")]
pub mod byteorder;
+#[cfg(any(feature = "derive", test))]
+#[doc(hidden)]
+pub mod derive_util;
+// TODO(#252): If we make this pub, come up with a better name.
+mod util;
+mod wrappers;
+#[cfg(feature = "byteorder")]
pub use crate::byteorder::*;
+pub use crate::wrappers::*;
+#[cfg(any(feature = "derive", test))]
pub use zerocopy_derive::*;
-use core::cell::{Ref, RefMut};
-use core::cmp::Ordering;
-use core::fmt::{self, Debug, Display, Formatter};
-use core::marker::PhantomData;
-use core::mem;
-use core::ops::{Deref, DerefMut};
-use core::ptr;
-use core::slice;
-
-// This is a hack to allow derives of FromBytes, AsBytes, and Unaligned to work
-// in this crate. They assume that zerocopy is linked as an extern crate, so
-// they access items from it as `zerocopy::Xxx`. This makes that still work.
+use core::{
+ alloc::Layout,
+ cell::{self, RefMut},
+ cmp::Ordering,
+ fmt::{self, Debug, Display, Formatter},
+ hash::Hasher,
+ marker::PhantomData,
+ mem::{self, ManuallyDrop, MaybeUninit},
+ num::{
+ NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128,
+ NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, Wrapping,
+ },
+ ops::{Deref, DerefMut},
+ ptr, slice,
+};
+
+#[cfg(feature = "alloc")]
+extern crate alloc;
+#[cfg(feature = "alloc")]
+use {
+ alloc::{boxed::Box, vec::Vec},
+ core::ptr::NonNull,
+};
+
+// This is a hack to allow zerocopy-derive derives to work in this crate. They
+// assume that zerocopy is linked as an extern crate, so they access items from
+// it as `zerocopy::Xxx`. This makes that still work.
+#[cfg(any(feature = "derive", test))]
mod zerocopy {
- pub use crate::*;
+ pub(crate) use crate::*;
}
-// implement an unsafe trait for a range of container types
-macro_rules! impl_for_composite_types {
- ($trait:ident) => {
- unsafe impl<T> $trait for PhantomData<T> {
- fn only_derive_is_allowed_to_implement_this_trait()
- where
- Self: Sized,
- {
- }
- }
- unsafe impl<T: $trait> $trait for [T] {
- fn only_derive_is_allowed_to_implement_this_trait()
- where
- Self: Sized,
- {
- }
- }
- unsafe impl $trait for () {
- fn only_derive_is_allowed_to_implement_this_trait()
- where
- Self: Sized,
- {
- }
- }
- unsafe impl<T: $trait, const N: usize> $trait for [T; N] {
- fn only_derive_is_allowed_to_implement_this_trait()
- where
- Self: Sized,
- {
- }
+/// The layout of a type which might be dynamically-sized.
+///
+/// `DstLayout` describes the layout of sized types, slice types, and "custom
+/// DSTs" - ie, those that are known by the type system to have a trailing slice
+/// (as distinguished from `dyn Trait` types - such types *might* have a
+/// trailing slice type, but the type system isn't aware of it).
+#[doc(hidden)]
+#[allow(missing_debug_implementations, missing_copy_implementations)]
+#[cfg_attr(test, derive(Copy, Clone, Debug, PartialEq, Eq))]
+pub struct DstLayout {
+ /// The base size and the alignment of the type:
+ /// - For sized types, the size encoded by this `Layout` is
+ /// `size_of::<T>()`. For DSTs, the size represents the size of the type
+ /// when the trailing slice field contains 0 elements.
+ /// - For all types, the alignment represents the alignment of the type.
+ // TODO: If we end up replacing this with separate size and alignment to
+ // make Kani happy, file an issue to eventually adopt the stdlib's
+ // `Alignment` type trick.
+ _base_layout: Layout,
+ /// For sized types, `None`. For DSTs, the size of the element type of the
+ /// trailing slice.
+ _trailing_slice_elem_size: Option<usize>,
+}
+
+#[cfg_attr(test, derive(Copy, Clone, Debug))]
+enum _CastType {
+ _Prefix,
+ _Suffix,
+}
+
+impl DstLayout {
+ /// Constructs a `DstLayout` which describes `T`.
+ ///
+ /// # Safety
+ ///
+ /// Unsafe code may assume that `DstLayout` is the correct layout for `T`.
+ const fn for_type<T>() -> DstLayout {
+ DstLayout { _base_layout: Layout::new::<T>(), _trailing_slice_elem_size: None }
+ }
+
+ /// Constructs a `DstLayout` which describes `[T]`.
+ ///
+ /// # Safety
+ ///
+ /// Unsafe code may assume that `DstLayout` is the correct layout for `[T]`.
+ const fn for_slice<T>() -> DstLayout {
+ DstLayout {
+ // SAFETY: `[T; 0]` has the same alignment as `T`, but zero size.
+ // [1] A slice of length 0 has no size, so 0 is the correct size for
+ // the base of the type.
+ //
+ // [1] https://doc.rust-lang.org/reference/type-layout.html#array-layout
+ _base_layout: Layout::new::<[T; 0]>(),
+ _trailing_slice_elem_size: Some(mem::size_of::<T>()),
}
- };
-}
+ }
-/// Implements `$trait` for one or more `$type`s.
-macro_rules! impl_for_types {
- ($trait:ident, $type:ty) => (
- unsafe impl $trait for $type {
- fn only_derive_is_allowed_to_implement_this_trait() where Self: Sized {}
+ /// Validates that a cast is sound from a layout perspective.
+ ///
+ /// Validates that the size and alignment requirements of a type with the
+ /// layout described in `self` would not be violated by performing a
+ /// `cast_type` cast from a pointer with address `addr` which refers to a
+ /// memory region of size `bytes_len`.
+ ///
+ /// If the cast is valid, `validate_cast_and_convert_metadata` returns
+ /// `(elems, split_at)`. If `self` describes a dynamically-sized type, then
+ /// `elems` is the maximum number of trailing slice elements for which a
+    /// cast would be valid (for sized types, `elems` is meaningless and should
+ /// be ignored). `split_at` is the index at which to split the memory region
+ /// in order for the prefix (suffix) to contain the result of the cast, and
+ /// in order for the remaining suffix (prefix) to contain the leftover
+ /// bytes.
+ ///
+ /// There are three conditions under which a cast can fail:
+ /// - The smallest possible value for the type is larger than the provided
+ /// memory region
+ /// - A prefix cast is requested, and `addr` does not satisfy `self`'s
+ /// alignment requirement
+ /// - A suffix cast is requested, and `addr + bytes_len` does not satisfy
+ /// `self`'s alignment requirement (as a consequence, since the size of
+ /// the trailing slice element is a multiple of the alignment, no length
+ /// for the trailing slice will result in a starting address which is
+ /// properly aligned)
+ ///
+ /// # Safety
+ ///
+ /// The caller may assume that this implementation is correct, and may rely
+ /// on that assumption for the soundness of their code. In particular, the
+ /// caller may assume that, if `validate_cast_and_convert_metadata` returns
+ /// `Some((elems, split_at))`, then:
+ /// - A pointer to the type (for dynamically sized types, this includes
+ /// `elems` as its pointer metadata) describes an object of size `size <=
+ /// bytes_len`
+ /// - If this is a prefix cast:
+ /// - `addr` satisfies `self`'s alignment
+ /// - `size == split_at`
+ /// - If this is a suffix cast:
+ /// - `split_at == bytes_len - size`
+ /// - `addr + split_at` satisfies `self`'s alignment
+ ///
+ /// Note that this method does *not* ensure that a pointer constructed from
+ /// its return values will be a valid pointer. In particular, this method
+ /// does not reason about `isize` overflow, which is a requirement of many
+ /// Rust pointer APIs, and may at some point be determined to be a validity
+ /// invariant of pointer types themselves. This should never be a problem so
+ /// long as the arguments to this method are derived from a known-valid
+ /// pointer (e.g., one derived from a safe Rust reference), but it is
+ /// nonetheless the caller's responsibility to justify that pointer
+ /// arithmetic will not overflow based on a safety argument *other than* the
+ /// mere fact that this method returned successfully.
+ ///
+ /// # Panics
+ ///
+ /// If `addr + bytes_len` overflows `usize`,
+ /// `validate_cast_and_convert_metadata` may panic, or it may return
+ /// incorrect results. No guarantees are made about when
+ /// `validate_cast_and_convert_metadata` will panic. The caller should not
+ /// rely on `validate_cast_and_convert_metadata` panicking in any particular
+ /// condition, even if `debug_assertions` are enabled.
+ const fn _validate_cast_and_convert_metadata(
+ &self,
+ addr: usize,
+ bytes_len: usize,
+ cast_type: _CastType,
+ ) -> Option<(usize, usize)> {
+ // `debug_assert!`, but with `#[allow(clippy::arithmetic_side_effects)]`.
+ macro_rules! __debug_assert {
+ ($e:expr $(, $msg:expr)?) => {
+ debug_assert!({
+ #[allow(clippy::arithmetic_side_effects)]
+ let e = $e;
+ e
+ } $(, $msg)?);
+ };
}
- );
- ($trait:ident, $type:ty, $($types:ty),*) => (
- unsafe impl $trait for $type {
- fn only_derive_is_allowed_to_implement_this_trait() where Self: Sized {}
+
+ // Note that, in practice, `elem_size` is always a compile-time
+ // constant. We do this check earlier than needed to ensure that we
+ // always panic as a result of bugs in the program (such as calling this
+ // function on an invalid type) instead of allowing this panic to be
+ // hidden if the cast would have failed anyway for runtime reasons (such
+ // as a too-small memory region).
+ //
+ // TODO(#67): Once our MSRV is 1.65, use let-else:
+ // https://blog.rust-lang.org/2022/11/03/Rust-1.65.0.html#let-else-statements
+ let elem_size = match self._trailing_slice_elem_size {
+ Some(elem_size) => match NonZeroUsize::new(elem_size) {
+ Some(elem_size) => Some(elem_size),
+ None => panic!("attempted to cast to slice type with zero-sized element"),
+ },
+ None => None,
+ };
+
+ // Precondition
+ __debug_assert!(addr.checked_add(bytes_len).is_some(), "`addr` + `bytes_len` > usize::MAX");
+
+ // We check alignment for `addr` (for prefix casts) or `addr +
+ // bytes_len` (for suffix casts). For a prefix cast, the correctness of
+ // this check is trivial - `addr` is the address the object will live
+ // at.
+ //
+ // For a suffix cast, we know that all valid sizes for the type are a
+ // multiple of the alignment. Thus, a validly-sized instance which lives
+ // at a validly-aligned address must also end at a validly-aligned
+ // address. Thus, if the end address for a suffix cast (`addr +
+ // bytes_len`) is not aligned, then no valid start address will be
+ // aligned either.
+ let offset = match cast_type {
+ _CastType::_Prefix => 0,
+ _CastType::_Suffix => bytes_len,
+ };
+
+ // Addition is guaranteed not to overflow because `offset <= bytes_len`,
+ // and `addr + bytes_len <= usize::MAX` is a precondition of this
+ // method. Modulus is guaranteed not to divide by 0 because `.align()`
+ // guarantees that its return value is non-zero.
+ #[allow(clippy::arithmetic_side_effects)]
+ if (addr + offset) % self._base_layout.align() != 0 {
+ return None;
}
- impl_for_types!($trait, $($types),*);
- );
-}
-/// Implements `$trait` for all signed and unsigned primitive types.
-macro_rules! impl_for_primitives {
- ($trait:ident) => {
- impl_for_types!(
- $trait, u8, i8, u16, i16, u32, i32, u64, i64, u128, i128, usize, isize, f32, f64
- );
- };
+ let base_size = self._base_layout.size();
+
+ // LEMMA 0: max_slice_bytes + base_size == bytes_len
+ //
+ // LEMMA 1: base_size <= bytes_len:
+ // - If `base_size > bytes_len`, `bytes_len.checked_sub(base_size)`
+ // returns `None`, and we return.
+ //
+ // TODO(#67): Once our MSRV is 1.65, use let-else:
+ // https://blog.rust-lang.org/2022/11/03/Rust-1.65.0.html#let-else-statements
+ let max_slice_bytes = if let Some(max_byte_slice) = bytes_len.checked_sub(base_size) {
+ max_byte_slice
+ } else {
+ return None;
+ };
+
+ // Lemma 0
+ __debug_assert!(max_slice_bytes + base_size == bytes_len);
+
+ // Lemma 1
+ __debug_assert!(base_size <= bytes_len);
+
+ let (elems, self_bytes) = if let Some(elem_size) = elem_size {
+ // Guaranteed not to divide by 0 because `elem_size` is a
+ // `NonZeroUsize`.
+ #[allow(clippy::arithmetic_side_effects)]
+ let elems = max_slice_bytes / elem_size.get();
+
+ // NOTE: Another option for this step in the algorithm is to set
+ // `slice_bytes = elems * elem_size`. However, using multiplication
+ // causes Kani to choke. In practice, the compiler is likely to
+ // generate identical machine code in both cases. Note that this
+ // divide-then-mod approach is trivially optimizable into a single
+ // operation that computes both the quotient and the remainder.
+
+ // First line is guaranteed not to mod by 0 because `elem_size` is a
+ // `NonZeroUsize`. Second line is guaranteed not to underflow
+ // because `rem <= max_slice_bytes` thanks to the mod operation.
+ //
+ // LEMMA 2: slice_bytes <= max_slice_bytes
+ #[allow(clippy::arithmetic_side_effects)]
+ let rem = max_slice_bytes % elem_size.get();
+ #[allow(clippy::arithmetic_side_effects)]
+ let slice_bytes = max_slice_bytes - rem;
+
+ // Lemma 2
+ __debug_assert!(slice_bytes <= max_slice_bytes);
+
+ // Guaranteed not to overflow:
+ // - max_slice_bytes + base_size == bytes_len (lemma 0)
+ // - slice_bytes <= max_slice_bytes (lemma 2)
+ // - slice_bytes + base_size <= bytes_len (substitution) ------+
+ // - bytes_len <= usize::MAX (bytes_len: usize) |
+ // - slice_bytes + base_size <= usize::MAX (substitution) |
+ // |
+ // LEMMA 3: self_bytes <= bytes_len: |
+ // - slice_bytes + base_size <= bytes_len <--------------------------+ (reused for lemma)
+ // - slice_bytes <= bytes_len
+ #[allow(clippy::arithmetic_side_effects)]
+ let self_bytes = base_size + slice_bytes;
+
+ // Lemma 3
+ __debug_assert!(self_bytes <= bytes_len);
+
+ (elems, self_bytes)
+ } else {
+ (0, base_size)
+ };
+
+ // LEMMA 4: self_bytes <= bytes_len:
+ // - `if` branch returns `self_bytes`; lemma 3 guarantees `self_bytes <=
+ // bytes_len`
+ // - `else` branch returns `base_size`; lemma 1 guarantees `base_size <=
+ // bytes_len`
+
+ // Lemma 4
+ __debug_assert!(self_bytes <= bytes_len);
+
+ let split_at = match cast_type {
+ _CastType::_Prefix => self_bytes,
+ // Guaranteed not to underflow because `self_bytes <= bytes_len`
+ // (lemma 4).
+ #[allow(clippy::arithmetic_side_effects)]
+ _CastType::_Suffix => bytes_len - self_bytes,
+ };
+
+ Some((elems, split_at))
+ }
}
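Since `_validate_cast_and_convert_metadata` is private, it can't be called directly, but a worked example with made-up numbers may help make the `(elems, split_at)` contract concrete. Assume a hypothetical DST with a 4-byte base, trailing `u16` elements, alignment 2, and a prefix cast:

```rust
// Made-up numbers mirroring the algorithm above; nothing here is public API.
fn main() {
    let (base_size, elem_size, align) = (4usize, 2usize, 2usize);
    let (addr, bytes_len) = (0x1000usize, 11usize);

    assert_eq!(addr % align, 0); // prefix cast: alignment is checked at `addr`
    let max_slice_bytes = bytes_len - base_size; // 7
    let elems = max_slice_bytes / elem_size; // 3 trailing elements fit
    let slice_bytes = max_slice_bytes - max_slice_bytes % elem_size; // 6
    let self_bytes = base_size + slice_bytes; // 10
    let split_at = self_bytes; // prefix cast: the object occupies bytes [0, 10)

    assert_eq!((elems, split_at), (3, 10)); // 1 leftover byte goes to the suffix
}
```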
-/// Types for which any byte pattern is valid.
+/// A trait which carries information about a type's layout that is used by the
+/// internals of this crate.
+///
+/// This trait is not meant for consumption by code outside of this crate. While
+/// the normal semver stability guarantees apply with respect to which types
+/// implement this trait and which trait implementations are implied by this
+/// trait, no semver stability guarantees are made regarding its internals; they
+/// may change at any time, and code which makes use of them may break.
+///
+/// # Safety
+///
+/// This trait does not convey any safety guarantees to code outside this crate.
+#[doc(hidden)] // TODO: Remove this once KnownLayout is used by other APIs
+pub unsafe trait KnownLayout: sealed::KnownLayoutSealed {
+ #[doc(hidden)]
+ const LAYOUT: DstLayout;
+}
+
+impl<T: KnownLayout> sealed::KnownLayoutSealed for [T] {}
+// SAFETY: Delegates safety to `DstLayout::for_slice`.
+unsafe impl<T: KnownLayout> KnownLayout for [T] {
+ const LAYOUT: DstLayout = DstLayout::for_slice::<T>();
+}
+
+#[rustfmt::skip]
+impl_known_layout!(
+ (),
+ u8, i8, u16, i16, u32, i32, u64, i64, u128, i128, usize, isize, f32, f64,
+ bool, char,
+ NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32, NonZeroI32,
+ NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize
+);
+#[rustfmt::skip]
+impl_known_layout!(
+ T => Option<T>,
+ T: ?Sized => PhantomData<T>,
+ T => Wrapping<T>,
+ T => MaybeUninit<T>,
+);
+impl_known_layout!(const N: usize, T => [T; N]);
+
+safety_comment! {
+ /// SAFETY:
+ /// `str` and `ManuallyDrop<[T]>` have the same representations as `[u8]`
+    /// and `[T]` respectively. `str` has different bit validity than `[u8]`,
+ /// but that doesn't affect the soundness of this impl.
+ unsafe_impl_known_layout!(#[repr([u8])] str);
+ unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] ManuallyDrop<T>);
+}
+
+/// Types for which a sequence of bytes all set to zero represents a valid
+/// instance of the type.
///
/// WARNING: Do not implement this trait yourself! Instead, use
-/// `#[derive(FromBytes)]`.
+/// `#[derive(FromZeroes)]` (requires the `derive` Cargo feature).
///
-/// `FromBytes` types can safely be deserialized from an untrusted sequence of
-/// bytes because any byte sequence corresponds to a valid instance of the type.
+/// Any memory region of the appropriate length which is guaranteed to contain
+/// only zero bytes can be viewed as any `FromZeroes` type with no runtime
+/// overhead. This is useful whenever memory is known to be in a zeroed state,
+/// such as memory returned from some allocation routines.
///
-/// `FromBytes` is ignorant of byte order. For byte order-aware types, see the
+/// `FromZeroes` is ignorant of byte order. For byte order-aware types, see the
/// [`byteorder`] module.
///
/// # Safety
///
-/// If `T: FromBytes`, then unsafe code may assume that it is sound to treat any
-/// initialized sequence of bytes of length `size_of::<T>()` as a `T`. If a type
-/// is marked as `FromBytes` which violates this contract, it may cause
-/// undefined behavior.
+/// *This section describes what is required in order for `T: FromZeroes`, and
+/// what unsafe code may assume of such types. `#[derive(FromZeroes)]` only
+/// permits types which satisfy these requirements. If you don't plan on
+/// implementing `FromZeroes` manually, and you don't plan on writing unsafe
+/// code that operates on `FromZeroes` types, then you don't need to read this
+/// section.*
///
-/// If a type has the following properties, then it is safe to implement
-/// `FromBytes` for that type:
-/// - If the type is a struct:
-/// - All of its fields must implement `FromBytes`
-/// - If the type is an enum:
-/// - It must be a C-like enum (meaning that all variants have no fields)
-/// - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
-/// `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
-/// - The maximum number of discriminants must be used (so that every possible
-/// bit pattern is a valid one). Be very careful when using the `C`,
-/// `usize`, or `isize` representations, as their size is
-/// platform-dependent.
+/// If `T: FromZeroes`, then unsafe code may assume that:
+/// - It is sound to treat any initialized sequence of zero bytes of length
+/// `size_of::<T>()` as a `T`.
+/// - Given `b: &[u8]` where `b.len() == size_of::<T>()`, `b` is aligned to
+/// `align_of::<T>()`, and `b` contains only zero bytes, it is sound to
+/// construct a `t: &T` at the same address as `b`, and it is sound for both
+/// `b` and `t` to be live at the same time.
+///
+/// If a type is marked as `FromZeroes` which violates this contract, it may
+/// cause undefined behavior.
+///
+/// If a type has the following properties, then it is sound to implement
+/// `FromZeroes` for that type:
+/// - If the type is a struct, all of its fields must satisfy the requirements
+/// to be `FromZeroes` (they do not actually have to be `FromZeroes`).
+/// - If the type is an enum, it must be C-like (meaning that all variants have
+/// no fields) and it must have a variant with a discriminant of `0`. See [the
+/// reference] for a description of how discriminant values are chosen.
+/// - The type must not contain any [`UnsafeCell`]s (this is required in order
+/// for it to be sound to construct a `&[u8]` and a `&T` to the same region of
+/// memory). The type may contain references or pointers to `UnsafeCell`s so
+/// long as those values can themselves be initialized from zeroes
+/// (`FromZeroes` is not currently implemented for, e.g.,
+/// `Option<&UnsafeCell<_>>`, but it could be one day).
+///
+/// [the reference]: https://doc.rust-lang.org/reference/items/enumerations.html#custom-discriminant-values-for-fieldless-enumerations
+/// [`UnsafeCell`]: core::cell::UnsafeCell
///
/// # Rationale
///
/// ## Why isn't an explicit representation required for structs?
///
/// Per the [Rust reference][reference],
+///
/// > The representation of a type can change the padding between fields, but
/// does not change the layout of the fields themselves.
///
/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
///
/// Since the layout of structs only consists of padding bytes and field bytes,
-/// a struct is soundly `FromBytes` if:
-/// 1. its padding is soundly `FromBytes`, and
-/// 2. its fields are soundly `FromBytes`.
+/// a struct is soundly `FromZeroes` if:
+/// 1. its padding is soundly `FromZeroes`, and
+/// 2. its fields are soundly `FromZeroes`.
///
/// The answer to the first question is always yes: padding bytes do not have
/// any validity constraints. A [discussion] of this question in the Unsafe Code
@@ -177,63 +607,46 @@ macro_rules! impl_for_primitives {
///
/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
///
-/// Whether a struct is soundly `FromBytes` therefore solely depends on whether
-/// its fields are `FromBytes`.
-pub unsafe trait FromBytes {
- // NOTE: The Self: Sized bound makes it so that FromBytes is still object
+/// Whether a struct is soundly `FromZeroes` therefore solely depends on whether
+/// its fields are `FromZeroes`.
+// TODO(#146): Document why we don't require an enum to have an explicit `repr`
+// attribute.
+pub unsafe trait FromZeroes {
+ // The `Self: Sized` bound makes it so that `FromZeroes` is still object
// safe.
#[doc(hidden)]
fn only_derive_is_allowed_to_implement_this_trait()
where
Self: Sized;
- /// Reads a copy of `Self` from `bytes`.
- ///
- /// If `bytes.len() != size_of::<Self>()`, `read_from` returns `None`.
- fn read_from<B: ByteSlice>(bytes: B) -> Option<Self>
- where
- Self: Sized,
- {
- let lv = LayoutVerified::<_, Unalign<Self>>::new_unaligned(bytes)?;
- Some(lv.read().into_inner())
- }
-
- /// Reads a copy of `Self` from the prefix of `bytes`.
- ///
- /// `read_from_prefix` reads a `Self` from the first `size_of::<Self>()`
- /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()`, it returns
- /// `None`.
- fn read_from_prefix<B: ByteSlice>(bytes: B) -> Option<Self>
- where
- Self: Sized,
- {
- let (lv, _suffix) = LayoutVerified::<_, Unalign<Self>>::new_unaligned_from_prefix(bytes)?;
- Some(lv.read().into_inner())
- }
-
- /// Reads a copy of `Self` from the suffix of `bytes`.
- ///
- /// `read_from_suffix` reads a `Self` from the last `size_of::<Self>()`
- /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()`, it returns
- /// `None`.
- fn read_from_suffix<B: ByteSlice>(bytes: B) -> Option<Self>
- where
- Self: Sized,
- {
- let (_prefix, lv) = LayoutVerified::<_, Unalign<Self>>::new_unaligned_from_suffix(bytes)?;
- Some(lv.read().into_inner())
+ /// Overwrites `self` with zeroes.
+ ///
+ /// Sets every byte in `self` to 0. While this is similar to doing `*self =
+ /// Self::new_zeroed()`, it differs in that `zero` does not semantically
+ /// drop the current value and replace it with a new one - it simply
+ /// modifies the bytes of the existing value.
+ #[inline(always)]
+ fn zero(&mut self) {
+ let slf: *mut Self = self;
+ let len = mem::size_of_val(self);
+ // SAFETY:
+ // - `self` is guaranteed by the type system to be valid for writes of
+ // size `size_of_val(self)`.
+ // - `u8`'s alignment is 1, and thus `self` is guaranteed to be aligned
+ // as required by `u8`.
+ // - Since `Self: FromZeroes`, the all-zeroes instance is a valid
+ // instance of `Self.`
+ unsafe { ptr::write_bytes(slf.cast::<u8>(), 0, len) };
}
/// Creates an instance of `Self` from zeroed bytes.
+ #[inline(always)]
fn new_zeroed() -> Self
where
Self: Sized,
{
- unsafe {
- // Safe because FromBytes says all bit patterns (including zeroes)
- // are legal.
- core::mem::zeroed()
- }
+ // SAFETY: `FromZeroes` says that the all-zeroes bit pattern is legal.
+ unsafe { mem::zeroed() }
}
/// Creates a `Box<Self>` from zeroed bytes.
@@ -254,20 +667,23 @@ pub unsafe trait FromBytes {
/// # Panics
///
/// Panics if allocation of `size_of::<Self>()` bytes fails.
- #[cfg(any(test, feature = "alloc"))]
+ #[cfg(feature = "alloc")]
+ #[inline]
fn new_box_zeroed() -> Box<Self>
where
Self: Sized,
{
- // If T is a ZST, then return a proper boxed instance of it. There is no
- // allocation, but Box does require a correct dangling pointer.
+ // If `T` is a ZST, then return a proper boxed instance of it. There is
+ // no allocation, but `Box` does require a correct dangling pointer.
let layout = Layout::new::<Self>();
if layout.size() == 0 {
return Box::new(Self::new_zeroed());
}
+ // TODO(#61): Add a "SAFETY" comment and remove this `allow`.
+ #[allow(clippy::undocumented_unsafe_blocks)]
unsafe {
- let ptr = alloc::alloc::alloc_zeroed(layout) as *mut Self;
+ let ptr = alloc::alloc::alloc_zeroed(layout).cast::<Self>();
if ptr.is_null() {
alloc::alloc::handle_alloc_error(layout);
}
@@ -296,44 +712,207 @@ pub unsafe trait FromBytes {
///
/// * Panics if `size_of::<Self>() * len` overflows.
/// * Panics if allocation of `size_of::<Self>() * len` bytes fails.
- #[cfg(any(test, feature = "alloc"))]
+ #[cfg(feature = "alloc")]
+ #[inline]
fn new_box_slice_zeroed(len: usize) -> Box<[Self]>
where
Self: Sized,
{
- // TODO(https://fxbug.dev/80757): Use Layout::repeat() when `alloc_layout_extra` is stabilized
- // This will intentionally panic if it overflows.
+ let size = mem::size_of::<Self>()
+ .checked_mul(len)
+ .expect("mem::size_of::<Self>() * len overflows `usize`");
+ let align = mem::align_of::<Self>();
+ // On stable Rust versions <= 1.64.0, `Layout::from_size_align` has a
+ // bug in which sufficiently-large allocations (those which, when
+ // rounded up to the alignment, overflow `isize`) are not rejected,
+ // which can cause undefined behavior. See #64 for details.
+ //
+ // TODO(#67): Once our MSRV is > 1.64.0, remove this assertion.
+ #[allow(clippy::as_conversions)]
+ let max_alloc = (isize::MAX as usize).saturating_sub(align);
+ assert!(size <= max_alloc);
+ // TODO(https://github.com/rust-lang/rust/issues/55724): Use
+ // `Layout::repeat` once it's stabilized.
+ let layout =
+ Layout::from_size_align(size, align).expect("total allocation size overflows `isize`");
+
+ // TODO(#61): Add a "SAFETY" comment and remove this `allow`.
+ #[allow(clippy::undocumented_unsafe_blocks)]
unsafe {
- // from_size_align_unchecked() is sound because slice_len_bytes is
- // guaranteed to be properly aligned (we just multiplied it by
- // size_of::<T>(), which is guaranteed to be aligned).
- let layout = Layout::from_size_align_unchecked(
- size_of::<Self>().checked_mul(len).unwrap(),
- align_of::<Self>(),
- );
if layout.size() != 0 {
- let ptr = alloc::alloc::alloc_zeroed(layout) as *mut Self;
+ let ptr = alloc::alloc::alloc_zeroed(layout).cast::<Self>();
if ptr.is_null() {
alloc::alloc::handle_alloc_error(layout);
}
- Box::from_raw(core::slice::from_raw_parts_mut(ptr, len))
+ Box::from_raw(slice::from_raw_parts_mut(ptr, len))
} else {
- // Box<[T]> does not allocate when T is zero-sized or when len
- // is zero, but it does require a non-null dangling pointer for
- // its allocation.
- Box::from_raw(core::slice::from_raw_parts_mut(
- NonNull::<Self>::dangling().as_ptr(),
- len,
- ))
+ // `Box<[T]>` does not allocate when `T` is zero-sized or when
+ // `len` is zero, but it does require a non-null dangling
+ // pointer for its allocation.
+ Box::from_raw(slice::from_raw_parts_mut(NonNull::<Self>::dangling().as_ptr(), len))
}
}
}
+
+ /// Creates a `Vec<Self>` from zeroed bytes.
+ ///
+    /// This function is useful for allocating large `Vec`s and
+ /// zero-initializing them, without ever creating a temporary instance of
+ /// `[Self; _]` (or many temporary instances of `Self`) on the stack. For
+ /// example, `u8::new_vec_zeroed(1048576)` will allocate directly on the
+ /// heap; it does not require storing intermediate values on the stack.
+ ///
+ /// On systems that use a heap implementation that supports allocating from
+ /// pre-zeroed memory, using `new_vec_zeroed` may have performance benefits.
+ ///
+ /// If `Self` is a zero-sized type, then this function will return a
+ /// `Vec<Self>` that has the correct `len`. Such a `Vec` cannot contain any
+ /// actual information, but its `len()` property will report the correct
+ /// value.
+ ///
+ /// # Panics
+ ///
+ /// * Panics if `size_of::<Self>() * len` overflows.
+ /// * Panics if allocation of `size_of::<Self>() * len` bytes fails.
+ #[cfg(feature = "alloc")]
+ #[inline(always)]
+ fn new_vec_zeroed(len: usize) -> Vec<Self>
+ where
+ Self: Sized,
+ {
+ Self::new_box_slice_zeroed(len).into()
+ }
+}
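For reference, a minimal sketch of how these methods are used downstream, assuming zerocopy 0.7 with the `derive` and `alloc` features enabled (the `Header` type is hypothetical):

```rust
use zerocopy::FromZeroes;

#[derive(FromZeroes)]
#[repr(C)]
struct Header {
    magic: u32,
    len: u16,
    flags: u16,
}

fn main() {
    // Construct the all-zeroes instance without any `unsafe`.
    let mut h = Header::new_zeroed();
    h.flags = 1;

    // Overwrite every byte with zero in place (no drop-and-replace).
    h.zero();
    assert_eq!(h.flags, 0);

    // Allocate a large zeroed buffer directly on the heap.
    let buf = u8::new_box_slice_zeroed(1 << 20);
    assert_eq!(buf.len(), 1 << 20);
}
```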
+
+/// Types for which any byte pattern is valid.
+///
+/// WARNING: Do not implement this trait yourself! Instead, use
+/// `#[derive(FromBytes)]` (requires the `derive` Cargo feature).
+///
+/// `FromBytes` types can safely be deserialized from an untrusted sequence of
+/// bytes because any byte sequence corresponds to a valid instance of the type.
+///
+/// `FromBytes` is ignorant of byte order. For byte order-aware types, see the
+/// [`byteorder`] module.
+///
+/// # Safety
+///
+/// *This section describes what is required in order for `T: FromBytes`, and
+/// what unsafe code may assume of such types. `#[derive(FromBytes)]` only
+/// permits types which satisfy these requirements. If you don't plan on
+/// implementing `FromBytes` manually, and you don't plan on writing unsafe code
+/// that operates on `FromBytes` types, then you don't need to read this
+/// section.*
+///
+/// If `T: FromBytes`, then unsafe code may assume that:
+/// - It is sound to treat any initialized sequence of bytes of length
+/// `size_of::<T>()` as a `T`.
+/// - Given `b: &[u8]` where `b.len() == size_of::<T>()` and `b` is aligned to
+/// `align_of::<T>()`, it is sound to construct a `t: &T` at the same address
+/// as `b`, and it is sound for both `b` and `t` to be live at the same time.
+///
+/// If a type is marked as `FromBytes` which violates this contract, it may
+/// cause undefined behavior.
+///
+/// If a type has the following properties, then it is sound to implement
+/// `FromBytes` for that type:
+/// - If the type is a struct, all of its fields must satisfy the requirements
+/// to be `FromBytes` (they do not actually have to be `FromBytes`)
+/// - If the type is an enum:
+/// - It must be a C-like enum (meaning that all variants have no fields).
+/// - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
+/// `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
+/// - The maximum number of discriminants must be used (so that every possible
+/// bit pattern is a valid one). Be very careful when using the `C`,
+/// `usize`, or `isize` representations, as their size is
+/// platform-dependent.
+/// - The type must not contain any [`UnsafeCell`]s (this is required in order
+/// for it to be sound to construct a `&[u8]` and a `&T` to the same region of
+/// memory). The type may contain references or pointers to `UnsafeCell`s so
+/// long as those values can themselves be initialized from zeroes
+/// (`FromBytes` is not currently implemented for, e.g., `Option<*const
+/// UnsafeCell<_>>`, but it could be one day).
+///
+/// [`UnsafeCell`]: core::cell::UnsafeCell
+///
+/// # Rationale
+///
+/// ## Why isn't an explicit representation required for structs?
+///
+/// Per the [Rust reference][reference],
+///
+/// > The representation of a type can change the padding between fields, but
+/// does not change the layout of the fields themselves.
+///
+/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
+///
+/// Since the layout of structs only consists of padding bytes and field bytes,
+/// a struct is soundly `FromBytes` if:
+/// 1. its padding is soundly `FromBytes`, and
+/// 2. its fields are soundly `FromBytes`.
+///
+/// The answer to the first question is always yes: padding bytes do not have
+/// any validity constraints. A [discussion] of this question in the Unsafe Code
+/// Guidelines Working Group concluded that it would be virtually unimaginable
+/// for future versions of rustc to add validity constraints to padding bytes.
+///
+/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
+///
+/// Whether a struct is soundly `FromBytes` therefore solely depends on whether
+/// its fields are `FromBytes`.
+pub unsafe trait FromBytes: FromZeroes {
+ // The `Self: Sized` bound makes it so that `FromBytes` is still object
+ // safe.
+ #[doc(hidden)]
+ fn only_derive_is_allowed_to_implement_this_trait()
+ where
+ Self: Sized;
+
+ /// Reads a copy of `Self` from `bytes`.
+ ///
+ /// If `bytes.len() != size_of::<Self>()`, `read_from` returns `None`.
+ #[inline]
+ fn read_from(bytes: &[u8]) -> Option<Self>
+ where
+ Self: Sized,
+ {
+ let r = Ref::<_, Unalign<Self>>::new_unaligned(bytes)?;
+ Some(r.read().into_inner())
+ }
+
+ /// Reads a copy of `Self` from the prefix of `bytes`.
+ ///
+ /// `read_from_prefix` reads a `Self` from the first `size_of::<Self>()`
+ /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()`, it returns
+ /// `None`.
+ #[inline]
+ fn read_from_prefix(bytes: &[u8]) -> Option<Self>
+ where
+ Self: Sized,
+ {
+ let (r, _suffix) = Ref::<_, Unalign<Self>>::new_unaligned_from_prefix(bytes)?;
+ Some(r.read().into_inner())
+ }
+
+ /// Reads a copy of `Self` from the suffix of `bytes`.
+ ///
+ /// `read_from_suffix` reads a `Self` from the last `size_of::<Self>()`
+ /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()`, it returns
+ /// `None`.
+ #[inline]
+ fn read_from_suffix(bytes: &[u8]) -> Option<Self>
+ where
+ Self: Sized,
+ {
+ let (_prefix, r) = Ref::<_, Unalign<Self>>::new_unaligned_from_suffix(bytes)?;
+ Some(r.read().into_inner())
+ }
}
/// Types which are safe to treat as an immutable byte slice.
///
/// WARNING: Do not implement this trait yourself! Instead, use
-/// `#[derive(AsBytes)]`.
+/// `#[derive(AsBytes)]` (requires the `derive` Cargo feature).
///
/// `AsBytes` types can be safely viewed as a slice of bytes. In particular,
/// this means that, in any valid instance of the type, none of the bytes of the
@@ -350,40 +929,71 @@ pub unsafe trait FromBytes {
/// get an error like this:
///
/// ```text
-/// error[E0080]: evaluation of constant value failed
-/// --> lib.rs:1:10
+/// error[E0277]: the trait bound `HasPadding<Foo, true>: ShouldBe<false>` is not satisfied
+/// --> lib.rs:23:10
/// |
/// 1 | #[derive(AsBytes)]
-/// | ^^^^^^^ attempt to divide by zero
+/// | ^^^^^^^ the trait `ShouldBe<false>` is not implemented for `HasPadding<Foo, true>`
+/// |
+/// = help: the trait `ShouldBe<VALUE>` is implemented for `HasPadding<T, VALUE>`
/// ```
///
-/// This error means that the type being annotated has padding bytes, which is
-/// illegal for `AsBytes` types. Consider either adding explicit struct fields
-/// where those padding bytes would be or using `#[repr(packed)]`.
+/// This error indicates that the type being annotated has padding bytes, which
+/// is illegal for `AsBytes` types. Consider reducing the alignment of some
+/// fields by using types in the [`byteorder`] module, adding explicit struct
+/// fields where those padding bytes would be, or using `#[repr(packed)]`. See
+/// the Rust Reference's [page on type layout][type-layout] for more information
+/// about type layout and padding.
///
/// # Safety
///
-/// If `T: AsBytes`, then unsafe code may assume that it is sound to treat any
-/// instance of the type as an immutable `[u8]` of length `size_of::<T>()`. If a
-/// type is marked as `AsBytes` which violates this contract, it may cause
+/// *This section describes what is required in order for `T: AsBytes`, and what
+/// unsafe code may assume of such types. `#[derive(AsBytes)]` only permits
+/// types which satisfy these requirements. If you don't plan on implementing
+/// `AsBytes` manually, and you don't plan on writing unsafe code that operates
+/// on `AsBytes` types, then you don't need to read this section.*
+///
+/// If `T: AsBytes`, then unsafe code may assume that:
+/// - It is sound to treat any `t: T` as an immutable `[u8]` of length
+/// `size_of_val(t)`.
+/// - Given `t: &T`, it is sound to construct a `b: &[u8]` where `b.len() ==
+/// size_of_val(t)` at the same address as `t`, and it is sound for both `b`
+/// and `t` to be live at the same time.
+///
+/// If a type is marked as `AsBytes` which violates this contract, it may cause
/// undefined behavior.
///
-/// If a type has the following properties, then it is safe to implement
-/// `AsBytes` for that type
+/// If a type has the following properties, then it is sound to implement
+/// `AsBytes` for that type:
/// - If the type is a struct:
/// - It must have a defined representation (`repr(C)`, `repr(transparent)`,
/// or `repr(packed)`).
-/// - All of its fields must be `AsBytes`
+/// - All of its fields must satisfy the requirements to be `AsBytes` (they do
+/// not actually have to be `AsBytes`).
/// - Its layout must have no padding. This is always true for
/// `repr(transparent)` and `repr(packed)`. For `repr(C)`, see the layout
/// algorithm described in the [Rust Reference].
/// - If the type is an enum:
-/// - It must be a C-like enum (meaning that all variants have no fields)
+/// - It must be a C-like enum (meaning that all variants have no fields).
/// - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
/// `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
+/// - The type must not contain any [`UnsafeCell`]s (this is required in order
+/// for it to be sound to construct a `&[u8]` and a `&T` to the same region of
+/// memory). The type may contain references or pointers to `UnsafeCell`s so
+/// long as those values can themselves be initialized from zeroes (`AsBytes`
+/// is not currently implemented for, e.g., `Option<&UnsafeCell<_>>`, but it
+/// could be one day).
///
+/// [type-layout]: https://doc.rust-lang.org/reference/type-layout.html
/// [Rust Reference]: https://doc.rust-lang.org/reference/type-layout.html
+/// [`UnsafeCell`]: core::cell::UnsafeCell
pub unsafe trait AsBytes {
+ // The `Self: Sized` bound makes it so that this function doesn't prevent
+ // `AsBytes` from being object safe. Note that other `AsBytes` methods
+ // prevent object safety, but those provide a benefit in exchange for object
+ // safety. If at some point we remove those methods, change their type
+ // signatures, or move them out of this trait so that `AsBytes` is object
+ // safe again, it's important that this function not prevent object safety.
#[doc(hidden)]
fn only_derive_is_allowed_to_implement_this_trait()
where
@@ -393,35 +1003,75 @@ pub unsafe trait AsBytes {
///
/// `as_bytes` provides access to the bytes of this value as an immutable
/// byte slice.
+ #[inline(always)]
fn as_bytes(&self) -> &[u8] {
- unsafe {
- // NOTE: This function does not have a Self: Sized bound.
- // size_of_val works for unsized values too.
- let len = mem::size_of_val(self);
- slice::from_raw_parts(self as *const Self as *const u8, len)
- }
+ // Note that this method does not have a `Self: Sized` bound;
+ // `size_of_val` works for unsized values too.
+ let len = mem::size_of_val(self);
+ let slf: *const Self = self;
+
+ // SAFETY:
+ // - `slf.cast::<u8>()` is valid for reads for `len *
+ // mem::size_of::<u8>()` many bytes because...
+ // - `slf` is the same pointer as `self`, and `self` is a reference
+ // which points to an object whose size is `len`. Thus...
+ // - The entire region of `len` bytes starting at `slf` is contained
+ // within a single allocation.
+ // - `slf` is non-null.
+ // - `slf` is trivially aligned to `align_of::<u8>() == 1`.
+ // - `Self: AsBytes` ensures that all of the bytes of `slf` are
+ // initialized.
+ // - Since `slf` is derived from `self`, and `self` is an immutable
+ // reference, the only other references to this memory region that
+ // could exist are other immutable references, and those don't allow
+ // mutation. `AsBytes` prohibits types which contain `UnsafeCell`s,
+ // which are the only types for which this rule wouldn't be sufficient.
+ // - The total size of the resulting slice is no larger than
+ // `isize::MAX` because no allocation produced by safe code can be
+ // larger than `isize::MAX`.
+ unsafe { slice::from_raw_parts(slf.cast::<u8>(), len) }
}
/// Gets the bytes of this value mutably.
///
/// `as_bytes_mut` provides access to the bytes of this value as a mutable
/// byte slice.
+ #[inline(always)]
fn as_bytes_mut(&mut self) -> &mut [u8]
where
Self: FromBytes,
{
- unsafe {
- // NOTE: This function does not have a Self: Sized bound.
- // size_of_val works for unsized values too.
- let len = mem::size_of_val(self);
- slice::from_raw_parts_mut(self as *mut Self as *mut u8, len)
- }
+ // Note that this method does not have a `Self: Sized` bound;
+ // `size_of_val` works for unsized values too.
+ let len = mem::size_of_val(self);
+ let slf: *mut Self = self;
+
+ // SAFETY:
+ // - `slf.cast::<u8>()` is valid for reads and writes for `len *
+ // mem::size_of::<u8>()` many bytes because...
+ // - `slf` is the same pointer as `self`, and `self` is a reference
+ // which points to an object whose size is `len`. Thus...
+ // - The entire region of `len` bytes starting at `slf` is contained
+ // within a single allocation.
+ // - `slf` is non-null.
+ // - `slf` is trivially aligned to `align_of::<u8>() == 1`.
+ // - `Self: AsBytes` ensures that all of the bytes of `slf` are
+ // initialized.
+ // - `Self: FromBytes` ensures that no write to this memory region
+ // could result in it containing an invalid `Self`.
+ // - Since `slf` is derived from `self`, and `self` is a mutable
+ // reference, no other references to this memory region can exist.
+ // - The total size of the resulting slice is no larger than
+ // `isize::MAX` because no allocation produced by safe code can be
+ // larger than `isize::MAX`.
+ unsafe { slice::from_raw_parts_mut(slf.cast::<u8>(), len) }
}
/// Writes a copy of `self` to `bytes`.
///
/// If `bytes.len() != size_of_val(self)`, `write_to` returns `None`.
- fn write_to<B: ByteSliceMut>(&self, mut bytes: B) -> Option<()> {
+ #[inline]
+ fn write_to(&self, bytes: &mut [u8]) -> Option<()> {
if bytes.len() != mem::size_of_val(self) {
return None;
}
@@ -434,50 +1084,50 @@ pub unsafe trait AsBytes {
///
/// `write_to_prefix` writes `self` to the first `size_of_val(self)` bytes
/// of `bytes`. If `bytes.len() < size_of_val(self)`, it returns `None`.
- fn write_to_prefix<B: ByteSliceMut>(&self, mut bytes: B) -> Option<()> {
+ #[inline]
+ fn write_to_prefix(&self, bytes: &mut [u8]) -> Option<()> {
let size = mem::size_of_val(self);
- if bytes.len() < size {
- return None;
- }
-
- bytes[..size].copy_from_slice(self.as_bytes());
+ bytes.get_mut(..size)?.copy_from_slice(self.as_bytes());
Some(())
}
/// Writes a copy of `self` to the suffix of `bytes`.
///
- /// `write_to_suffix` writes `self` to the last `size_of_val(self)` bytes
- /// of `bytes`. If `bytes.len() < size_of_val(self)`, it returns `None`.
- fn write_to_suffix<B: ByteSliceMut>(&self, mut bytes: B) -> Option<()> {
+ /// `write_to_suffix` writes `self` to the last `size_of_val(self)` bytes of
+ /// `bytes`. If `bytes.len() < size_of_val(self)`, it returns `None`.
+ #[inline]
+ fn write_to_suffix(&self, bytes: &mut [u8]) -> Option<()> {
let start = bytes.len().checked_sub(mem::size_of_val(self))?;
- bytes[start..].copy_from_slice(self.as_bytes());
+ bytes
+ .get_mut(start..)
+ .expect("`start` should be in-bounds of `bytes`")
+ .copy_from_slice(self.as_bytes());
Some(())
}
}
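A sketch of the write-side APIs, under the same `derive`-feature assumption (`Point` is illustrative; `repr(C)` with two `u16`s has no padding, so the derive's padding check passes):

```rust
use zerocopy::AsBytes;

#[derive(AsBytes)]
#[repr(C)]
struct Point {
    x: u16,
    y: u16,
}

fn main() {
    let p = Point { x: 1, y: 2 };

    // Borrow the value's bytes in place; no copy is made.
    assert_eq!(p.as_bytes().len(), 4);

    // Serialize into the front of a larger buffer; the tail is untouched.
    let mut buf = [0xffu8; 8];
    p.write_to_prefix(&mut buf[..]).unwrap();
    assert_eq!(&buf[4..], &[0xff; 4]);
}
```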
-// Special case for bool (it is not included in `impl_for_primitives!`).
-impl_for_types!(AsBytes, bool);
-
-impl_for_primitives!(FromBytes);
-impl_for_primitives!(AsBytes);
-impl_for_composite_types!(FromBytes);
-impl_for_composite_types!(AsBytes);
-
/// Types with no alignment requirement.
///
/// WARNING: Do not implement this trait yourself! Instead, use
-/// `#[derive(Unaligned)]`.
+/// `#[derive(Unaligned)]` (requires the `derive` Cargo feature).
///
/// If `T: Unaligned`, then `align_of::<T>() == 1`.
///
/// # Safety
///
+/// *This section describes what is required in order for `T: Unaligned`, and
+/// what unsafe code may assume of such types. `#[derive(Unaligned)]` only
+/// permits types which satisfy these requirements. If you don't plan on
+/// implementing `Unaligned` manually, and you don't plan on writing unsafe code
+/// that operates on `Unaligned` types, then you don't need to read this
+/// section.*
+///
/// If `T: Unaligned`, then unsafe code may assume that it is sound to produce a
/// reference to `T` at any memory location regardless of alignment. If a type
/// is marked as `Unaligned` which violates this contract, it may cause
/// undefined behavior.
pub unsafe trait Unaligned {
- // NOTE: The Self: Sized bound makes it so that Unaligned is still object
+ // The `Self: Sized` bound makes it so that `Unaligned` is still object
// safe.
#[doc(hidden)]
fn only_derive_is_allowed_to_implement_this_trait()
@@ -485,8 +1135,289 @@ pub unsafe trait Unaligned {
Self: Sized;
}
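And a sketch for `Unaligned`, assuming the `derive` feature (the hypothetical struct is accepted by the derive because it is `repr(C)` and every field has alignment 1):

```rust
use zerocopy::Unaligned;

#[derive(Unaligned)]
#[repr(C)]
struct TwoBytes {
    a: u8,
    b: i8,
}

fn main() {
    // `Unaligned` guarantees alignment 1, so a reference to `TwoBytes` may
    // be produced at any address.
    assert_eq!(core::mem::align_of::<TwoBytes>(), 1);
}
```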
-impl_for_types!(Unaligned, u8, i8);
-impl_for_composite_types!(Unaligned);
+safety_comment! {
+ /// SAFETY:
+ /// Per the reference [1], "the unit tuple (`()`) ... is guaranteed as a
+ /// zero-sized type to have a size of 0 and an alignment of 1."
+ /// - `FromZeroes`, `FromBytes`: There is only one possible sequence of 0
+ /// bytes, and `()` is inhabited.
+ /// - `AsBytes`: Since `()` has size 0, it contains no padding bytes.
+ /// - `Unaligned`: `()` has alignment 1.
+ ///
+ /// [1] https://doc.rust-lang.org/reference/type-layout.html#tuple-layout
+ unsafe_impl!((): FromZeroes, FromBytes, AsBytes, Unaligned);
+ assert_unaligned!(());
+}
+
+safety_comment! {
+ /// SAFETY:
+ /// - `FromZeroes`, `FromBytes`: all bit patterns are valid for integers [1]
+ /// - `AsBytes`: integers have no padding bytes [1]
+ /// - `Unaligned` (`u8` and `i8` only): The reference [2] specifies the size
+ /// of `u8` and `i8` as 1 byte. We also know that:
+ /// - Alignment is >= 1
+ /// - Size is an integer multiple of alignment
+ /// - The only value >= 1 for which 1 is an integer multiple is 1
+ /// Therefore, the only possible alignment for `u8` and `i8` is 1.
+ ///
+ /// [1] TODO(https://github.com/rust-lang/reference/issues/1291): Once the
+ /// reference explicitly guarantees these properties, cite it.
+ /// [2] https://doc.rust-lang.org/reference/type-layout.html#primitive-data-layout
+ unsafe_impl!(u8: FromZeroes, FromBytes, AsBytes, Unaligned);
+ unsafe_impl!(i8: FromZeroes, FromBytes, AsBytes, Unaligned);
+ assert_unaligned!(u8, i8);
+ unsafe_impl!(u16: FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(i16: FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(u32: FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(i32: FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(u64: FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(i64: FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(u128: FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(i128: FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(usize: FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(isize: FromZeroes, FromBytes, AsBytes);
+}
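These impls are what make byte-level round trips of the primitive types work downstream; a small sketch using the crate's `read_from` and `as_bytes` helpers:

```rust
use zerocopy::{AsBytes, FromBytes};

fn main() {
    let n: u64 = 0x0102_0304_0506_0708;
    // `AsBytes` exposes the integer's storage as raw bytes...
    let bytes: &[u8] = n.as_bytes();
    assert_eq!(bytes.len(), 8);
    // ...and `FromBytes` reconstructs the value from any 8-byte slice.
    let m = u64::read_from(bytes).expect("length matches");
    assert_eq!(n, m);
}
```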
+
+safety_comment! {
+ /// SAFETY:
+ /// - `FromZeroes`, `FromBytes`: the `{f32,f64}::from_bits` constructors'
+ /// documentation [1,2] states that they are currently equivalent to
+ /// `transmute`. [3]
+ /// - `AsBytes`: the `{f32,f64}::to_bits` methods' documentation [4,5]
+ /// states that they are currently equivalent to `transmute`. [3]
+ ///
+ /// TODO: Make these arguments more precise in terms of the documentation.
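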
+ ///
+ /// [1] https://doc.rust-lang.org/nightly/std/primitive.f32.html#method.from_bits
+ /// [2] https://doc.rust-lang.org/nightly/std/primitive.f64.html#method.from_bits
+ /// [3] TODO(https://github.com/rust-lang/reference/issues/1291): Once the
+ /// reference explicitly guarantees these properties, cite it.
+ /// [4] https://doc.rust-lang.org/nightly/std/primitive.f32.html#method.to_bits
+ /// [5] https://doc.rust-lang.org/nightly/std/primitive.f64.html#method.to_bits
+ unsafe_impl!(f32: FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(f64: FromZeroes, FromBytes, AsBytes);
+}
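The `to_bits`-is-a-transmute argument can be spot-checked directly; a minimal sketch:

```rust
use zerocopy::AsBytes;

fn main() {
    let x = 1.5f32;
    // Since `to_bits` is documented as equivalent to a transmute, the byte
    // view of an `f32` and of its `u32` bit pattern must coincide.
    assert_eq!(x.as_bytes(), x.to_bits().as_bytes());
}
```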
+
+safety_comment! {
+ /// SAFETY:
+ /// - `FromZeroes`: Per the reference [1], 0x00 is a valid bit pattern for
+ /// `bool`.
+ /// - `AsBytes`: Per the reference [1], `bool` always has a size of 1 with
+ /// valid bit patterns 0x01 and 0x00, so the only byte of the bool is
+ /// always initialized.
+ /// - `Unaligned`: Per the reference [1], "[a]n object with the boolean type
+ /// has a size and alignment of 1 each."
+ ///
+ /// [1] https://doc.rust-lang.org/reference/types/boolean.html
+ unsafe_impl!(bool: FromZeroes, AsBytes, Unaligned);
+ assert_unaligned!(bool);
+}
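Note the asymmetry this encodes: `bool` can be built from zeroes but not from arbitrary bytes. A sketch using `FromZeroes::new_zeroed`:

```rust
use zerocopy::FromZeroes;

fn main() {
    // The all-zeroes bit pattern is a valid `bool` (`false`)...
    let b = <bool as FromZeroes>::new_zeroed();
    assert!(!b);
    // ...but `bool` is deliberately *not* `FromBytes`: byte values other
    // than 0x00 and 0x01 are not valid `bool`s.
}
```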
+safety_comment! {
+ /// SAFETY:
+ /// - `FromZeroes`: Per the reference [1], 0x00000000 is a valid bit pattern
+ /// for `char`.
+ /// - `AsBytes`: `char` is represented as a 32-bit unsigned word (`u32`)
+ /// [1], which is `AsBytes`. Note that unlike `u32`, not all bit patterns
+ /// are valid for `char`.
+ ///
+ /// [1] https://doc.rust-lang.org/reference/types/textual.html
+ unsafe_impl!(char: FromZeroes, AsBytes);
+}
+safety_comment! {
+ /// SAFETY:
+ /// - `FromZeroes`, `AsBytes`, `Unaligned`: Per the reference [1], `str` has
+ /// the same layout as `[u8]`, and `[u8]` is `FromZeroes`, `AsBytes`, and
+ /// `Unaligned`.
+ ///
+ /// Note that we don't `assert_unaligned!(str)` because `assert_unaligned!`
+ /// uses `align_of`, which only works for `Sized` types.
+ ///
+ /// [1] https://doc.rust-lang.org/reference/type-layout.html#str-layout
+ unsafe_impl!(str: FromZeroes, AsBytes, Unaligned);
+}
+
+safety_comment! {
+ // `NonZeroXxx` is `AsBytes`, but not `FromZeroes` or `FromBytes`.
+ //
+ /// SAFETY:
+ /// - `AsBytes`: `NonZeroXxx` has the same layout as its associated
+ /// primitive. Since it is the same size, this guarantees it has no
+ /// padding - integers have no padding, and there's no room for padding
+ /// if it can represent all of the same values except 0.
+ /// - `Unaligned`: `NonZeroU8` and `NonZeroI8` document that
+ /// `Option<NonZeroU8>` and `Option<NonZeroI8>` both have size 1. [1] [2]
+ /// This is worded in a way that makes it unclear whether it's meant as a
+ /// guarantee, but given the purpose of those types, it's virtually
+ /// unthinkable that that would ever change. `Option` cannot be smaller
+ /// than its contained type, which implies that `NonZeroU8` and `NonZeroI8`
+ /// are of size 1 or 0. `NonZeroX8` can represent multiple states, so they cannot
+ /// be 0 bytes, which means that they must be 1 byte. The only valid
+ /// alignment for a 1-byte type is 1.
+ ///
+ /// [1] https://doc.rust-lang.org/stable/std/num/struct.NonZeroU8.html
+ /// [2] https://doc.rust-lang.org/stable/std/num/struct.NonZeroI8.html
+ /// TODO(https://github.com/rust-lang/rust/pull/104082): Cite documentation
+ /// that layout is the same as primitive layout.
+ unsafe_impl!(NonZeroU8: AsBytes, Unaligned);
+ unsafe_impl!(NonZeroI8: AsBytes, Unaligned);
+ assert_unaligned!(NonZeroU8, NonZeroI8);
+ unsafe_impl!(NonZeroU16: AsBytes);
+ unsafe_impl!(NonZeroI16: AsBytes);
+ unsafe_impl!(NonZeroU32: AsBytes);
+ unsafe_impl!(NonZeroI32: AsBytes);
+ unsafe_impl!(NonZeroU64: AsBytes);
+ unsafe_impl!(NonZeroI64: AsBytes);
+ unsafe_impl!(NonZeroU128: AsBytes);
+ unsafe_impl!(NonZeroI128: AsBytes);
+ unsafe_impl!(NonZeroUsize: AsBytes);
+ unsafe_impl!(NonZeroIsize: AsBytes);
+}
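The size-1 step of the argument above is easy to check directly:

```rust
use core::mem::size_of;
use core::num::{NonZeroI8, NonZeroU8};

fn main() {
    // `Option<NonZeroX8>` has size 1, and `Option` cannot be smaller than
    // its payload, so `NonZeroX8` must have size (and thus alignment) 1.
    assert_eq!(size_of::<Option<NonZeroU8>>(), 1);
    assert_eq!(size_of::<Option<NonZeroI8>>(), 1);
    assert_eq!(size_of::<NonZeroU8>(), 1);
}
```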
+safety_comment! {
+ /// SAFETY:
+ /// - `FromZeroes`, `FromBytes`, `AsBytes`: The Rust compiler reuses the `0`
+ ///   value to represent `None`, so `size_of::<Option<NonZeroXxx>>() ==
+ ///   size_of::<Xxx>()`; see the `NonZeroXxx` documentation.
+ /// - `Unaligned`: `NonZeroU8` and `NonZeroI8` document that
+ /// `Option<NonZeroU8>` and `Option<NonZeroI8>` both have size 1. [1] [2]
+ /// This is worded in a way that makes it unclear whether it's meant as a
+ /// guarantee, but given the purpose of those types, it's virtually
+ /// unthinkable that that would ever change. The only valid alignment for
+ /// a 1-byte type is 1.
+ ///
+ /// [1] https://doc.rust-lang.org/stable/std/num/struct.NonZeroU8.html
+ /// [2] https://doc.rust-lang.org/stable/std/num/struct.NonZeroI8.html
+ ///
+ /// TODO(https://github.com/rust-lang/rust/pull/104082): Cite documentation
+ /// for layout guarantees.
+ unsafe_impl!(Option<NonZeroU8>: FromZeroes, FromBytes, AsBytes, Unaligned);
+ unsafe_impl!(Option<NonZeroI8>: FromZeroes, FromBytes, AsBytes, Unaligned);
+ assert_unaligned!(Option<NonZeroU8>, Option<NonZeroI8>);
+ unsafe_impl!(Option<NonZeroU16>: FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(Option<NonZeroI16>: FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(Option<NonZeroU32>: FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(Option<NonZeroI32>: FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(Option<NonZeroU64>: FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(Option<NonZeroI64>: FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(Option<NonZeroU128>: FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(Option<NonZeroI128>: FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(Option<NonZeroUsize>: FromZeroes, FromBytes, AsBytes);
+ unsafe_impl!(Option<NonZeroIsize>: FromZeroes, FromBytes, AsBytes);
+}
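A sketch of that niche being exercised through `FromBytes`: the all-zeroes bit pattern decodes to `None`:

```rust
use core::num::NonZeroU32;
use zerocopy::FromBytes;

fn main() {
    // The compiler represents `None` as the `0` bit pattern, so four zero
    // bytes decode to `None` rather than to an invalid `NonZeroU32`.
    let v = <Option<NonZeroU32>>::read_from(&[0u8; 4][..]).unwrap();
    assert!(v.is_none());
}
```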
+
+safety_comment! {
+ /// SAFETY:
+ /// For all `T`, `PhantomData<T>` has size 0 and alignment 1. [1]
+ /// - `FromZeroes`, `FromBytes`: There is only one possible sequence of 0
+ /// bytes, and `PhantomData` is inhabited.
+ /// - `AsBytes`: Since `PhantomData` has size 0, it contains no padding
+ /// bytes.
+ /// - `Unaligned`: Per the preceding reference, `PhantomData` has alignment
+ /// 1.
+ ///
+ /// [1] https://doc.rust-lang.org/std/marker/struct.PhantomData.html#layout-1
+ unsafe_impl!(T: ?Sized => FromZeroes for PhantomData<T>);
+ unsafe_impl!(T: ?Sized => FromBytes for PhantomData<T>);
+ unsafe_impl!(T: ?Sized => AsBytes for PhantomData<T>);
+ unsafe_impl!(T: ?Sized => Unaligned for PhantomData<T>);
+ assert_unaligned!(PhantomData<()>, PhantomData<u8>, PhantomData<u64>);
+}
+safety_comment! {
+ /// SAFETY:
+ /// `Wrapping<T>` is guaranteed by its docs [1] to have the same layout as
+ /// `T`. Also, `Wrapping<T>` is `#[repr(transparent)]`, and has a single
+ /// field, which is `pub`. Per the reference [2], this means that the
+ /// `#[repr(transparent)]` attribute is "considered part of the public ABI".
+ ///
+ /// [1] https://doc.rust-lang.org/nightly/core/num/struct.Wrapping.html#layout-1
+ /// [2] https://doc.rust-lang.org/nomicon/other-reprs.html#reprtransparent
+ unsafe_impl!(T: FromZeroes => FromZeroes for Wrapping<T>);
+ unsafe_impl!(T: FromBytes => FromBytes for Wrapping<T>);
+ unsafe_impl!(T: AsBytes => AsBytes for Wrapping<T>);
+ unsafe_impl!(T: Unaligned => Unaligned for Wrapping<T>);
+ assert_unaligned!(Wrapping<()>, Wrapping<u8>);
+}
+safety_comment! {
+ // `MaybeUninit<T>` is `FromZeroes` and `FromBytes`, but never `AsBytes`
+ // since it may contain uninitialized bytes.
+ //
+ /// SAFETY:
+ /// - `FromZeroes`, `FromBytes`: `MaybeUninit<T>` has no restrictions on its
+ /// contents. Unfortunately, in addition to bit validity, `FromZeroes` and
+ /// `FromBytes` also require that implementers contain no `UnsafeCell`s.
+ /// Thus, we require `T: FromZeroes` and `T: FromBytes` in order to ensure
+ ///   that `T` - and thus `MaybeUninit<T>` - contains no `UnsafeCell`s.
+ ///   Requiring that `T` implement each of these traits is therefore sufficient.
+ /// - `Unaligned`: `MaybeUninit<T>` is guaranteed by its documentation [1]
+ /// to have the same alignment as `T`.
+ ///
+ /// [1]
+ /// https://doc.rust-lang.org/nightly/core/mem/union.MaybeUninit.html#layout-1
+ ///
+ /// TODO(https://github.com/google/zerocopy/issues/251): If we split
+ /// `FromBytes` and `RefFromBytes`, or if we introduce a separate
+ /// `NoCell`/`Freeze` trait, we can relax the trait bounds for `FromZeroes`
+ /// and `FromBytes`.
+ unsafe_impl!(T: FromZeroes => FromZeroes for MaybeUninit<T>);
+ unsafe_impl!(T: FromBytes => FromBytes for MaybeUninit<T>);
+ unsafe_impl!(T: Unaligned => Unaligned for MaybeUninit<T>);
+ assert_unaligned!(MaybeUninit<()>, MaybeUninit<u8>);
+}
+safety_comment! {
+ /// SAFETY:
+ /// `ManuallyDrop` has the same layout as `T`, and accessing the inner value
+ /// is safe (meaning that it's unsound to leave the inner value
+ /// uninitialized while exposing the `ManuallyDrop` to safe code).
+ /// - `FromZeroes`, `FromBytes`: Since it has the same layout as `T`, any
+ /// valid `T` is a valid `ManuallyDrop<T>`. If `T: FromZeroes`, a sequence
+ /// of zero bytes is a valid `T`, and thus a valid `ManuallyDrop<T>`. If
+ /// `T: FromBytes`, any sequence of bytes is a valid `T`, and thus a valid
+ /// `ManuallyDrop<T>`.
+ /// - `AsBytes`: Since it has the same layout as `T`, and since it's unsound
+ /// to let safe code access a `ManuallyDrop` whose inner value is
+ /// uninitialized, safe code can only ever access a `ManuallyDrop` whose
+ /// contents are a valid `T`. Since `T: AsBytes`, this means that safe
+ /// code can only ever access a `ManuallyDrop` with all initialized bytes.
+ /// - `Unaligned`: `ManuallyDrop` has the same layout (and thus alignment)
+ /// as `T`, and `T: Unaligned` guarantees that that alignment is 1.
+ unsafe_impl!(T: ?Sized + FromZeroes => FromZeroes for ManuallyDrop<T>);
+ unsafe_impl!(T: ?Sized + FromBytes => FromBytes for ManuallyDrop<T>);
+ unsafe_impl!(T: ?Sized + AsBytes => AsBytes for ManuallyDrop<T>);
+ unsafe_impl!(T: ?Sized + Unaligned => Unaligned for ManuallyDrop<T>);
+ assert_unaligned!(ManuallyDrop<()>, ManuallyDrop<u8>);
+}
+safety_comment! {
+ /// SAFETY:
+ /// Per the reference [1]:
+ ///
+ /// An array of `[T; N]` has a size of `size_of::<T>() * N` and the same
+ /// alignment of `T`. Arrays are laid out so that the zero-based `nth`
+ /// element of the array is offset from the start of the array by `n *
+ /// size_of::<T>()` bytes.
+ ///
+ /// ...
+ ///
+ /// Slices have the same layout as the section of the array they slice.
+ ///
+ /// In other words, the layout of a `[T]` or `[T; N]` is a sequence of `T`s
+ /// laid out back-to-back with no bytes in between. Therefore, `[T]` or `[T;
+ /// N]` are `FromZeroes`, `FromBytes`, and `AsBytes` if `T` is
+ /// (respectively). Furthermore, since an array/slice has "the same
+ /// alignment of `T`", `[T]` and `[T; N]` are `Unaligned` if `T` is.
+ ///
+ /// Note that we don't `assert_unaligned!` for slice types because
+ /// `assert_unaligned!` uses `align_of`, which only works for `Sized` types.
+ ///
+ /// [1] https://doc.rust-lang.org/reference/type-layout.html#array-layout
+ unsafe_impl!(const N: usize, T: FromZeroes => FromZeroes for [T; N]);
+ unsafe_impl!(const N: usize, T: FromBytes => FromBytes for [T; N]);
+ unsafe_impl!(const N: usize, T: AsBytes => AsBytes for [T; N]);
+ unsafe_impl!(const N: usize, T: Unaligned => Unaligned for [T; N]);
+ assert_unaligned!([(); 0], [(); 1], [u8; 0], [u8; 1]);
+ unsafe_impl!(T: FromZeroes => FromZeroes for [T]);
+ unsafe_impl!(T: FromBytes => FromBytes for [T]);
+ unsafe_impl!(T: AsBytes => AsBytes for [T]);
+ unsafe_impl!(T: Unaligned => Unaligned for [T]);
+}
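A sketch of how these element-wise impls compose in practice:

```rust
use zerocopy::{AsBytes, FromBytes};

fn main() {
    // `[u8; 4]` is `FromBytes` because `u8` is.
    let arr = <[u8; 4]>::read_from(&[1, 2, 3, 4][..]).unwrap();
    assert_eq!(arr, [1, 2, 3, 4]);
    // `[u16; 2]` is `AsBytes` because `u16` is; zeroes keep the
    // assertion free of any endianness dependence.
    let words = [0u16; 2];
    assert_eq!(words.as_bytes(), &[0u8; 4][..]);
}
```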
// SIMD support
//
@@ -533,8 +1464,8 @@ impl_for_composite_types!(Unaligned);
// Given this background, we can observe that:
// - The size and bit pattern requirements of a SIMD type are equivalent to the
// equivalent array type. Thus, for any SIMD type whose primitive `T` is
-// `FromBytes`, that SIMD type is also `FromBytes`. The same holds for
-// `AsBytes`.
+// `FromZeroes`, `FromBytes`, or `AsBytes`, that SIMD type is also
+// `FromZeroes`, `FromBytes`, or `AsBytes` respectively.
// - Since no upper bound is placed on the alignment, no SIMD type can be
// guaranteed to be `Unaligned`.
//
@@ -545,29 +1476,36 @@ impl_for_composite_types!(Unaligned);
//
// See issue #38 [2]. While this behavior is not technically guaranteed, the
// likelihood that the behavior will change such that SIMD types are no longer
-// `FromBytes` or `AsBytes` is next to zero, as that would defeat the entire
-// purpose of SIMD types. Nonetheless, we put this behavior behind the `simd`
-// Cargo feature, which requires consumers to opt into this stability hazard.
+// `FromZeroes`, `FromBytes`, or `AsBytes` is next to zero, as that would defeat
+// the entire purpose of SIMD types. Nonetheless, we put this behavior behind
+// the `simd` Cargo feature, which requires consumers to opt into this stability
+// hazard.
//
// [1] https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html
// [2] https://github.com/rust-lang/unsafe-code-guidelines/issues/38
#[cfg(feature = "simd")]
mod simd {
- /// Defines a module which implements `FromBytes` and `AsBytes` for a set of
- /// types from a module in `core::arch`.
+ /// Defines a module which implements `FromZeroes`, `FromBytes`, and
+ /// `AsBytes` for a set of types from a module in `core::arch`.
///
/// `$arch` is both the name of the defined module and the name of the
/// module in `core::arch`, and `$typ` is the list of items from that module
- /// to implement `FromBytes` and `AsBytes` for.
+ /// to implement `FromZeroes`, `FromBytes`, and `AsBytes` for.
+ #[allow(unused_macros)] // `allow(unused_macros)` is needed because some
+ // target/feature combinations don't emit any impls
+ // and thus don't use this macro.
macro_rules! simd_arch_mod {
($arch:ident, $($typ:ident),*) => {
mod $arch {
use core::arch::$arch::{$($typ),*};
use crate::*;
-
- impl_for_types!(FromBytes, $($typ),*);
- impl_for_types!(AsBytes, $($typ),*);
+ impl_known_layout!($($typ),*);
+ safety_comment! {
+ /// SAFETY:
+ /// See comment on module definition for justification.
+ $( unsafe_impl!($typ: FromZeroes, FromBytes, AsBytes); )*
+ }
}
};
}
@@ -594,7 +1532,7 @@ mod simd {
vector_signed_long,
vector_unsigned_long
);
- #[cfg(all(feature = "simd-nightly", target_arch = "aarch64"))]
+ #[cfg(target_arch = "aarch64")]
#[rustfmt::skip]
simd_arch_mod!(
aarch64, float32x2_t, float32x4_t, float64x1_t, float64x2_t, int8x8_t, int8x8x2_t,
@@ -607,111 +1545,7 @@ mod simd {
);
#[cfg(all(feature = "simd-nightly", target_arch = "arm"))]
#[rustfmt::skip]
- simd_arch_mod!(
- arm, float32x2_t, float32x4_t, int8x4_t, int8x8_t, int8x8x2_t, int8x8x3_t, int8x8x4_t,
- int8x16_t, int16x2_t, int16x4_t, int16x8_t, int32x2_t, int32x4_t, int64x1_t, int64x2_t,
- poly8x8_t, poly8x8x2_t, poly8x8x3_t, poly8x8x4_t, poly8x16_t, poly16x4_t, poly16x8_t,
- poly64x1_t, poly64x2_t, uint8x4_t, uint8x8_t, uint8x8x2_t, uint8x8x3_t, uint8x8x4_t,
- uint8x16_t, uint16x2_t, uint16x4_t, uint16x8_t, uint32x2_t, uint32x4_t, uint64x1_t,
- uint64x2_t
- );
-}
-
-/// A type with no alignment requirement.
-///
-/// A `Unalign` wraps a `T`, removing any alignment requirement. `Unalign<T>`
-/// has the same size and ABI as `T`, but not necessarily the same alignment.
-/// This is useful if a type with an alignment requirement needs to be read from
-/// a chunk of memory which provides no alignment guarantees.
-///
-/// Since `Unalign` has no alignment requirement, the inner `T` may not be
-/// properly aligned in memory, and so `Unalign` provides no way of getting a
-/// reference to the inner `T`. Instead, the `T` may only be obtained by value
-/// (see [`get`] and [`into_inner`]).
-///
-/// [`get`]: Unalign::get
-/// [`into_inner`]: Unalign::into_inner
-#[derive(FromBytes, Unaligned, Copy)]
-#[repr(C, packed)]
-pub struct Unalign<T>(T);
-
-// Note that `Unalign: Clone` only if `T: Copy`. Since the inner `T` may not be
-// aligned, there's no way to safely call `T::clone`, and so a `T: Clone` bound
-// is not sufficient to implement `Clone` for `Unalign`.
-impl<T: Copy> Clone for Unalign<T> {
- fn clone(&self) -> Unalign<T> {
- *self
- }
-}
-
-impl<T> Unalign<T> {
- /// Constructs a new `Unalign`.
- pub fn new(val: T) -> Unalign<T> {
- Unalign(val)
- }
-
- /// Consumes `self`, returning the inner `T`.
- pub fn into_inner(self) -> T {
- let Unalign(val) = self;
- val
- }
-
- /// Gets an unaligned raw pointer to the inner `T`.
- ///
- /// # Safety
- ///
- /// The returned raw pointer is not necessarily aligned to
- /// `align_of::<T>()`. Most functions which operate on raw pointers require
- /// those pointers to be aligned, so calling those functions with the result
- /// of `get_ptr` will be undefined behavior if alignment is not guaranteed
- /// using some out-of-band mechanism. In general, the only functions which
- /// are safe to call with this pointer are those which are explicitly
- /// documented as being sound to use with an unaligned pointer, such as
- /// [`read_unaligned`].
- ///
- /// [`read_unaligned`]: core::ptr::read_unaligned
- pub fn get_ptr(&self) -> *const T {
- ptr::addr_of!(self.0)
- }
-
- /// Gets an unaligned mutable raw pointer to the inner `T`.
- ///
- /// # Safety
- ///
- /// The returned raw pointer is not necessarily aligned to
- /// `align_of::<T>()`. Most functions which operate on raw pointers require
- /// those pointers to be aligned, so calling those functions with the result
- /// of `get_mut_ptr` will be undefined behavior if alignment is not guaranteed
- /// using some out-of-band mechanism. In general, the only functions which
- /// are safe to call with this pointer are those which are explicitly
- /// documented as being sound to use with an unaligned pointer, such as
- /// [`read_unaligned`].
- ///
- /// [`read_unaligned`]: core::ptr::read_unaligned
- pub fn get_mut_ptr(&mut self) -> *mut T {
- ptr::addr_of_mut!(self.0)
- }
-}
-
-impl<T: Copy> Unalign<T> {
- /// Gets a copy of the inner `T`.
- pub fn get(&self) -> T {
- let Unalign(val) = *self;
- val
- }
-}
-
-// SAFETY: Since `T: AsBytes`, we know that it's safe to construct a `&[u8]`
-// from an aligned `&T`. Since `&[u8]` itself has no alignment requirements, it
-// must also be safe to construct a `&[u8]` from a `&T` at any address. Since
-// `Unalign<T>` is `#[repr(packed)]`, everything about its layout except for its
-// alignment is the same as `T`'s layout.
-unsafe impl<T: AsBytes> AsBytes for Unalign<T> {
- fn only_derive_is_allowed_to_implement_this_trait()
- where
- Self: Sized,
- {
- }
+ simd_arch_mod!(arm, int8x4_t, uint8x4_t);
}
// Used in `transmute!` below.
@@ -742,15 +1576,15 @@ macro_rules! transmute {
// This branch, though never taken, ensures that the type of `e` is
// `AsBytes` and that the type of this macro invocation expression
// is `FromBytes`.
- fn transmute<T: $crate::AsBytes, U: $crate::FromBytes>(_t: T) -> U {
+ const fn transmute<T: $crate::AsBytes, U: $crate::FromBytes>(_t: T) -> U {
unreachable!()
}
transmute(e)
} else {
- // `core::mem::transmute` ensures that the type of `e` and the type
- // of this macro invocation expression have the same size. We know
- // this transmute is safe thanks to the `AsBytes` and `FromBytes`
- // bounds enforced by the `false` branch.
+ // SAFETY: `core::mem::transmute` ensures that the type of `e` and
+ // the type of this macro invocation expression have the same size.
+ // We know this transmute is safe thanks to the `AsBytes` and
+ // `FromBytes` bounds enforced by the `false` branch.
//
// We use `$crate::__real_transmute` because we know it will always
// be available for crates which are using the 2015 edition of Rust.
@@ -764,25 +1598,23 @@ macro_rules! transmute {
}}
}
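A minimal sketch of the macro in action (the `u32`/`[u8; 4]` pair keeps the example endianness-independent):

```rust
use zerocopy::transmute;

fn main() {
    // `u32` is `AsBytes`, `[u8; 4]` is `FromBytes`, and the two sizes
    // match, so all of the macro's checks pass.
    let bytes: [u8; 4] = transmute!(0u32);
    assert_eq!(bytes, [0u8; 4]);
}
```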
-/// A length- and alignment-checked reference to a byte slice which can safely
-/// be reinterpreted as another type.
+/// A typed reference derived from a byte slice.
///
-/// `LayoutVerified` is a byte slice reference (`&[u8]`, `&mut [u8]`,
-/// `Ref<[u8]>`, `RefMut<[u8]>`, etc) with the invariant that the slice's length
-/// and alignment are each greater than or equal to the length and alignment of
-/// `T`. Using this invariant, it implements `Deref` for `T` so long as `T:
-/// FromBytes` and `DerefMut` so long as `T: FromBytes + AsBytes`.
+/// A `Ref<B, T>` is a reference to a `T` which is stored in a byte slice, `B`.
+/// Unlike a native reference (`&T` or `&mut T`), `Ref<B, T>` has the same
+/// mutability as the byte slice it was constructed from (`B`).
///
/// # Examples
///
-/// `LayoutVerified` can be used to treat a sequence of bytes as a structured
-/// type, and to read and write the fields of that type as if the byte slice
-/// reference were simply a reference to that type.
+/// `Ref` can be used to treat a sequence of bytes as a structured type, and to
+/// read and write the fields of that type as if the byte slice reference were
+/// simply a reference to that type.
///
/// ```rust
-/// use zerocopy::{AsBytes, ByteSlice, ByteSliceMut, FromBytes, LayoutVerified, Unaligned};
+/// # #[cfg(feature = "derive")] { // This example uses derives, and won't compile without them
+/// use zerocopy::{AsBytes, ByteSlice, ByteSliceMut, FromBytes, FromZeroes, Ref, Unaligned};
///
-/// #[derive(FromBytes, AsBytes, Unaligned)]
+/// #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)]
/// #[repr(C)]
/// struct UdpHeader {
/// src_port: [u8; 2],
@@ -792,13 +1624,13 @@ macro_rules! transmute {
/// }
///
/// struct UdpPacket<B> {
-/// header: LayoutVerified<B, UdpHeader>,
+/// header: Ref<B, UdpHeader>,
/// body: B,
/// }
///
/// impl<B: ByteSlice> UdpPacket<B> {
/// pub fn parse(bytes: B) -> Option<UdpPacket<B>> {
-/// let (header, body) = LayoutVerified::new_unaligned_from_prefix(bytes)?;
+/// let (header, body) = Ref::new_unaligned_from_prefix(bytes)?;
/// Some(UdpPacket { header, body })
/// }
///
@@ -812,104 +1644,108 @@ macro_rules! transmute {
/// self.header.src_port = src_port;
/// }
/// }
+/// # }
/// ```
-pub struct LayoutVerified<B, T: ?Sized>(B, PhantomData<T>);
+pub struct Ref<B, T: ?Sized>(B, PhantomData<T>);
+
+/// Deprecated: prefer [`Ref`] instead.
+#[deprecated(since = "0.7.0", note = "LayoutVerified has been renamed to Ref")]
+#[doc(hidden)]
+pub type LayoutVerified<B, T> = Ref<B, T>;
-impl<B, T> LayoutVerified<B, T>
+impl<B, T> Ref<B, T>
where
B: ByteSlice,
{
- /// Constructs a new `LayoutVerified`.
+ /// Constructs a new `Ref`.
///
/// `new` verifies that `bytes.len() == size_of::<T>()` and that `bytes` is
- /// aligned to `align_of::<T>()`, and constructs a new `LayoutVerified`. If
- /// either of these checks fail, it returns `None`.
+ /// aligned to `align_of::<T>()`, and constructs a new `Ref`. If either of
+ /// these checks fail, it returns `None`.
#[inline]
- pub fn new(bytes: B) -> Option<LayoutVerified<B, T>> {
- if bytes.len() != mem::size_of::<T>() || !aligned_to(bytes.deref(), mem::align_of::<T>()) {
+ pub fn new(bytes: B) -> Option<Ref<B, T>> {
+ if bytes.len() != mem::size_of::<T>() || !util::aligned_to::<_, T>(bytes.deref()) {
return None;
}
- Some(LayoutVerified(bytes, PhantomData))
+ Some(Ref(bytes, PhantomData))
}
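A sketch of a successful construction; `[u8; 4]` has alignment 1, so only the length check can fail here:

```rust
use zerocopy::Ref;

fn main() {
    let buf = [7u8; 4];
    let r = Ref::<_, [u8; 4]>::new(&buf[..]).unwrap();
    assert_eq!(*r.into_ref(), [7u8; 4]);
    // A slice of the wrong length is rejected.
    assert!(Ref::<_, [u8; 4]>::new(&buf[..3]).is_none());
}
```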
- /// Constructs a new `LayoutVerified` from the prefix of a byte slice.
+ /// Constructs a new `Ref` from the prefix of a byte slice.
///
/// `new_from_prefix` verifies that `bytes.len() >= size_of::<T>()` and that
/// `bytes` is aligned to `align_of::<T>()`. It consumes the first
- /// `size_of::<T>()` bytes from `bytes` to construct a `LayoutVerified`, and
- /// returns the remaining bytes to the caller. If either the length or
- /// alignment checks fail, it returns `None`.
+ /// `size_of::<T>()` bytes from `bytes` to construct a `Ref`, and returns
+ /// the remaining bytes to the caller. If either the length or alignment
+ /// checks fail, it returns `None`.
#[inline]
- pub fn new_from_prefix(bytes: B) -> Option<(LayoutVerified<B, T>, B)> {
- if bytes.len() < mem::size_of::<T>() || !aligned_to(bytes.deref(), mem::align_of::<T>()) {
+ pub fn new_from_prefix(bytes: B) -> Option<(Ref<B, T>, B)> {
+ if bytes.len() < mem::size_of::<T>() || !util::aligned_to::<_, T>(bytes.deref()) {
return None;
}
let (bytes, suffix) = bytes.split_at(mem::size_of::<T>());
- Some((LayoutVerified(bytes, PhantomData), suffix))
+ Some((Ref(bytes, PhantomData), suffix))
}
- /// Constructs a new `LayoutVerified` from the suffix of a byte slice.
+ /// Constructs a new `Ref` from the suffix of a byte slice.
///
/// `new_from_suffix` verifies that `bytes.len() >= size_of::<T>()` and that
/// the last `size_of::<T>()` bytes of `bytes` are aligned to
/// `align_of::<T>()`. It consumes the last `size_of::<T>()` bytes from
- /// `bytes` to construct a `LayoutVerified`, and returns the preceding bytes
- /// to the caller. If either the length or alignment checks fail, it returns
+ /// `bytes` to construct a `Ref`, and returns the preceding bytes to the
+ /// caller. If either the length or alignment checks fail, it returns
/// `None`.
#[inline]
- pub fn new_from_suffix(bytes: B) -> Option<(B, LayoutVerified<B, T>)> {
+ pub fn new_from_suffix(bytes: B) -> Option<(B, Ref<B, T>)> {
let bytes_len = bytes.len();
- if bytes_len < mem::size_of::<T>() {
+ let split_at = bytes_len.checked_sub(mem::size_of::<T>())?;
+ let (prefix, bytes) = bytes.split_at(split_at);
+ if !util::aligned_to::<_, T>(bytes.deref()) {
return None;
}
- let (prefix, bytes) = bytes.split_at(bytes_len - mem::size_of::<T>());
- if !aligned_to(bytes.deref(), mem::align_of::<T>()) {
- return None;
- }
- Some((prefix, LayoutVerified(bytes, PhantomData)))
+ Some((prefix, Ref(bytes, PhantomData)))
}
}
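And a sketch of the prefix flavor, which hands back the unconsumed bytes:

```rust
use zerocopy::Ref;

fn main() {
    let buf = [1u8, 2, 3, 4, 5, 6];
    // Peel a 4-byte "header" off the front; the 2-byte tail is returned.
    let (head, tail) = Ref::<_, [u8; 4]>::new_from_prefix(&buf[..]).unwrap();
    assert_eq!(*head.into_ref(), [1, 2, 3, 4]);
    assert_eq!(tail, [5, 6]);
}
```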
-impl<B, T> LayoutVerified<B, [T]>
+impl<B, T> Ref<B, [T]>
where
B: ByteSlice,
{
- /// Constructs a new `LayoutVerified` of a slice type.
+ /// Constructs a new `Ref` of a slice type.
///
/// `new_slice` verifies that `bytes.len()` is a multiple of
/// `size_of::<T>()` and that `bytes` is aligned to `align_of::<T>()`, and
- /// constructs a new `LayoutVerified`. If either of these checks fail, it
- /// returns `None`.
+ /// constructs a new `Ref`. If either of these checks fail, it returns
+ /// `None`.
///
/// # Panics
///
/// `new_slice` panics if `T` is a zero-sized type.
#[inline]
- pub fn new_slice(bytes: B) -> Option<LayoutVerified<B, [T]>> {
- assert_ne!(mem::size_of::<T>(), 0);
- if bytes.len() % mem::size_of::<T>() != 0
- || !aligned_to(bytes.deref(), mem::align_of::<T>())
- {
+ pub fn new_slice(bytes: B) -> Option<Ref<B, [T]>> {
+ let remainder = bytes
+ .len()
+ .checked_rem(mem::size_of::<T>())
+ .expect("Ref::new_slice called on a zero-sized type");
+ if remainder != 0 || !util::aligned_to::<_, T>(bytes.deref()) {
return None;
}
- Some(LayoutVerified(bytes, PhantomData))
+ Some(Ref(bytes, PhantomData))
}
- /// Constructs a new `LayoutVerified` of a slice type from the prefix of a
- /// byte slice.
+ /// Constructs a new `Ref` of a slice type from the prefix of a byte slice.
///
/// `new_slice_from_prefix` verifies that `bytes.len() >= size_of::<T>() *
/// count` and that `bytes` is aligned to `align_of::<T>()`. It consumes the
- /// first `size_of::<T>() * count` bytes from `bytes` to construct a
- /// `LayoutVerified`, and returns the remaining bytes to the caller. It also
- /// ensures that `sizeof::<T>() * count` does not overflow a `usize`. If any
- /// of the length, alignment, or overflow checks fail, it returns `None`.
+ /// first `size_of::<T>() * count` bytes from `bytes` to construct a `Ref`,
+ /// and returns the remaining bytes to the caller. It also ensures that
+ /// `sizeof::<T>() * count` does not overflow a `usize`. If any of the
+ /// length, alignment, or overflow checks fail, it returns `None`.
///
/// # Panics
///
/// `new_slice_from_prefix` panics if `T` is a zero-sized type.
#[inline]
- pub fn new_slice_from_prefix(bytes: B, count: usize) -> Option<(LayoutVerified<B, [T]>, B)> {
+ pub fn new_slice_from_prefix(bytes: B, count: usize) -> Option<(Ref<B, [T]>, B)> {
let expected_len = match mem::size_of::<T>().checked_mul(count) {
Some(len) => len,
None => return None,
@@ -921,21 +1757,20 @@ where
Self::new_slice(prefix).map(move |l| (l, bytes))
}
- /// Constructs a new `LayoutVerified` of a slice type from the suffix of a
- /// byte slice.
+ /// Constructs a new `Ref` of a slice type from the suffix of a byte slice.
///
/// `new_slice_from_suffix` verifies that `bytes.len() >= size_of::<T>() *
/// count` and that `bytes` is aligned to `align_of::<T>()`. It consumes the
- /// last `size_of::<T>() * count` bytes from `bytes` to construct a
- /// `LayoutVerified`, and returns the preceding bytes to the caller. It also
- /// ensures that `sizeof::<T>() * count` does not overflow a `usize`. If any
- /// of the length, alignment, or overflow checks fail, it returns `None`.
+ /// last `size_of::<T>() * count` bytes from `bytes` to construct a `Ref`,
+ /// and returns the preceding bytes to the caller. It also ensures that
+ /// `size_of::<T>() * count` does not overflow a `usize`. If any of the
+ /// length, alignment, or overflow checks fail, it returns `None`.
///
/// # Panics
///
/// `new_slice_from_suffix` panics if `T` is a zero-sized type.
#[inline]
- pub fn new_slice_from_suffix(bytes: B, count: usize) -> Option<(B, LayoutVerified<B, [T]>)> {
+ pub fn new_slice_from_suffix(bytes: B, count: usize) -> Option<(B, Ref<B, [T]>)> {
let expected_len = match mem::size_of::<T>().checked_mul(count) {
Some(len) => len,
None => return None,
@@ -948,105 +1783,98 @@ where
}
}
-fn map_zeroed<B: ByteSliceMut, T: ?Sized>(
- opt: Option<LayoutVerified<B, T>>,
-) -> Option<LayoutVerified<B, T>> {
+fn map_zeroed<B: ByteSliceMut, T: ?Sized>(opt: Option<Ref<B, T>>) -> Option<Ref<B, T>> {
match opt {
- Some(mut lv) => {
- for b in lv.0.iter_mut() {
- *b = 0;
- }
- Some(lv)
+ Some(mut r) => {
+ r.0.fill(0);
+ Some(r)
}
None => None,
}
}
fn map_prefix_tuple_zeroed<B: ByteSliceMut, T: ?Sized>(
- opt: Option<(LayoutVerified<B, T>, B)>,
-) -> Option<(LayoutVerified<B, T>, B)> {
+ opt: Option<(Ref<B, T>, B)>,
+) -> Option<(Ref<B, T>, B)> {
match opt {
- Some((mut lv, rest)) => {
- for b in lv.0.iter_mut() {
- *b = 0;
- }
- Some((lv, rest))
+ Some((mut r, rest)) => {
+ r.0.fill(0);
+ Some((r, rest))
}
None => None,
}
}
fn map_suffix_tuple_zeroed<B: ByteSliceMut, T: ?Sized>(
- opt: Option<(B, LayoutVerified<B, T>)>,
-) -> Option<(B, LayoutVerified<B, T>)> {
+ opt: Option<(B, Ref<B, T>)>,
+) -> Option<(B, Ref<B, T>)> {
map_prefix_tuple_zeroed(opt.map(|(a, b)| (b, a))).map(|(a, b)| (b, a))
}
-impl<B, T> LayoutVerified<B, T>
+impl<B, T> Ref<B, T>
where
B: ByteSliceMut,
{
- /// Constructs a new `LayoutVerified` after zeroing the bytes.
+ /// Constructs a new `Ref` after zeroing the bytes.
///
/// `new_zeroed` verifies that `bytes.len() == size_of::<T>()` and that
- /// `bytes` is aligned to `align_of::<T>()`, and constructs a new
- /// `LayoutVerified`. If either of these checks fail, it returns `None`.
+ /// `bytes` is aligned to `align_of::<T>()`, and constructs a new `Ref`. If
+ /// either of these checks fail, it returns `None`.
///
/// If the checks succeed, then `bytes` will be initialized to zero. This
/// can be useful when re-using buffers to ensure that sensitive data
/// previously stored in the buffer is not leaked.
- #[inline]
- pub fn new_zeroed(bytes: B) -> Option<LayoutVerified<B, T>> {
+ #[inline(always)]
+ pub fn new_zeroed(bytes: B) -> Option<Ref<B, T>> {
map_zeroed(Self::new(bytes))
}
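A sketch of the zeroing behavior on a reused buffer:

```rust
use zerocopy::Ref;

fn main() {
    let mut buf = [0xAAu8; 4];
    // Construction zeroes the underlying bytes, so stale contents of a
    // reused buffer cannot leak through the typed view.
    let r = Ref::<_, [u8; 4]>::new_zeroed(&mut buf[..]).unwrap();
    drop(r);
    assert_eq!(buf, [0u8; 4]);
}
```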
- /// Constructs a new `LayoutVerified` from the prefix of a byte slice,
- /// zeroing the prefix.
+ /// Constructs a new `Ref` from the prefix of a byte slice, zeroing the
+ /// prefix.
///
/// `new_from_prefix_zeroed` verifies that `bytes.len() >= size_of::<T>()`
/// and that `bytes` is aligned to `align_of::<T>()`. It consumes the first
- /// `size_of::<T>()` bytes from `bytes` to construct a `LayoutVerified`, and
- /// returns the remaining bytes to the caller. If either the length or
- /// alignment checks fail, it returns `None`.
+ /// `size_of::<T>()` bytes from `bytes` to construct a `Ref`, and returns
+ /// the remaining bytes to the caller. If either the length or alignment
+ /// checks fail, it returns `None`.
///
/// If the checks succeed, then the prefix which is consumed will be
/// initialized to zero. This can be useful when re-using buffers to ensure
/// that sensitive data previously stored in the buffer is not leaked.
- #[inline]
- pub fn new_from_prefix_zeroed(bytes: B) -> Option<(LayoutVerified<B, T>, B)> {
+ #[inline(always)]
+ pub fn new_from_prefix_zeroed(bytes: B) -> Option<(Ref<B, T>, B)> {
map_prefix_tuple_zeroed(Self::new_from_prefix(bytes))
}
- /// Constructs a new `LayoutVerified` from the suffix of a byte slice,
- /// zeroing the suffix.
+ /// Constructs a new `Ref` from the suffix of a byte slice, zeroing the
+ /// suffix.
///
/// `new_from_suffix_zeroed` verifies that `bytes.len() >= size_of::<T>()`
/// and that the last `size_of::<T>()` bytes of `bytes` are aligned to
/// `align_of::<T>()`. It consumes the last `size_of::<T>()` bytes from
- /// `bytes` to construct a `LayoutVerified`, and returns the preceding bytes
- /// to the caller. If either the length or alignment checks fail, it returns
+ /// `bytes` to construct a `Ref`, and returns the preceding bytes to the
+ /// caller. If either the length or alignment checks fail, it returns
/// `None`.
///
/// If the checks succeed, then the suffix which is consumed will be
/// initialized to zero. This can be useful when re-using buffers to ensure
/// that sensitive data previously stored in the buffer is not leaked.
- #[inline]
- pub fn new_from_suffix_zeroed(bytes: B) -> Option<(B, LayoutVerified<B, T>)> {
+ #[inline(always)]
+ pub fn new_from_suffix_zeroed(bytes: B) -> Option<(B, Ref<B, T>)> {
map_suffix_tuple_zeroed(Self::new_from_suffix(bytes))
}
}
-impl<B, T> LayoutVerified<B, [T]>
+impl<B, T> Ref<B, [T]>
where
B: ByteSliceMut,
{
- /// Constructs a new `LayoutVerified` of a slice type after zeroing the
- /// bytes.
+ /// Constructs a new `Ref` of a slice type after zeroing the bytes.
///
/// `new_slice_zeroed` verifies that `bytes.len()` is a multiple of
/// `size_of::<T>()` and that `bytes` is aligned to `align_of::<T>()`, and
- /// constructs a new `LayoutVerified`. If either of these checks fail, it
- /// returns `None`.
+ /// constructs a new `Ref`. If either of these checks fail, it returns
+ /// `None`.
///
/// If the checks succeed, then `bytes` will be initialized to zero. This
/// can be useful when re-using buffers to ensure that sensitive data
@@ -1055,20 +1883,20 @@ where
/// # Panics
///
/// `new_slice_zeroed` panics if `T` is a zero-sized type.
- #[inline]
- pub fn new_slice_zeroed(bytes: B) -> Option<LayoutVerified<B, [T]>> {
+ #[inline(always)]
+ pub fn new_slice_zeroed(bytes: B) -> Option<Ref<B, [T]>> {
map_zeroed(Self::new_slice(bytes))
}
- /// Constructs a new `LayoutVerified` of a slice type from the prefix of a
- /// byte slice, after zeroing the bytes.
+ /// Constructs a new `Ref` of a slice type from the prefix of a byte slice,
+ /// after zeroing the bytes.
///
/// `new_slice_from_prefix_zeroed` verifies that `bytes.len() >= size_of::<T>() *
/// count` and that `bytes` is aligned to `align_of::<T>()`. It consumes the
- /// first `size_of::<T>() * count` bytes from `bytes` to construct a
- /// `LayoutVerified`, and returns the remaining bytes to the caller. It also
- /// ensures that `sizeof::<T>() * count` does not overflow a `usize`. If any
- /// of the length, alignment, or overflow checks fail, it returns `None`.
+ /// first `size_of::<T>() * count` bytes from `bytes` to construct a `Ref`,
+ /// and returns the remaining bytes to the caller. It also ensures that
+ /// `size_of::<T>() * count` does not overflow a `usize`. If any of the
+ /// length, alignment, or overflow checks fail, it returns `None`.
///
/// If the checks succeed, then the suffix which is consumed will be
/// initialized to zero. This can be useful when re-using buffers to ensure
@@ -1077,23 +1905,20 @@ where
/// # Panics
///
/// `new_slice_from_prefix_zeroed` panics if `T` is a zero-sized type.
- #[inline]
- pub fn new_slice_from_prefix_zeroed(
- bytes: B,
- count: usize,
- ) -> Option<(LayoutVerified<B, [T]>, B)> {
+ #[inline(always)]
+ pub fn new_slice_from_prefix_zeroed(bytes: B, count: usize) -> Option<(Ref<B, [T]>, B)> {
map_prefix_tuple_zeroed(Self::new_slice_from_prefix(bytes, count))
}
- /// Constructs a new `LayoutVerified` of a slice type from the prefix of a
- /// byte slice, after zeroing the bytes.
+ /// Constructs a new `Ref` of a slice type from the suffix of a byte slice,
+ /// after zeroing the bytes.
///
/// `new_slice_from_suffix_zeroed` verifies that `bytes.len() >= size_of::<T>() *
/// count` and that `bytes` is aligned to `align_of::<T>()`. It consumes the
- /// last `size_of::<T>() * count` bytes from `bytes` to construct a
- /// `LayoutVerified`, and returns the preceding bytes to the caller. It also
- /// ensures that `sizeof::<T>() * count` does not overflow a `usize`. If any
- /// of the length, alignment, or overflow checks fail, it returns `None`.
+ /// last `size_of::<T>() * count` bytes from `bytes` to construct a `Ref`,
+ /// and returns the preceding bytes to the caller. It also ensures that
+ /// `sizeof::<T>() * count` does not overflow a `usize`. If any of the
+ /// length, alignment, or overflow checks fail, it returns `None`.
///
/// If the checks succeed, then the consumed suffix will be initialized to
/// zero. This can be useful when re-using buffers to ensure that sensitive
@@ -1102,215 +1927,169 @@ where
/// # Panics
///
/// `new_slice_from_suffix_zeroed` panics if `T` is a zero-sized type.
- #[inline]
- pub fn new_slice_from_suffix_zeroed(
- bytes: B,
- count: usize,
- ) -> Option<(B, LayoutVerified<B, [T]>)> {
+ #[inline(always)]
+ pub fn new_slice_from_suffix_zeroed(bytes: B, count: usize) -> Option<(B, Ref<B, [T]>)> {
map_suffix_tuple_zeroed(Self::new_slice_from_suffix(bytes, count))
}
}
-impl<B, T> LayoutVerified<B, T>
+impl<B, T> Ref<B, T>
where
B: ByteSlice,
T: Unaligned,
{
- /// Constructs a new `LayoutVerified` for a type with no alignment
- /// requirement.
+ /// Constructs a new `Ref` for a type with no alignment requirement.
///
/// `new_unaligned` verifies that `bytes.len() == size_of::<T>()` and
- /// constructs a new `LayoutVerified`. If the check fails, it returns
- /// `None`.
- #[inline]
- pub fn new_unaligned(bytes: B) -> Option<LayoutVerified<B, T>> {
- if bytes.len() != mem::size_of::<T>() {
- return None;
- }
- Some(LayoutVerified(bytes, PhantomData))
+ /// constructs a new `Ref`. If the check fails, it returns `None`.
+ #[inline(always)]
+ pub fn new_unaligned(bytes: B) -> Option<Ref<B, T>> {
+ Ref::new(bytes)
}
- /// Constructs a new `LayoutVerified` from the prefix of a byte slice for a
- /// type with no alignment requirement.
+ /// Constructs a new `Ref` from the prefix of a byte slice for a type with
+ /// no alignment requirement.
///
/// `new_unaligned_from_prefix` verifies that `bytes.len() >=
/// size_of::<T>()`. It consumes the first `size_of::<T>()` bytes from
- /// `bytes` to construct a `LayoutVerified`, and returns the remaining bytes
- /// to the caller. If the length check fails, it returns `None`.
- #[inline]
- pub fn new_unaligned_from_prefix(bytes: B) -> Option<(LayoutVerified<B, T>, B)> {
- if bytes.len() < mem::size_of::<T>() {
- return None;
- }
- let (bytes, suffix) = bytes.split_at(mem::size_of::<T>());
- Some((LayoutVerified(bytes, PhantomData), suffix))
+ /// `bytes` to construct a `Ref`, and returns the remaining bytes to the
+ /// caller. If the length check fails, it returns `None`.
+ #[inline(always)]
+ pub fn new_unaligned_from_prefix(bytes: B) -> Option<(Ref<B, T>, B)> {
+ Ref::new_from_prefix(bytes)
}
- /// Constructs a new `LayoutVerified` from the suffix of a byte slice for a
- /// type with no alignment requirement.
+ /// Constructs a new `Ref` from the suffix of a byte slice for a type with
+ /// no alignment requirement.
///
/// `new_unaligned_from_suffix` verifies that `bytes.len() >=
/// size_of::<T>()`. It consumes the last `size_of::<T>()` bytes from
- /// `bytes` to construct a `LayoutVerified`, and returns the preceding bytes
- /// to the caller. If the length check fails, it returns `None`.
- #[inline]
- pub fn new_unaligned_from_suffix(bytes: B) -> Option<(B, LayoutVerified<B, T>)> {
- let bytes_len = bytes.len();
- if bytes_len < mem::size_of::<T>() {
- return None;
- }
- let (prefix, bytes) = bytes.split_at(bytes_len - mem::size_of::<T>());
- Some((prefix, LayoutVerified(bytes, PhantomData)))
+ /// `bytes` to construct a `Ref`, and returns the preceding bytes to the
+ /// caller. If the length check fails, it returns `None`.
+ #[inline(always)]
+ pub fn new_unaligned_from_suffix(bytes: B) -> Option<(B, Ref<B, T>)> {
+ Ref::new_from_suffix(bytes)
}
}
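These constructors pair naturally with the crate's byte-order-aware integers, which are `Unaligned` by construction. A sketch (it assumes the default `byteorder` feature and the `U16`/`BigEndian` exports of the `byteorder` module):

```rust
use zerocopy::byteorder::{BigEndian, U16};
use zerocopy::Ref;

fn main() {
    // `U16` has alignment 1, so no alignment check is needed: the value
    // can sit at any offset in the input buffer.
    let buf = [0x12u8, 0x34];
    let r = Ref::<_, U16<BigEndian>>::new_unaligned(&buf[..]).unwrap();
    assert_eq!(r.into_ref().get(), 0x1234);
}
```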
-impl<B, T> LayoutVerified<B, [T]>
+impl<B, T> Ref<B, [T]>
where
B: ByteSlice,
T: Unaligned,
{
- /// Constructs a new `LayoutVerified` of a slice type with no alignment
- /// requirement.
+ /// Constructs a new `Ref` of a slice type with no alignment requirement.
///
/// `new_slice_unaligned` verifies that `bytes.len()` is a multiple of
- /// `size_of::<T>()` and constructs a new `LayoutVerified`. If the check
- /// fails, it returns `None`.
+ /// `size_of::<T>()` and constructs a new `Ref`. If the check fails, it
+ /// returns `None`.
///
/// # Panics
///
/// `new_slice_unaligned` panics if `T` is a zero-sized type.
- #[inline]
- pub fn new_slice_unaligned(bytes: B) -> Option<LayoutVerified<B, [T]>> {
- assert_ne!(mem::size_of::<T>(), 0);
- if bytes.len() % mem::size_of::<T>() != 0 {
- return None;
- }
- Some(LayoutVerified(bytes, PhantomData))
+ #[inline(always)]
+ pub fn new_slice_unaligned(bytes: B) -> Option<Ref<B, [T]>> {
+ Ref::new_slice(bytes)
}
- /// Constructs a new `LayoutVerified` of a slice type with no alignment
- /// requirement from the prefix of a byte slice.
+ /// Constructs a new `Ref` of a slice type with no alignment requirement
+ /// from the prefix of a byte slice.
///
/// `new_slice_unaligned_from_prefix` verifies that `bytes.len() >= size_of::<T>() *
/// count`. It consumes the first `size_of::<T>() * count` bytes from
- /// `bytes` to construct a `LayoutVerified`, and returns the remaining bytes
- /// to the caller. It also ensures that `sizeof::<T>() * count` does not
- /// overflow a `usize`. If either the length, or overflow checks fail, it
- /// returns `None`.
+ /// `bytes` to construct a `Ref`, and returns the remaining bytes to the
+ /// caller. It also ensures that `size_of::<T>() * count` does not overflow a
+ /// `usize`. If either the length or overflow check fails, it returns
+ /// `None`.
///
/// # Panics
///
/// `new_slice_unaligned_from_prefix` panics if `T` is a zero-sized type.
- #[inline]
- pub fn new_slice_unaligned_from_prefix(
- bytes: B,
- count: usize,
- ) -> Option<(LayoutVerified<B, [T]>, B)> {
- let expected_len = match mem::size_of::<T>().checked_mul(count) {
- Some(len) => len,
- None => return None,
- };
- if bytes.len() < expected_len {
- return None;
- }
- let (prefix, bytes) = bytes.split_at(expected_len);
- Self::new_slice_unaligned(prefix).map(move |l| (l, bytes))
+ #[inline(always)]
+ pub fn new_slice_unaligned_from_prefix(bytes: B, count: usize) -> Option<(Ref<B, [T]>, B)> {
+ Ref::new_slice_from_prefix(bytes, count)
}
- /// Constructs a new `LayoutVerified` of a slice type with no alignment
- /// requirement from the suffix of a byte slice.
+ /// Constructs a new `Ref` of a slice type with no alignment requirement
+ /// from the suffix of a byte slice.
///
/// `new_slice_unaligned_from_suffix` verifies that `bytes.len() >= size_of::<T>() *
/// count`. It consumes the last `size_of::<T>() * count` bytes from `bytes`
- /// to construct a `LayoutVerified`, and returns the remaining bytes to the
- /// caller. It also ensures that `sizeof::<T>() * count` does not overflow a
- /// `usize`. If either the length, or overflow checks fail, it returns
- /// `None`.
+ /// to construct a `Ref`, and returns the remaining bytes to the caller. It
+ /// also ensures that `size_of::<T>() * count` does not overflow a `usize`.
+ /// If either the length or overflow check fails, it returns `None`.
///
/// # Panics
///
/// `new_slice_unaligned_from_suffix` panics if `T` is a zero-sized type.
- #[inline]
- pub fn new_slice_unaligned_from_suffix(
- bytes: B,
- count: usize,
- ) -> Option<(B, LayoutVerified<B, [T]>)> {
- let expected_len = match mem::size_of::<T>().checked_mul(count) {
- Some(len) => len,
- None => return None,
- };
- if bytes.len() < expected_len {
- return None;
- }
- let (bytes, suffix) = bytes.split_at(expected_len);
- Self::new_slice_unaligned(suffix).map(move |l| (bytes, l))
+ #[inline(always)]
+ pub fn new_slice_unaligned_from_suffix(bytes: B, count: usize) -> Option<(B, Ref<B, [T]>)> {
+ Ref::new_slice_from_suffix(bytes, count)
}
}
-impl<B, T> LayoutVerified<B, T>
+impl<B, T> Ref<B, T>
where
B: ByteSliceMut,
T: Unaligned,
{
- /// Constructs a new `LayoutVerified` for a type with no alignment
- /// requirement, zeroing the bytes.
+ /// Constructs a new `Ref` for a type with no alignment requirement, zeroing
+ /// the bytes.
///
/// `new_unaligned_zeroed` verifies that `bytes.len() == size_of::<T>()` and
- /// constructs a new `LayoutVerified`. If the check fails, it returns
- /// `None`.
+ /// constructs a new `Ref`. If the check fails, it returns `None`.
///
/// If the check succeeds, then `bytes` will be initialized to zero. This
/// can be useful when re-using buffers to ensure that sensitive data
/// previously stored in the buffer is not leaked.
- #[inline]
- pub fn new_unaligned_zeroed(bytes: B) -> Option<LayoutVerified<B, T>> {
+ #[inline(always)]
+ pub fn new_unaligned_zeroed(bytes: B) -> Option<Ref<B, T>> {
map_zeroed(Self::new_unaligned(bytes))
}
- /// Constructs a new `LayoutVerified` from the prefix of a byte slice for a
- /// type with no alignment requirement, zeroing the prefix.
+ /// Constructs a new `Ref` from the prefix of a byte slice for a type with
+ /// no alignment requirement, zeroing the prefix.
///
/// `new_unaligned_from_prefix_zeroed` verifies that `bytes.len() >=
/// size_of::<T>()`. It consumes the first `size_of::<T>()` bytes from
- /// `bytes` to construct a `LayoutVerified`, and returns the remaining bytes
- /// to the caller. If the length check fails, it returns `None`.
+ /// `bytes` to construct a `Ref`, and returns the remaining bytes to the
+ /// caller. If the length check fails, it returns `None`.
///
/// If the check succeeds, then the prefix which is consumed will be
/// initialized to zero. This can be useful when re-using buffers to ensure
/// that sensitive data previously stored in the buffer is not leaked.
- #[inline]
- pub fn new_unaligned_from_prefix_zeroed(bytes: B) -> Option<(LayoutVerified<B, T>, B)> {
+ #[inline(always)]
+ pub fn new_unaligned_from_prefix_zeroed(bytes: B) -> Option<(Ref<B, T>, B)> {
map_prefix_tuple_zeroed(Self::new_unaligned_from_prefix(bytes))
}
- /// Constructs a new `LayoutVerified` from the suffix of a byte slice for a
- /// type with no alignment requirement, zeroing the suffix.
+ /// Constructs a new `Ref` from the suffix of a byte slice for a type with
+ /// no alignment requirement, zeroing the suffix.
///
/// `new_unaligned_from_suffix_zeroed` verifies that `bytes.len() >=
/// size_of::<T>()`. It consumes the last `size_of::<T>()` bytes from
- /// `bytes` to construct a `LayoutVerified`, and returns the preceding bytes
- /// to the caller. If the length check fails, it returns `None`.
+ /// `bytes` to construct a `Ref`, and returns the preceding bytes to the
+ /// caller. If the length check fails, it returns `None`.
///
/// If the check succeeds, then the suffix which is consumed will be
/// initialized to zero. This can be useful when re-using buffers to ensure
/// that sensitive data previously stored in the buffer is not leaked.
- #[inline]
- pub fn new_unaligned_from_suffix_zeroed(bytes: B) -> Option<(B, LayoutVerified<B, T>)> {
+ #[inline(always)]
+ pub fn new_unaligned_from_suffix_zeroed(bytes: B) -> Option<(B, Ref<B, T>)> {
map_suffix_tuple_zeroed(Self::new_unaligned_from_suffix(bytes))
}
}
-impl<B, T> LayoutVerified<B, [T]>
+impl<B, T> Ref<B, [T]>
where
B: ByteSliceMut,
T: Unaligned,
{
- /// Constructs a new `LayoutVerified` for a slice type with no alignment
- /// requirement, zeroing the bytes.
+ /// Constructs a new `Ref` for a slice type with no alignment requirement,
+ /// zeroing the bytes.
///
/// `new_slice_unaligned_zeroed` verifies that `bytes.len()` is a multiple
- /// of `size_of::<T>()` and constructs a new `LayoutVerified`. If the check
- /// fails, it returns `None`.
+ /// of `size_of::<T>()` and constructs a new `Ref`. If the check fails, it
+ /// returns `None`.
///
/// If the check succeeds, then `bytes` will be initialized to zero. This
/// can be useful when re-using buffers to ensure that sensitive data
@@ -1319,20 +2098,20 @@ where
/// # Panics
///
/// `new_slice_unaligned_zeroed` panics if `T` is a zero-sized type.
- #[inline]
- pub fn new_slice_unaligned_zeroed(bytes: B) -> Option<LayoutVerified<B, [T]>> {
+ #[inline(always)]
+ pub fn new_slice_unaligned_zeroed(bytes: B) -> Option<Ref<B, [T]>> {
map_zeroed(Self::new_slice_unaligned(bytes))
}
- /// Constructs a new `LayoutVerified` of a slice type with no alignment
- /// requirement from the prefix of a byte slice, after zeroing the bytes.
+ /// Constructs a new `Ref` of a slice type with no alignment requirement
+ /// from the prefix of a byte slice, after zeroing the bytes.
///
/// `new_slice_unaligned_from_prefix_zeroed` verifies that `bytes.len() >= size_of::<T>() *
/// count`. It consumes the first `size_of::<T>() * count` bytes from
- /// `bytes` to construct a `LayoutVerified`, and returns the remaining bytes
- /// to the caller. It also ensures that `sizeof::<T>() * count` does not
- /// overflow a `usize`. If either the length, or overflow checks fail, it
- /// returns `None`.
+ /// `bytes` to construct a `Ref`, and returns the remaining bytes to the
+ /// caller. It also ensures that `size_of::<T>() * count` does not overflow a
+ /// `usize`. If either the length or overflow check fails, it returns
+ /// `None`.
///
/// If the checks succeed, then the prefix will be initialized to zero. This
/// can be useful when re-using buffers to ensure that sensitive data
@@ -1342,23 +2121,22 @@ where
///
/// `new_slice_unaligned_from_prefix_zeroed` panics if `T` is a zero-sized
/// type.
- #[inline]
+ #[inline(always)]
pub fn new_slice_unaligned_from_prefix_zeroed(
bytes: B,
count: usize,
- ) -> Option<(LayoutVerified<B, [T]>, B)> {
+ ) -> Option<(Ref<B, [T]>, B)> {
map_prefix_tuple_zeroed(Self::new_slice_unaligned_from_prefix(bytes, count))
}
- /// Constructs a new `LayoutVerified` of a slice type with no alignment
- /// requirement from the suffix of a byte slice, after zeroing the bytes.
+ /// Constructs a new `Ref` of a slice type with no alignment requirement
+ /// from the suffix of a byte slice, after zeroing the bytes.
///
/// `new_slice_unaligned_from_suffix_zeroed` verifies that `bytes.len() >= size_of::<T>() *
/// count`. It consumes the last `size_of::<T>() * count` bytes from `bytes`
- /// to construct a `LayoutVerified`, and returns the remaining bytes to the
- /// caller. It also ensures that `sizeof::<T>() * count` does not overflow a
- /// `usize`. If either the length, or overflow checks fail, it returns
- /// `None`.
+ /// to construct a `Ref`, and returns the remaining bytes to the caller. It
+ /// also ensures that `size_of::<T>() * count` does not overflow a `usize`.
+ /// If either the length or overflow check fails, it returns `None`.
///
/// If the checks succeed, then the suffix will be initialized to zero. This
/// can be useful when re-using buffers to ensure that sensitive data
@@ -1368,96 +2146,98 @@ where
///
/// `new_slice_unaligned_from_suffix_zeroed` panics if `T` is a zero-sized
/// type.
- #[inline]
+ #[inline(always)]
pub fn new_slice_unaligned_from_suffix_zeroed(
bytes: B,
count: usize,
- ) -> Option<(B, LayoutVerified<B, [T]>)> {
+ ) -> Option<(B, Ref<B, [T]>)> {
map_suffix_tuple_zeroed(Self::new_slice_unaligned_from_suffix(bytes, count))
}
}
-impl<'a, B, T> LayoutVerified<B, T>
+impl<'a, B, T> Ref<B, T>
where
B: 'a + ByteSlice,
T: FromBytes,
{
- /// Converts this `LayoutVerified` into a reference.
+ /// Converts this `Ref` into a reference.
///
- /// `into_ref` consumes the `LayoutVerified`, and returns a reference to
- /// `T`.
+ /// `into_ref` consumes the `Ref`, and returns a reference to `T`.
+ #[inline(always)]
pub fn into_ref(self) -> &'a T {
- // NOTE: This is safe because `B` is guaranteed to live for the lifetime
- // `'a`, meaning that a) the returned reference cannot outlive the `B`
- // from which `self` was constructed and, b) no mutable methods on that
- // `B` can be called during the lifetime of the returned reference. See
- // the documentation on `deref_helper` for what invariants we are
- // required to uphold.
+ // SAFETY: This is sound because `B` is guaranteed to live for the
+ // lifetime `'a`, meaning that a) the returned reference cannot outlive
+ // the `B` from which `self` was constructed and, b) no mutable methods
+ // on that `B` can be called during the lifetime of the returned
+ // reference. See the documentation on `deref_helper` for what
+ // invariants we are required to uphold.
unsafe { self.deref_helper() }
}
}
-impl<'a, B, T> LayoutVerified<B, T>
+impl<'a, B, T> Ref<B, T>
where
B: 'a + ByteSliceMut,
T: FromBytes + AsBytes,
{
- /// Converts this `LayoutVerified` into a mutable reference.
+ /// Converts this `Ref` into a mutable reference.
///
- /// `into_mut` consumes the `LayoutVerified`, and returns a mutable
- /// reference to `T`.
+ /// `into_mut` consumes the `Ref`, and returns a mutable reference to `T`.
+ #[inline(always)]
pub fn into_mut(mut self) -> &'a mut T {
- // NOTE: This is safe because `B` is guaranteed to live for the lifetime
- // `'a`, meaning that a) the returned reference cannot outlive the `B`
- // from which `self` was constructed and, b) no other methods - mutable
- // or immutable - on that `B` can be called during the lifetime of the
- // returned reference. See the documentation on `deref_mut_helper` for
- // what invariants we are required to uphold.
+ // SAFETY: This is sound because `B` is guaranteed to live for the
+ // lifetime `'a`, meaning that a) the returned reference cannot outlive
+ // the `B` from which `self` was constructed and, b) no other methods -
+ // mutable or immutable - on that `B` can be called during the lifetime
+ // of the returned reference. See the documentation on
+ // `deref_mut_helper` for what invariants we are required to uphold.
unsafe { self.deref_mut_helper() }
}
}
-impl<'a, B, T> LayoutVerified<B, [T]>
+impl<'a, B, T> Ref<B, [T]>
where
B: 'a + ByteSlice,
T: FromBytes,
{
- /// Converts this `LayoutVerified` into a slice reference.
+ /// Converts this `Ref` into a slice reference.
///
- /// `into_slice` consumes the `LayoutVerified`, and returns a reference to
- /// `[T]`.
+ /// `into_slice` consumes the `Ref`, and returns a reference to `[T]`.
+ #[inline(always)]
pub fn into_slice(self) -> &'a [T] {
- // NOTE: This is safe because `B` is guaranteed to live for the lifetime
- // `'a`, meaning that a) the returned reference cannot outlive the `B`
- // from which `self` was constructed and, b) no mutable methods on that
- // `B` can be called during the lifetime of the returned reference. See
- // the documentation on `deref_slice_helper` for what invariants we are
- // required to uphold.
+ // SAFETY: This is sound because `B` is guaranteed to live for the
+ // lifetime `'a`, meaning that a) the returned reference cannot outlive
+ // the `B` from which `self` was constructed and, b) no mutable methods
+ // on that `B` can be called during the lifetime of the returned
+ // reference. See the documentation on `deref_slice_helper` for what
+ // invariants we are required to uphold.
unsafe { self.deref_slice_helper() }
}
}
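A sketch of `into_slice` converting a checked byte view into a typed slice for the remainder of the slice's lifetime:

```rust
use zerocopy::Ref;

fn main() {
    let buf = [1u8, 2, 3, 4, 5, 6];
    let r = Ref::<_, [[u8; 2]]>::new_slice(&buf[..]).unwrap();
    // The element type `[u8; 2]` is `FromBytes`, so the conversion is sound.
    let pairs: &[[u8; 2]] = r.into_slice();
    assert_eq!(pairs, [[1, 2], [3, 4], [5, 6]]);
}
```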
-impl<'a, B, T> LayoutVerified<B, [T]>
+impl<'a, B, T> Ref<B, [T]>
where
B: 'a + ByteSliceMut,
T: FromBytes + AsBytes,
{
- /// Converts this `LayoutVerified` into a mutable slice reference.
+ /// Converts this `Ref` into a mutable slice reference.
///
- /// `into_mut_slice` consumes the `LayoutVerified`, and returns a mutable
- /// reference to `[T]`.
+ /// `into_mut_slice` consumes the `Ref`, and returns a mutable reference to
+ /// `[T]`.
+ #[inline(always)]
pub fn into_mut_slice(mut self) -> &'a mut [T] {
- // NOTE: This is safe because `B` is guaranteed to live for the lifetime
- // `'a`, meaning that a) the returned reference cannot outlive the `B`
- // from which `self` was constructed and, b) no other methods - mutable
- // or immutable - on that `B` can be called during the lifetime of the
- // returned reference. See the documentation on `deref_mut_slice_helper`
- // for what invariants we are required to uphold.
+ // SAFETY: This is sound because `B` is guaranteed to live for the
+ // lifetime `'a`, meaning that a) the returned reference cannot outlive
+ // the `B` from which `self` was constructed and, b) no other methods -
+ // mutable or immutable - on that `B` can be called during the lifetime
+ // of the returned reference. See the documentation on
+ // `deref_mut_slice_helper` for what invariants we are required to
+ // uphold.
unsafe { self.deref_mut_slice_helper() }
}
}
-impl<B, T> LayoutVerified<B, T>
+impl<B, T> Ref<B, T>
where
B: ByteSlice,
T: FromBytes,
@@ -1474,11 +2254,15 @@ where
/// and no mutable references to the same memory may be constructed during
/// `'a`.
unsafe fn deref_helper<'a>(&self) -> &'a T {
- &*(self.0.as_ptr() as *const T)
+ // TODO(#61): Add a "SAFETY" comment and remove this `allow`.
+ #[allow(clippy::undocumented_unsafe_blocks)]
+ unsafe {
+ &*self.0.as_ptr().cast::<T>()
+ }
}
}
-impl<B, T> LayoutVerified<B, T>
+impl<B, T> Ref<B, T>
where
B: ByteSliceMut,
T: FromBytes + AsBytes,
@@ -1495,11 +2279,15 @@ where
/// and no other references - mutable or immutable - to the same memory may
/// be constructed during `'a`.
unsafe fn deref_mut_helper<'a>(&mut self) -> &'a mut T {
- &mut *(self.0.as_mut_ptr() as *mut T)
+ // TODO(#61): Add a "SAFETY" comment and remove this `allow`.
+ #[allow(clippy::undocumented_unsafe_blocks)]
+ unsafe {
+ &mut *self.0.as_mut_ptr().cast::<T>()
+ }
}
}
-impl<B, T> LayoutVerified<B, [T]>
+impl<B, T> Ref<B, [T]>
where
B: ByteSlice,
T: FromBytes,
@@ -1513,13 +2301,22 @@ where
let len = self.0.len();
let elem_size = mem::size_of::<T>();
debug_assert_ne!(elem_size, 0);
- debug_assert_eq!(len % elem_size, 0);
- let elems = len / elem_size;
- slice::from_raw_parts(self.0.as_ptr() as *const T, elems)
+ // `Ref<_, [T]>` maintains the invariant that `size_of::<T>() > 0`.
+ // Thus, neither the mod nor division operations here can panic.
+ #[allow(clippy::arithmetic_side_effects)]
+ let elems = {
+ debug_assert_eq!(len % elem_size, 0);
+ len / elem_size
+ };
+ // TODO(#61): Add a "SAFETY" comment and remove this `allow`.
+ #[allow(clippy::undocumented_unsafe_blocks)]
+ unsafe {
+ slice::from_raw_parts(self.0.as_ptr().cast::<T>(), elems)
+ }
}
}
-impl<B, T> LayoutVerified<B, [T]>
+impl<B, T> Ref<B, [T]>
where
B: ByteSliceMut,
T: FromBytes + AsBytes,
@@ -1534,17 +2331,22 @@ where
let len = self.0.len();
let elem_size = mem::size_of::<T>();
debug_assert_ne!(elem_size, 0);
- debug_assert_eq!(len % elem_size, 0);
- let elems = len / elem_size;
- slice::from_raw_parts_mut(self.0.as_mut_ptr() as *mut T, elems)
+ // `Ref<_, [T]>` maintains the invariant that `size_of::<T>() > 0`.
+ // Thus, neither the mod nor division operations here can panic.
+ #[allow(clippy::arithmetic_side_effects)]
+ let elems = {
+ debug_assert_eq!(len % elem_size, 0);
+ len / elem_size
+ };
+ // TODO(#61): Add a "SAFETY" comment and remove this `allow`.
+ #[allow(clippy::undocumented_unsafe_blocks)]
+ unsafe {
+ slice::from_raw_parts_mut(self.0.as_mut_ptr().cast::<T>(), elems)
+ }
}
}
-fn aligned_to(bytes: &[u8], align: usize) -> bool {
- (bytes as *const _ as *const () as usize) % align == 0
-}
-
-impl<B, T> LayoutVerified<B, T>
+impl<B, T> Ref<B, T>
where
B: ByteSlice,
T: ?Sized,
@@ -1556,7 +2358,7 @@ where
}
}
-impl<B, T> LayoutVerified<B, T>
+impl<B, T> Ref<B, T>
where
B: ByteSliceMut,
T: ?Sized,
@@ -1568,7 +2370,7 @@ where
}
}
-impl<B, T> LayoutVerified<B, T>
+impl<B, T> Ref<B, T>
where
B: ByteSlice,
T: FromBytes,
@@ -1576,15 +2378,15 @@ where
/// Reads a copy of `T`.
#[inline]
pub fn read(&self) -> T {
- // SAFETY: Because of the invariants on `LayoutVerified`, we know that
- // `self.0` is at least `size_of::<T>()` bytes long, and that it is at
- // least as aligned as `align_of::<T>()`. Because `T: FromBytes`, it is
- // sound to interpret these bytes as a `T`.
- unsafe { ptr::read(self.0.as_ptr() as *const T) }
+ // SAFETY: Because of the invariants on `Ref`, we know that `self.0` is
+ // at least `size_of::<T>()` bytes long, and that it is at least as
+ // aligned as `align_of::<T>()`. Because `T: FromBytes`, it is sound to
+ // interpret these bytes as a `T`.
+ unsafe { ptr::read(self.0.as_ptr().cast::<T>()) }
}
}
-impl<B, T> LayoutVerified<B, T>
+impl<B, T> Ref<B, T>
where
B: ByteSliceMut,
T: AsBytes,
@@ -1592,16 +2394,16 @@ where
/// Writes the bytes of `t` and then forgets `t`.
#[inline]
pub fn write(&mut self, t: T) {
- // SAFETY: Because of the invariants on `LayoutVerified`, we know that
- // `self.0` is at least `size_of::<T>()` bytes long, and that it is at
- // least as aligned as `align_of::<T>()`. Writing `t` to the buffer will
- // allow all of the bytes of `t` to be accessed as a `[u8]`, but because
- // `T: AsBytes`, we know this is sound.
- unsafe { ptr::write(self.0.as_mut_ptr() as *mut T, t) }
+ // SAFETY: Because of the invariants on `Ref`, we know that `self.0` is
+ // at least `size_of::<T>()` bytes long, and that it is at least as
+ // aligned as `align_of::<T>()`. Writing `t` to the buffer will allow
+ // all of the bytes of `t` to be accessed as a `[u8]`, but because `T:
+ // AsBytes`, we know this is sound.
+ unsafe { ptr::write(self.0.as_mut_ptr().cast::<T>(), t) }
}
}
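+// A sketch contrasting the by-value `read`/`write` API with the by-reference
+// `Deref`/`DerefMut` impls below (the function is illustrative):
+//
+//     fn roundtrip(mut r: Ref<&mut [u8], u64>) {
+//         r.write(42);
+//         assert_eq!(r.read(), 42); // `read` copies the value out
+//         assert_eq!(*r, 42); // `Deref` reads in place
+//     }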
-impl<B, T> Deref for LayoutVerified<B, T>
+impl<B, T> Deref for Ref<B, T>
where
B: ByteSlice,
T: FromBytes,
@@ -1609,7 +2411,7 @@ where
type Target = T;
#[inline]
fn deref(&self) -> &T {
- // SAFETY: This is safe because the lifetime of `self` is the same as
+ // SAFETY: This is sound because the lifetime of `self` is the same as
// the lifetime of the return value, meaning that a) the returned
// reference cannot outlive `self` and, b) no mutable methods on `self`
// can be called during the lifetime of the returned reference. See the
@@ -1619,14 +2421,14 @@ where
}
}
-impl<B, T> DerefMut for LayoutVerified<B, T>
+impl<B, T> DerefMut for Ref<B, T>
where
B: ByteSliceMut,
T: FromBytes + AsBytes,
{
#[inline]
fn deref_mut(&mut self) -> &mut T {
- // SAFETY: This is safe because the lifetime of `self` is the same as
+ // SAFETY: This is sound because the lifetime of `self` is the same as
// the lifetime of the return value, meaning that a) the returned
// reference cannot outlive `self` and, b) no other methods on `self`
// can be called during the lifetime of the returned reference. See the
@@ -1636,7 +2438,7 @@ where
}
}
-impl<B, T> Deref for LayoutVerified<B, [T]>
+impl<B, T> Deref for Ref<B, [T]>
where
B: ByteSlice,
T: FromBytes,
@@ -1644,7 +2446,7 @@ where
type Target = [T];
#[inline]
fn deref(&self) -> &[T] {
- // SAFETY: This is safe because the lifetime of `self` is the same as
+ // SAFETY: This is sound because the lifetime of `self` is the same as
// the lifetime of the return value, meaning that a) the returned
// reference cannot outlive `self` and, b) no mutable methods on `self`
// can be called during the lifetime of the returned reference. See the
@@ -1654,14 +2456,14 @@ where
}
}
-impl<B, T> DerefMut for LayoutVerified<B, [T]>
+impl<B, T> DerefMut for Ref<B, [T]>
where
B: ByteSliceMut,
T: FromBytes + AsBytes,
{
#[inline]
fn deref_mut(&mut self) -> &mut [T] {
- // SAFETY: This is safe because the lifetime of `self` is the same as
+ // SAFETY: This is sound because the lifetime of `self` is the same as
// the lifetime of the return value, meaning that a) the returned
// reference cannot outlive `self` and, b) no other methods on `self`
// can be called during the lifetime of the returned reference. See the
@@ -1671,7 +2473,7 @@ where
}
}
-impl<T, B> Display for LayoutVerified<B, T>
+impl<T, B> Display for Ref<B, T>
where
B: ByteSlice,
T: FromBytes + Display,
@@ -1683,7 +2485,7 @@ where
}
}
-impl<T, B> Display for LayoutVerified<B, [T]>
+impl<T, B> Display for Ref<B, [T]>
where
B: ByteSlice,
T: FromBytes,
@@ -1696,7 +2498,7 @@ where
}
}
-impl<T, B> Debug for LayoutVerified<B, T>
+impl<T, B> Debug for Ref<B, T>
where
B: ByteSlice,
T: FromBytes + Debug,
@@ -1704,11 +2506,11 @@ where
#[inline]
fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
let inner: &T = self;
- fmt.debug_tuple("LayoutVerified").field(&inner).finish()
+ fmt.debug_tuple("Ref").field(&inner).finish()
}
}
-impl<T, B> Debug for LayoutVerified<B, [T]>
+impl<T, B> Debug for Ref<B, [T]>
where
B: ByteSlice,
T: FromBytes + Debug,
@@ -1716,25 +2518,25 @@ where
#[inline]
fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
let inner: &[T] = self;
- fmt.debug_tuple("LayoutVerified").field(&inner).finish()
+ fmt.debug_tuple("Ref").field(&inner).finish()
}
}
-impl<T, B> Eq for LayoutVerified<B, T>
+impl<T, B> Eq for Ref<B, T>
where
B: ByteSlice,
T: FromBytes + Eq,
{
}
-impl<T, B> Eq for LayoutVerified<B, [T]>
+impl<T, B> Eq for Ref<B, [T]>
where
B: ByteSlice,
T: FromBytes + Eq,
{
}
-impl<T, B> PartialEq for LayoutVerified<B, T>
+impl<T, B> PartialEq for Ref<B, T>
where
B: ByteSlice,
T: FromBytes + PartialEq,
@@ -1745,7 +2547,7 @@ where
}
}
-impl<T, B> PartialEq for LayoutVerified<B, [T]>
+impl<T, B> PartialEq for Ref<B, [T]>
where
B: ByteSlice,
T: FromBytes + PartialEq,
@@ -1756,7 +2558,7 @@ where
}
}
-impl<T, B> Ord for LayoutVerified<B, T>
+impl<T, B> Ord for Ref<B, T>
where
B: ByteSlice,
T: FromBytes + Ord,
@@ -1769,7 +2571,7 @@ where
}
}
-impl<T, B> Ord for LayoutVerified<B, [T]>
+impl<T, B> Ord for Ref<B, [T]>
where
B: ByteSlice,
T: FromBytes + Ord,
@@ -1782,7 +2584,7 @@ where
}
}
-impl<T, B> PartialOrd for LayoutVerified<B, T>
+impl<T, B> PartialOrd for Ref<B, T>
where
B: ByteSlice,
T: FromBytes + PartialOrd,
@@ -1795,7 +2597,7 @@ where
}
}
-impl<T, B> PartialOrd for LayoutVerified<B, [T]>
+impl<T, B> PartialOrd for Ref<B, [T]>
where
B: ByteSlice,
T: FromBytes + PartialOrd,
@@ -1809,13 +2611,8 @@ where
}
mod sealed {
- use core::cell::{Ref, RefMut};
-
- pub trait Sealed {}
- impl<'a> Sealed for &'a [u8] {}
- impl<'a> Sealed for &'a mut [u8] {}
- impl<'a> Sealed for Ref<'a, [u8]> {}
- impl<'a> Sealed for RefMut<'a, [u8]> {}
+ pub trait ByteSliceSealed {}
+ pub trait KnownLayoutSealed {}
}
// ByteSlice and ByteSliceMut abstract over [u8] references (&[u8], &mut [u8],
@@ -1827,6 +2624,7 @@ mod sealed {
// unsafe code. Thus, we seal them and implement it only for known-good
// reference types. For the same reason, they're unsafe traits.
+#[allow(clippy::missing_safety_doc)] // TODO(fxbug.dev/99068)
/// A mutable or immutable reference to a byte slice.
///
/// `ByteSlice` abstracts over the mutability of a byte slice reference, and is
@@ -1838,11 +2636,24 @@ mod sealed {
/// method would involve reallocation, and `split_at` must be a very cheap
/// operation in order for the utilities in this crate to perform as designed.
///
-/// [`Vec<u8>`]: std::vec::Vec
/// [`split_at`]: crate::ByteSlice::split_at
-pub unsafe trait ByteSlice: Deref<Target = [u8]> + Sized + self::sealed::Sealed {
+// It may seem overkill to go to this length to ensure that this doc link never
+// breaks. We do this because it simplifies CI - it means that generating docs
+// always succeeds, so we don't need special logic to only generate docs under
+// certain features.
+#[cfg_attr(feature = "alloc", doc = "[`Vec<u8>`]: alloc::vec::Vec")]
+#[cfg_attr(
+ not(feature = "alloc"),
+ doc = "[`Vec<u8>`]: https://doc.rust-lang.org/std/vec/struct.Vec.html"
+)]
+pub unsafe trait ByteSlice:
+ Deref<Target = [u8]> + Sized + self::sealed::ByteSliceSealed
+{
/// Gets a raw pointer to the first byte in the slice.
- fn as_ptr(&self) -> *const u8;
+ #[inline]
+ fn as_ptr(&self) -> *const u8 {
+ <[u8]>::as_ptr(self)
+ }
/// Splits the slice at the midpoint.
///
@@ -1854,6 +2665,7 @@ pub unsafe trait ByteSlice: Deref<Target = [u8]> + Sized + self::sealed::Sealed
fn split_at(self, mid: usize) -> (Self, Self);
}
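+// For example, code written once against `B: ByteSlice` works with `&[u8]`,
+// `&mut [u8]`, `cell::Ref<'_, [u8]>`, and `RefMut<'_, [u8]>` alike (a sketch;
+// the function name is illustrative):
+//
+//     fn split_u64_prefix<B: ByteSlice>(bytes: B) -> Option<(Ref<B, u64>, B)> {
+//         Ref::new_from_prefix(bytes)
+//     }
+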
+#[allow(clippy::missing_safety_doc)] // TODO(fxbug.dev/99068)
/// A mutable reference to a byte slice.
///
/// `ByteSliceMut` abstracts over various ways of storing a mutable reference to
@@ -1861,62 +2673,65 @@ pub unsafe trait ByteSlice: Deref<Target = [u8]> + Sized + self::sealed::Sealed
/// `RefMut<[u8]>`.
pub unsafe trait ByteSliceMut: ByteSlice + DerefMut {
/// Gets a mutable raw pointer to the first byte in the slice.
- fn as_mut_ptr(&mut self) -> *mut u8;
+ #[inline]
+ fn as_mut_ptr(&mut self) -> *mut u8 {
+ <[u8]>::as_mut_ptr(self)
+ }
}
+impl<'a> sealed::ByteSliceSealed for &'a [u8] {}
+// TODO(#61): Add a "SAFETY" comment and remove this `allow`.
+#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl<'a> ByteSlice for &'a [u8] {
- fn as_ptr(&self) -> *const u8 {
- <[u8]>::as_ptr(self)
- }
+ #[inline]
fn split_at(self, mid: usize) -> (Self, Self) {
<[u8]>::split_at(self, mid)
}
}
+
+impl<'a> sealed::ByteSliceSealed for &'a mut [u8] {}
+// TODO(#61): Add a "SAFETY" comment and remove this `allow`.
+#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl<'a> ByteSlice for &'a mut [u8] {
- fn as_ptr(&self) -> *const u8 {
- <[u8]>::as_ptr(self)
- }
+ #[inline]
fn split_at(self, mid: usize) -> (Self, Self) {
<[u8]>::split_at_mut(self, mid)
}
}
-unsafe impl<'a> ByteSlice for Ref<'a, [u8]> {
- fn as_ptr(&self) -> *const u8 {
- <[u8]>::as_ptr(self)
- }
+
+impl<'a> sealed::ByteSliceSealed for cell::Ref<'a, [u8]> {}
+// TODO(#61): Add a "SAFETY" comment and remove this `allow`.
+#[allow(clippy::undocumented_unsafe_blocks)]
+unsafe impl<'a> ByteSlice for cell::Ref<'a, [u8]> {
+ #[inline]
fn split_at(self, mid: usize) -> (Self, Self) {
- Ref::map_split(self, |slice| <[u8]>::split_at(slice, mid))
+ cell::Ref::map_split(self, |slice| <[u8]>::split_at(slice, mid))
}
}
+
+impl<'a> sealed::ByteSliceSealed for RefMut<'a, [u8]> {}
+// TODO(#61): Add a "SAFETY" comment and remove this `allow`.
+#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl<'a> ByteSlice for RefMut<'a, [u8]> {
- fn as_ptr(&self) -> *const u8 {
- <[u8]>::as_ptr(self)
- }
+ #[inline]
fn split_at(self, mid: usize) -> (Self, Self) {
RefMut::map_split(self, |slice| <[u8]>::split_at_mut(slice, mid))
}
}
-unsafe impl<'a> ByteSliceMut for &'a mut [u8] {
- fn as_mut_ptr(&mut self) -> *mut u8 {
- <[u8]>::as_mut_ptr(self)
- }
-}
-unsafe impl<'a> ByteSliceMut for RefMut<'a, [u8]> {
- fn as_mut_ptr(&mut self) -> *mut u8 {
- <[u8]>::as_mut_ptr(self)
- }
-}
+// TODO(#61): Add a "SAFETY" comment and remove this `allow`.
+#[allow(clippy::undocumented_unsafe_blocks)]
+unsafe impl<'a> ByteSliceMut for &'a mut [u8] {}
-#[cfg(any(test, feature = "alloc"))]
+// TODO(#61): Add a "SAFETY" comment and remove this `allow`.
+#[allow(clippy::undocumented_unsafe_blocks)]
+unsafe impl<'a> ByteSliceMut for RefMut<'a, [u8]> {}
+
+#[cfg(feature = "alloc")]
mod alloc_support {
- pub(crate) extern crate alloc;
- pub(crate) use super::*;
- pub(crate) use alloc::alloc::Layout;
- pub(crate) use alloc::boxed::Box;
- pub(crate) use alloc::vec::Vec;
- pub(crate) use core::mem::{align_of, size_of};
- pub(crate) use core::ptr::NonNull;
+ use alloc::vec::Vec;
+
+ use super::*;
/// Extends a `Vec<T>` by pushing `additional` new items onto the end of the
/// vector. The new items are initialized with zeroes.
@@ -1924,7 +2739,8 @@ mod alloc_support {
/// # Panics
///
/// Panics if `Vec::reserve(additional)` fails to reserve enough memory.
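+ ///
+ /// For example (a worked illustration of the behavior), extending
+ /// `vec![100u64, 200, 300]` by 3 yields `[100, 200, 300, 0, 0, 0]`; see
+ /// `test_extend_vec_zeroed` below.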
- pub fn extend_vec_zeroed<T: FromBytes>(v: &mut Vec<T>, additional: usize) {
+ #[inline(always)]
+ pub fn extend_vec_zeroed<T: FromZeroes>(v: &mut Vec<T>, additional: usize) {
insert_vec_zeroed(v, v.len(), additional);
}
@@ -1935,27 +2751,198 @@ mod alloc_support {
///
/// * Panics if `position > v.len()`.
/// * Panics if `Vec::reserve(additional)` fails to reserve enough memory.
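+ ///
+ /// For example (a worked illustration), inserting 2 zeroed elements at
+ /// position 1 of `vec![100u64, 200, 300]` yields `[100, 0, 0, 200, 300]`;
+ /// see `test_insert_vec_zeroed` below.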
- pub fn insert_vec_zeroed<T: FromBytes>(v: &mut Vec<T>, position: usize, additional: usize) {
+ #[inline]
+ pub fn insert_vec_zeroed<T: FromZeroes>(v: &mut Vec<T>, position: usize, additional: usize) {
assert!(position <= v.len());
v.reserve(additional);
- // The reserve() call guarantees that these cannot overflow:
+ // SAFETY: The `reserve` call guarantees that these cannot overflow:
// * `ptr.add(position)`
// * `position + additional`
// * `v.len() + additional`
//
// `v.len() - position` cannot overflow because we asserted that
- // position <= v.len().
+ // `position <= v.len()`.
unsafe {
// This is a potentially overlapping copy.
let ptr = v.as_mut_ptr();
+ #[allow(clippy::arithmetic_side_effects)]
ptr.add(position).copy_to(ptr.add(position + additional), v.len() - position);
ptr.add(position).write_bytes(0, additional);
+ #[allow(clippy::arithmetic_side_effects)]
v.set_len(v.len() + additional);
}
}
+
+ #[cfg(test)]
+ mod tests {
+ use super::*;
+
+ #[test]
+ fn test_extend_vec_zeroed() {
+ // Test extending when there is an existing allocation.
+ let mut v = vec![100u64, 200, 300];
+ extend_vec_zeroed(&mut v, 3);
+ assert_eq!(v.len(), 6);
+ assert_eq!(&*v, &[100, 200, 300, 0, 0, 0]);
+ drop(v);
+
+ // Test extending when there is no existing allocation.
+ let mut v: Vec<u64> = Vec::new();
+ extend_vec_zeroed(&mut v, 3);
+ assert_eq!(v.len(), 3);
+ assert_eq!(&*v, &[0, 0, 0]);
+ drop(v);
+ }
+
+ #[test]
+ fn test_extend_vec_zeroed_zst() {
+ // Test extending when there is an existing (fake) allocation.
+ let mut v = vec![(), (), ()];
+ extend_vec_zeroed(&mut v, 3);
+ assert_eq!(v.len(), 6);
+ assert_eq!(&*v, &[(), (), (), (), (), ()]);
+ drop(v);
+
+ // Test extending when there is no existing (fake) allocation.
+ let mut v: Vec<()> = Vec::new();
+ extend_vec_zeroed(&mut v, 3);
+ assert_eq!(&*v, &[(), (), ()]);
+ drop(v);
+ }
+
+ #[test]
+ fn test_insert_vec_zeroed() {
+ // Insert at start (no existing allocation).
+ let mut v: Vec<u64> = Vec::new();
+ insert_vec_zeroed(&mut v, 0, 2);
+ assert_eq!(v.len(), 2);
+ assert_eq!(&*v, &[0, 0]);
+ drop(v);
+
+ // Insert at start.
+ let mut v = vec![100u64, 200, 300];
+ insert_vec_zeroed(&mut v, 0, 2);
+ assert_eq!(v.len(), 5);
+ assert_eq!(&*v, &[0, 0, 100, 200, 300]);
+ drop(v);
+
+ // Insert at middle.
+ let mut v = vec![100u64, 200, 300];
+ insert_vec_zeroed(&mut v, 1, 1);
+ assert_eq!(v.len(), 4);
+ assert_eq!(&*v, &[100, 0, 200, 300]);
+ drop(v);
+
+ // Insert at end.
+ let mut v = vec![100u64, 200, 300];
+ insert_vec_zeroed(&mut v, 3, 1);
+ assert_eq!(v.len(), 4);
+ assert_eq!(&*v, &[100, 200, 300, 0]);
+ drop(v);
+ }
+
+ #[test]
+ fn test_insert_vec_zeroed_zst() {
+ // Insert at start (no existing fake allocation).
+ let mut v: Vec<()> = Vec::new();
+ insert_vec_zeroed(&mut v, 0, 2);
+ assert_eq!(v.len(), 2);
+ assert_eq!(&*v, &[(), ()]);
+ drop(v);
+
+ // Insert at start.
+ let mut v = vec![(), (), ()];
+ insert_vec_zeroed(&mut v, 0, 2);
+ assert_eq!(v.len(), 5);
+ assert_eq!(&*v, &[(), (), (), (), ()]);
+ drop(v);
+
+ // Insert at middle.
+ let mut v = vec![(), (), ()];
+ insert_vec_zeroed(&mut v, 1, 1);
+ assert_eq!(v.len(), 4);
+ assert_eq!(&*v, &[(), (), (), ()]);
+ drop(v);
+
+ // Insert at end.
+ let mut v = vec![(), (), ()];
+ insert_vec_zeroed(&mut v, 3, 1);
+ assert_eq!(v.len(), 4);
+ assert_eq!(&*v, &[(), (), (), ()]);
+ drop(v);
+ }
+
+ #[test]
+ fn test_new_box_zeroed() {
+ assert_eq!(*u64::new_box_zeroed(), 0);
+ }
+
+ #[test]
+ fn test_new_box_zeroed_array() {
+ drop(<[u32; 0x1000]>::new_box_zeroed());
+ }
+
+ #[test]
+ fn test_new_box_zeroed_zst() {
+ // This test exists in order to exercise unsafe code, especially
+ // when running under Miri.
+ #[allow(clippy::unit_cmp)]
+ {
+ assert_eq!(*<()>::new_box_zeroed(), ());
+ }
+ }
+
+ #[test]
+ fn test_new_box_slice_zeroed() {
+ let mut s: Box<[u64]> = u64::new_box_slice_zeroed(3);
+ assert_eq!(s.len(), 3);
+ assert_eq!(&*s, &[0, 0, 0]);
+ s[1] = 3;
+ assert_eq!(&*s, &[0, 3, 0]);
+ }
+
+ #[test]
+ fn test_new_box_slice_zeroed_empty() {
+ let s: Box<[u64]> = u64::new_box_slice_zeroed(0);
+ assert_eq!(s.len(), 0);
+ }
+
+ #[test]
+ fn test_new_box_slice_zeroed_zst() {
+ let mut s: Box<[()]> = <()>::new_box_slice_zeroed(3);
+ assert_eq!(s.len(), 3);
+ assert!(s.get(10).is_none());
+ // This test exists in order to exercise unsafe code, especially
+ // when running under Miri.
+ #[allow(clippy::unit_cmp)]
+ {
+ assert_eq!(s[1], ());
+ }
+ s[2] = ();
+ }
+
+ #[test]
+ fn test_new_box_slice_zeroed_zst_empty() {
+ let s: Box<[()]> = <()>::new_box_slice_zeroed(0);
+ assert_eq!(s.len(), 0);
+ }
+
+ #[test]
+ #[should_panic(expected = "mem::size_of::<Self>() * len overflows `usize`")]
+ fn test_new_box_slice_zeroed_panics_mul_overflow() {
+ let _ = u16::new_box_slice_zeroed(usize::MAX);
+ }
+
+ #[test]
+ #[should_panic(expected = "assertion failed: size <= max_alloc")]
+ fn test_new_box_slice_zeroed_panics_isize_overflow() {
+ let max = usize::try_from(isize::MAX).unwrap();
+ let _ = u16::new_box_slice_zeroed((max / mem::size_of::<u16>()) + 1);
+ }
+ }
}
-#[cfg(any(test, feature = "alloc"))]
+#[cfg(feature = "alloc")]
#[doc(inline)]
pub use alloc_support::*;
@@ -1965,25 +2952,295 @@ mod tests {
use core::ops::Deref;
+ use static_assertions::assert_impl_all;
+
use super::*;
+ use crate::util::testutil::*;
+
+ // An unsized type.
+ //
+ // This is used to test the custom derives of our traits. The `[u8]` type
+ // gets a hand-rolled impl, so it doesn't exercise our custom derives.
+ #[derive(Debug, Eq, PartialEq, FromZeroes, FromBytes, AsBytes, Unaligned)]
+ #[repr(transparent)]
+ struct Unsized([u8]);
+
+ impl Unsized {
+ fn from_mut_slice(slc: &mut [u8]) -> &mut Unsized {
+ // SAFETY: This is *probably* sound - since the layouts of `[u8]` and
+ // `Unsized` are the same, so are the layouts of `&mut [u8]` and
+ // `&mut Unsized`. [1] Even if it turns out that this isn't actually
+ // guaranteed by the language spec, we can just change this since
+ // it's in test code.
+ //
+ // [1] https://github.com/rust-lang/unsafe-code-guidelines/issues/375
+ unsafe { mem::transmute(slc) }
+ }
+ }
- // B should be [u8; N]. T will require that the entire structure is aligned
- // to the alignment of T.
- #[derive(Default)]
- struct AlignedBuffer<T, B> {
- buf: B,
- _t: T,
+ // This test takes a long time when running under Miri, so we skip it in
+ // that case. This is acceptable because this is a logic test that doesn't
+ // attempt to expose UB.
+ #[test]
+ #[cfg_attr(miri, ignore)]
+ fn test_validate_cast_and_convert_metadata() {
+ fn layout(
+ base_size: usize,
+ align: usize,
+ _trailing_slice_elem_size: Option<usize>,
+ ) -> DstLayout {
+ DstLayout {
+ _base_layout: Layout::from_size_align(base_size, align).unwrap(),
+ _trailing_slice_elem_size,
+ }
+ }
+
+ /// This macro accepts arguments in the form of:
+ ///
+ /// layout(_, _, _).validate(_, _, _), Ok(Some((_, _)))
+ /// | | | | | | | |
+ /// base_size ----+ | | | | | | |
+ /// align -----------+ | | | | | |
+ /// trailing_size ------+ | | | | |
+ /// addr ---------------------------+ | | | |
+ /// bytes_len -------------------------+ | | |
+ /// cast_type ----------------------------+ | |
+ /// elems ---------------------------------------------+ |
+ /// split_at ---------------------------------------------+
+ ///
+ /// `.validate` is shorthand for `.validate_cast_and_convert_metadata`
+ /// for brevity.
+ ///
+ /// Each argument can either be an iterator or a wildcard. Each
+ /// wildcarded variable is implicitly replaced by an iterator over a
+ /// representative sample of values for that variable. Each `test!`
+ /// invocation iterates over every combination of values provided by
+ /// each variable's iterator (ie, the cartesian product) and validates
+ /// that the results are expected.
+ ///
+ /// The final argument uses the same syntax, but it has a different
+ /// meaning:
+ /// - If it is `Ok(pat)`, then the pattern `pat` is supplied to
+ /// `assert_matches!` to validate the computed result for each
+ /// combination of input values.
+ /// - If it is `Err(msg)`, then `test!` validates that the call to
+ /// `validate_cast_and_convert_metadata` panics with the given panic
+ /// message.
+ ///
+ /// Note that the meta-variables that match these variables have the
+ /// `tt` type, and some valid expressions are not valid `tt`s (such as
+ /// `a..b`). In this case, wrap the expression in parentheses, and it
+ /// will become a valid `tt`.
+ macro_rules! test {
+ (
+ layout($base_size:tt, $align:tt, $trailing_size:tt)
+ .validate($addr:tt, $bytes_len:tt, $cast_type:tt), $expect:pat $(,)?
+ ) => {
+ itertools::iproduct!(
+ test!(@generate_usize $base_size),
+ test!(@generate_align $align),
+ test!(@generate_opt_usize $trailing_size),
+ test!(@generate_usize $addr),
+ test!(@generate_usize $bytes_len),
+ test!(@generate_cast_type $cast_type)
+ ).for_each(|(base_size, align, trailing_size, addr, bytes_len, cast_type)| {
+ let actual = std::panic::catch_unwind(|| {
+ layout(base_size, align, trailing_size)._validate_cast_and_convert_metadata(addr, bytes_len, cast_type)
+ }).map_err(|d| {
+ *d.downcast::<&'static str>().expect("expected string panic message").as_ref()
+ });
+ assert_matches::assert_matches!(
+ actual, $expect,
+ "layout({base_size}, {align}, {trailing_size:?}).validate_cast_and_convert_metadata({addr}, {bytes_len}, {cast_type:?})",
+ );
+ });
+ };
+ (@generate_usize _) => { 0..8 };
+ (@generate_align _) => { [1, 2, 4, 8, 16] };
+ (@generate_opt_usize _) => { [None].into_iter().chain((0..8).map(Some)) };
+ (@generate_cast_type _) => { [_CastType::_Prefix, _CastType::_Suffix] };
+ (@generate_cast_type $variant:ident) => { [_CastType::$variant] };
+ // Some expressions need to be wrapped in parentheses in order to be
+ // valid `tt`s (required by the top match pattern). See the comment
+ // below for more details. This arm removes these parentheses to
+ // avoid generating an `unused_parens` warning.
+ (@$_:ident ($vals:expr)) => { $vals };
+ (@$_:ident $vals:expr) => { $vals };
+ }
+
+ const EVENS: [usize; 5] = [0, 2, 4, 6, 8];
+ const NZ_EVENS: [usize; 5] = [2, 4, 6, 8, 10];
+ const ODDS: [usize; 5] = [1, 3, 5, 7, 9];
+
+ // base_size is too big for the memory region.
+ test!(layout((1..8), _, ((1..8).map(Some))).validate(_, [0], _), Ok(None));
+ test!(layout((2..8), _, ((1..8).map(Some))).validate(_, [1], _), Ok(None));
+
+ // addr is unaligned for prefix cast
+ test!(layout(_, [2], [None]).validate(ODDS, _, _Prefix), Ok(None));
+ test!(layout(_, [2], (NZ_EVENS.map(Some))).validate(ODDS, _, _Prefix), Ok(None));
+
+ // addr is aligned, but end of buffer is unaligned for suffix cast
+ test!(layout(_, [2], [None]).validate(EVENS, ODDS, _Suffix), Ok(None));
+ test!(layout(_, [2], (NZ_EVENS.map(Some))).validate(EVENS, ODDS, _Suffix), Ok(None));
+
+ // Unfortunately, these constants cannot easily be used in the
+ // implementation of `validate_cast_and_convert_metadata`, since
+ // `panic!` consumes a string literal, not an expression.
+ //
+ // It's important that these messages be in a separate module. If they
+ // were at the function's top level, we'd pass them to `test!` as, e.g.,
+ // `Err(TRAILING)`, which would run into a subtle Rust footgun - the
+ // `TRAILING` identifier would be treated as a pattern to match rather
+ // than a value to check for equality.
+ mod msgs {
+ pub(super) const TRAILING: &str =
+ "attempted to cast to slice type with zero-sized element";
+ pub(super) const OVERFLOW: &str = "`addr` + `bytes_len` > usize::MAX";
+ }
+
+ // casts with ZST trailing element types are unsupported
+ test!(layout(_, _, [Some(0)]).validate(_, _, _), Err(msgs::TRAILING),);
+
+ // addr + bytes_len must not overflow usize
+ test!(
+ layout(_, [1], (NZ_EVENS.map(Some))).validate([usize::MAX], (1..100), _),
+ Err(msgs::OVERFLOW)
+ );
+ test!(layout(_, [1], [None]).validate((1..100), [usize::MAX], _), Err(msgs::OVERFLOW));
+ test!(
+ layout([1], [1], [None]).validate(
+ [usize::MAX / 2 + 1, usize::MAX],
+ [usize::MAX / 2 + 1, usize::MAX],
+ _
+ ),
+ Err(msgs::OVERFLOW)
+ );
+
+ // Validates that `validate_cast_and_convert_metadata` satisfies its own
+ // documented safety postconditions, and also a few other properties
+ // that aren't documented but we want to guarantee anyway.
+ fn validate_behavior(
+ (layout, addr, bytes_len, cast_type): (DstLayout, usize, usize, _CastType),
+ ) {
+ if let Some((elems, split_at)) =
+ layout._validate_cast_and_convert_metadata(addr, bytes_len, cast_type)
+ {
+ let (base_size, align, trailing_elem_size) = (
+ layout._base_layout.size(),
+ layout._base_layout.align(),
+ layout._trailing_slice_elem_size,
+ );
+
+ let debug_str = format!(
+ "layout({base_size}, {align}, {trailing_elem_size:?}).validate_cast_and_convert_metadata({addr}, {bytes_len}, {cast_type:?}) => ({elems}, {split_at})",
+ );
+
+ // If this is a sized type (no trailing slice), then `elems` is
+ // meaningless, but in practice we set it to 0. Callers are not
+ // allowed to rely on this, but a lot of math is nicer if
+ // they're able to, and some callers might accidentally do that.
+ assert!(!(trailing_elem_size.is_none() && elems != 0), "{}", debug_str);
+
+ let resulting_size = base_size + (elems * trailing_elem_size.unwrap_or(0));
+ // Test that, for unsized types, `validate_cast_and_convert_metadata` computed the
+ // largest possible value that fits in the given byte range.
+ assert!(
+ trailing_elem_size
+ .map(|elem_size| resulting_size + elem_size > bytes_len)
+ .unwrap_or(true),
+ "{}",
+ debug_str
+ );
+
+ // Test safety postconditions guaranteed by `validate_cast_and_convert_metadata`.
+ assert!(resulting_size <= bytes_len);
+ match cast_type {
+ _CastType::_Prefix => {
+ assert_eq!(addr % align, 0, "{}", debug_str);
+ assert_eq!(resulting_size, split_at, "{}", debug_str);
+ }
+ _CastType::_Suffix => {
+ assert_eq!(split_at, bytes_len - resulting_size, "{}", debug_str);
+ assert_eq!((addr + split_at) % align, 0, "{}", debug_str);
+ }
+ }
+ }
+ }
+
+ let layouts = itertools::iproduct!(0..8, [1, 2, 4, 8], (1..8).map(Some).chain([None]))
+ .filter(|(size, align, trailing_elem_size)| {
+ size % align == 0 && trailing_elem_size.unwrap_or(*align) % align == 0
+ })
+ .map(|(s, a, t)| layout(s, a, t));
+ itertools::iproduct!(layouts, 0..8, 0..8, [_CastType::_Prefix, _CastType::_Suffix])
+ .for_each(validate_behavior);
}
- impl<T, B: Default> AlignedBuffer<T, B> {
- fn clear_buf(&mut self) {
- self.buf = B::default();
+ #[test]
+ fn test_known_layout() {
+ // Test that `$ty` and `ManuallyDrop<$ty>` have the expected layout.
+ // Test that `PhantomData<$ty>` has the same layout as `()` regardless
+ // of `$ty`.
+ macro_rules! test {
+ ($ty:ty, $expect:expr) => {
+ let expect = $expect;
+ assert_eq!(<$ty as KnownLayout>::LAYOUT, expect);
+ assert_eq!(<ManuallyDrop<$ty> as KnownLayout>::LAYOUT, expect);
+ assert_eq!(<PhantomData<$ty> as KnownLayout>::LAYOUT, <() as KnownLayout>::LAYOUT);
+ };
}
+
+ let layout = |base_size, align, _trailing_slice_elem_size| DstLayout {
+ _base_layout: Layout::from_size_align(base_size, align).unwrap(),
+ _trailing_slice_elem_size,
+ };
+
+ test!((), layout(0, 1, None));
+ test!(u8, layout(1, 1, None));
+ // Use `align_of` because `u64` alignment may be smaller than 8 on some
+ // platforms.
+ test!(u64, layout(8, mem::align_of::<u64>(), None));
+ test!(AU64, layout(8, 8, None));
+
+ test!(Option<&'static ()>, usize::LAYOUT);
+
+ test!([()], layout(0, 1, Some(0)));
+ test!([u8], layout(0, 1, Some(1)));
+ test!(str, layout(0, 1, Some(1)));
+ }
+
+ #[test]
+ fn test_object_safety() {
+ fn _takes_from_zeroes(_: &dyn FromZeroes) {}
+ fn _takes_from_bytes(_: &dyn FromBytes) {}
+ fn _takes_unaligned(_: &dyn Unaligned) {}
}
- // convert a u64 to bytes using this platform's endianness
- fn u64_to_bytes(u: u64) -> [u8; 8] {
- unsafe { ptr::read(&u as *const u64 as *const [u8; 8]) }
+ #[test]
+ fn test_from_zeroes_only() {
+ // Test types that implement `FromZeroes` but not `FromBytes`.
+
+ assert!(!bool::new_zeroed());
+ assert_eq!(char::new_zeroed(), '\0');
+
+ #[cfg(feature = "alloc")]
+ {
+ assert_eq!(bool::new_box_zeroed(), Box::new(false));
+ assert_eq!(char::new_box_zeroed(), Box::new('\0'));
+
+ assert_eq!(bool::new_box_slice_zeroed(3).as_ref(), [false, false, false]);
+ assert_eq!(char::new_box_slice_zeroed(3).as_ref(), ['\0', '\0', '\0']);
+
+ assert_eq!(bool::new_vec_zeroed(3).as_ref(), [false, false, false]);
+ assert_eq!(char::new_vec_zeroed(3).as_ref(), ['\0', '\0', '\0']);
+ }
+
+ let mut string = "hello".to_string();
+ let s: &mut str = string.as_mut();
+ assert_eq!(s, "hello");
+ s.zero();
+ assert_eq!(s, "\0\0\0\0\0");
}
#[test]
@@ -1994,7 +3251,7 @@ mod tests {
#[cfg(target_endian = "little")]
const VAL_BYTES: [u8; 8] = VAL.to_le_bytes();
- // Test FromBytes::{read_from, read_from_prefix, read_from_suffix}
+ // Test `FromBytes::{read_from, read_from_prefix, read_from_suffix}`.
assert_eq!(u64::read_from(&VAL_BYTES[..]), Some(VAL));
// The first 8 bytes are from `VAL_BYTES` and the second 8 bytes are all
@@ -2008,7 +3265,7 @@ mod tests {
assert_eq!(u64::read_from_prefix(&bytes_with_suffix[..]), Some(0));
assert_eq!(u64::read_from_suffix(&bytes_with_suffix[..]), Some(VAL));
- // Test AsBytes::{write_to, write_to_prefix, write_to_suffix}
+ // Test `AsBytes::{write_to, write_to_prefix, write_to_suffix}`.
let mut bytes = [0u8; 8];
assert_eq!(VAL.write_to(&mut bytes[..]), Some(()));
@@ -2044,595 +3301,608 @@ mod tests {
}
}
let _: () = transmute!(PanicOnDrop(()));
+
+ // Test that `transmute!` is legal in a const context.
+ const ARRAY_OF_U8S: [u8; 8] = [0u8, 1, 2, 3, 4, 5, 6, 7];
+ const ARRAY_OF_ARRAYS: [[u8; 2]; 4] = [[0, 1], [2, 3], [4, 5], [6, 7]];
+ const X: [[u8; 2]; 4] = transmute!(ARRAY_OF_U8S);
+ assert_eq!(X, ARRAY_OF_ARRAYS);
}
#[test]
fn test_address() {
- // test that the Deref and DerefMut implementations return a reference
- // which points to the right region of memory
+ // Test that the `Deref` and `DerefMut` implementations return a
+ // reference which points to the right region of memory.
let buf = [0];
- let lv = LayoutVerified::<_, u8>::new(&buf[..]).unwrap();
+ let r = Ref::<_, u8>::new(&buf[..]).unwrap();
let buf_ptr = buf.as_ptr();
- let deref_ptr = lv.deref() as *const u8;
+ let deref_ptr: *const u8 = r.deref();
assert_eq!(buf_ptr, deref_ptr);
let buf = [0];
- let lv = LayoutVerified::<_, [u8]>::new_slice(&buf[..]).unwrap();
+ let r = Ref::<_, [u8]>::new_slice(&buf[..]).unwrap();
let buf_ptr = buf.as_ptr();
- let deref_ptr = lv.deref().as_ptr();
+ let deref_ptr = r.deref().as_ptr();
assert_eq!(buf_ptr, deref_ptr);
}
- // verify that values written to a LayoutVerified are properly shared
- // between the typed and untyped representations, that reads via `deref` and
- // `read` behave the same, and that writes via `deref_mut` and `write`
- // behave the same
- fn test_new_helper<'a>(mut lv: LayoutVerified<&'a mut [u8], u64>) {
+ // Verify that values written to a `Ref` are properly shared between the
+ // typed and untyped representations, that reads via `deref` and `read`
+ // behave the same, and that writes via `deref_mut` and `write` behave the
+ // same.
+ fn test_new_helper(mut r: Ref<&mut [u8], AU64>) {
// assert that the value starts at 0
- assert_eq!(*lv, 0);
- assert_eq!(lv.read(), 0);
-
- // assert that values written to the typed value are reflected in the
- // byte slice
- const VAL1: u64 = 0xFF00FF00FF00FF00;
- *lv = VAL1;
- assert_eq!(lv.bytes(), &u64_to_bytes(VAL1));
- *lv = 0;
- lv.write(VAL1);
- assert_eq!(lv.bytes(), &u64_to_bytes(VAL1));
-
- // assert that values written to the byte slice are reflected in the
- // typed value
- const VAL2: u64 = !VAL1; // different from VAL1
- lv.bytes_mut().copy_from_slice(&u64_to_bytes(VAL2)[..]);
- assert_eq!(*lv, VAL2);
- assert_eq!(lv.read(), VAL2);
- }
-
- // verify that values written to a LayoutVerified are properly shared
- // between the typed and untyped representations; pass a value with
- // `typed_len` `u64`s backed by an array of `typed_len * 8` bytes.
- fn test_new_helper_slice<'a>(mut lv: LayoutVerified<&'a mut [u8], [u64]>, typed_len: usize) {
- // assert that the value starts out zeroed
- assert_eq!(&*lv, vec![0; typed_len].as_slice());
-
- // check the backing storage is the exact same slice
+ assert_eq!(*r, AU64(0));
+ assert_eq!(r.read(), AU64(0));
+
+ // Assert that values written to the typed value are reflected in the
+ // byte slice.
+ const VAL1: AU64 = AU64(0xFF00FF00FF00FF00);
+ *r = VAL1;
+ assert_eq!(r.bytes(), &VAL1.to_bytes());
+ *r = AU64(0);
+ r.write(VAL1);
+ assert_eq!(r.bytes(), &VAL1.to_bytes());
+
+ // Assert that values written to the byte slice are reflected in the
+ // typed value.
+ const VAL2: AU64 = AU64(!VAL1.0); // different from `VAL1`
+ r.bytes_mut().copy_from_slice(&VAL2.to_bytes()[..]);
+ assert_eq!(*r, VAL2);
+ assert_eq!(r.read(), VAL2);
+ }
+
+ // Verify that values written to a `Ref` are properly shared between the
+ // typed and untyped representations; pass a value with `typed_len` `AU64`s
+ // backed by an array of `typed_len * 8` bytes.
+ fn test_new_helper_slice(mut r: Ref<&mut [u8], [AU64]>, typed_len: usize) {
+ // Assert that the value starts out zeroed.
+ assert_eq!(&*r, vec![AU64(0); typed_len].as_slice());
+
+ // Check the backing storage is the exact same slice.
let untyped_len = typed_len * 8;
- assert_eq!(lv.bytes().len(), untyped_len);
- assert_eq!(lv.bytes().as_ptr(), lv.as_ptr() as *const u8);
+ assert_eq!(r.bytes().len(), untyped_len);
+ assert_eq!(r.bytes().as_ptr(), r.as_ptr().cast::<u8>());
- // assert that values written to the typed value are reflected in the
- // byte slice
- const VAL1: u64 = 0xFF00FF00FF00FF00;
- for typed in &mut *lv {
+ // Assert that values written to the typed value are reflected in the
+ // byte slice.
+ const VAL1: AU64 = AU64(0xFF00FF00FF00FF00);
+ for typed in &mut *r {
*typed = VAL1;
}
- assert_eq!(lv.bytes(), VAL1.to_ne_bytes().repeat(typed_len).as_slice());
+ assert_eq!(r.bytes(), VAL1.0.to_ne_bytes().repeat(typed_len).as_slice());
- // assert that values written to the byte slice are reflected in the
- // typed value
- const VAL2: u64 = !VAL1; // different from VAL1
- lv.bytes_mut().copy_from_slice(&VAL2.to_ne_bytes().repeat(typed_len));
- assert!(lv.iter().copied().all(|x| x == VAL2));
+ // Assert that values written to the byte slice are reflected in the
+ // typed value.
+ const VAL2: AU64 = AU64(!VAL1.0); // different from `VAL1`
+ r.bytes_mut().copy_from_slice(&VAL2.0.to_ne_bytes().repeat(typed_len));
+ assert!(r.iter().copied().all(|x| x == VAL2));
}
- // verify that values written to a LayoutVerified are properly shared
- // between the typed and untyped representations, that reads via `deref` and
- // `read` behave the same, and that writes via `deref_mut` and `write`
- // behave the same
- fn test_new_helper_unaligned<'a>(mut lv: LayoutVerified<&'a mut [u8], [u8; 8]>) {
+ // Verify that values written to a `Ref` are properly shared between the
+ // typed and untyped representations, that reads via `deref` and `read`
+ // behave the same, and that writes via `deref_mut` and `write` behave the
+ // same.
+ fn test_new_helper_unaligned(mut r: Ref<&mut [u8], [u8; 8]>) {
// assert that the value starts at 0
- assert_eq!(*lv, [0; 8]);
- assert_eq!(lv.read(), [0; 8]);
+ assert_eq!(*r, [0; 8]);
+ assert_eq!(r.read(), [0; 8]);
- // assert that values written to the typed value are reflected in the
- // byte slice
+ // Assert that values written to the typed value are reflected in the
+ // byte slice.
const VAL1: [u8; 8] = [0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00];
- *lv = VAL1;
- assert_eq!(lv.bytes(), &VAL1);
- *lv = [0; 8];
- lv.write(VAL1);
- assert_eq!(lv.bytes(), &VAL1);
-
- // assert that values written to the byte slice are reflected in the
- // typed value
+ *r = VAL1;
+ assert_eq!(r.bytes(), &VAL1);
+ *r = [0; 8];
+ r.write(VAL1);
+ assert_eq!(r.bytes(), &VAL1);
+
+ // Assert that values written to the byte slice are reflected in the
+ // typed value.
const VAL2: [u8; 8] = [0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF]; // different from VAL1
- lv.bytes_mut().copy_from_slice(&VAL2[..]);
- assert_eq!(*lv, VAL2);
- assert_eq!(lv.read(), VAL2);
+ r.bytes_mut().copy_from_slice(&VAL2[..]);
+ assert_eq!(*r, VAL2);
+ assert_eq!(r.read(), VAL2);
}
- // verify that values written to a LayoutVerified are properly shared
- // between the typed and untyped representations; pass a value with
- // `len` `u8`s backed by an array of `len` bytes.
- fn test_new_helper_slice_unaligned<'a>(mut lv: LayoutVerified<&'a mut [u8], [u8]>, len: usize) {
- // assert that the value starts out zeroed
- assert_eq!(&*lv, vec![0u8; len].as_slice());
+ // Verify that values written to a `Ref` are properly shared between the
+ // typed and untyped representations; pass a value with `len` `u8`s backed
+ // by an array of `len` bytes.
+ fn test_new_helper_slice_unaligned(mut r: Ref<&mut [u8], [u8]>, len: usize) {
+ // Assert that the value starts out zeroed.
+ assert_eq!(&*r, vec![0u8; len].as_slice());
- // check the backing storage is the exact same slice
- assert_eq!(lv.bytes().len(), len);
- assert_eq!(lv.bytes().as_ptr(), lv.as_ptr());
+ // Check the backing storage is the exact same slice.
+ assert_eq!(r.bytes().len(), len);
+ assert_eq!(r.bytes().as_ptr(), r.as_ptr());
- // assert that values written to the typed value are reflected in the
- // byte slice
+ // Assert that values written to the typed value are reflected in the
+ // byte slice.
let mut expected_bytes = [0xFF, 0x00].iter().copied().cycle().take(len).collect::<Vec<_>>();
- lv.copy_from_slice(&expected_bytes);
- assert_eq!(lv.bytes(), expected_bytes.as_slice());
+ r.copy_from_slice(&expected_bytes);
+ assert_eq!(r.bytes(), expected_bytes.as_slice());
- // assert that values written to the byte slice are reflected in the
- // typed value
+ // Assert that values written to the byte slice are reflected in the
+ // typed value.
for byte in &mut expected_bytes {
- *byte = !*byte; // different from expected_len
+ *byte = !*byte; // different from the original `expected_bytes`
}
- lv.bytes_mut().copy_from_slice(&expected_bytes);
- assert_eq!(&*lv, expected_bytes.as_slice());
+ r.bytes_mut().copy_from_slice(&expected_bytes);
+ assert_eq!(&*r, expected_bytes.as_slice());
}
#[test]
fn test_new_aligned_sized() {
// Test that a properly-aligned, properly-sized buffer works for new,
- // new_from_preifx, and new_from_suffix, and that new_from_prefix and
+ // new_from_prefix, and new_from_suffix, and that new_from_prefix and
// new_from_suffix return empty slices. Test that a properly-aligned
// buffer whose length is a multiple of the element size works for
// new_slice. Test that xxx_zeroed behaves the same, and zeroes the
// memory.
- // a buffer with an alignment of 8
- let mut buf = AlignedBuffer::<u64, [u8; 8]>::default();
- // buf.buf should be aligned to 8, so this should always succeed
- test_new_helper(LayoutVerified::<_, u64>::new(&mut buf.buf[..]).unwrap());
- buf.buf = [0xFFu8; 8];
- test_new_helper(LayoutVerified::<_, u64>::new_zeroed(&mut buf.buf[..]).unwrap());
+ // A buffer with an alignment of 8.
+ let mut buf = Align::<[u8; 8], AU64>::default();
+ // `buf.t` should be aligned to 8, so this should always succeed.
+ test_new_helper(Ref::<_, AU64>::new(&mut buf.t[..]).unwrap());
+ buf.t = [0xFFu8; 8];
+ test_new_helper(Ref::<_, AU64>::new_zeroed(&mut buf.t[..]).unwrap());
{
- // in a block so that lv and suffix don't live too long
- buf.clear_buf();
- let (lv, suffix) = LayoutVerified::<_, u64>::new_from_prefix(&mut buf.buf[..]).unwrap();
+ // In a block so that `r` and `suffix` don't live too long.
+ buf.set_default();
+ let (r, suffix) = Ref::<_, AU64>::new_from_prefix(&mut buf.t[..]).unwrap();
assert!(suffix.is_empty());
- test_new_helper(lv);
+ test_new_helper(r);
}
{
- buf.buf = [0xFFu8; 8];
- let (lv, suffix) =
- LayoutVerified::<_, u64>::new_from_prefix_zeroed(&mut buf.buf[..]).unwrap();
+ buf.t = [0xFFu8; 8];
+ let (r, suffix) = Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[..]).unwrap();
assert!(suffix.is_empty());
- test_new_helper(lv);
+ test_new_helper(r);
}
{
- buf.clear_buf();
- let (prefix, lv) = LayoutVerified::<_, u64>::new_from_suffix(&mut buf.buf[..]).unwrap();
+ buf.set_default();
+ let (prefix, r) = Ref::<_, AU64>::new_from_suffix(&mut buf.t[..]).unwrap();
assert!(prefix.is_empty());
- test_new_helper(lv);
+ test_new_helper(r);
}
{
- buf.buf = [0xFFu8; 8];
- let (prefix, lv) =
- LayoutVerified::<_, u64>::new_from_suffix_zeroed(&mut buf.buf[..]).unwrap();
+ buf.t = [0xFFu8; 8];
+ let (prefix, r) = Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).unwrap();
assert!(prefix.is_empty());
- test_new_helper(lv);
- }
-
- // a buffer with alignment 8 and length 16
- let mut buf = AlignedBuffer::<u64, [u8; 16]>::default();
- // buf.buf should be aligned to 8 and have a length which is a multiple
- // of size_of::<u64>(), so this should always succeed
- test_new_helper_slice(LayoutVerified::<_, [u64]>::new_slice(&mut buf.buf[..]).unwrap(), 2);
- buf.buf = [0xFFu8; 16];
- test_new_helper_slice(
- LayoutVerified::<_, [u64]>::new_slice_zeroed(&mut buf.buf[..]).unwrap(),
- 2,
- );
+ test_new_helper(r);
+ }
+
+ // A buffer with alignment 8 and length 16.
+ let mut buf = Align::<[u8; 16], AU64>::default();
+ // `buf.t` should be aligned to 8 and have a length which is a multiple
+ // of `size_of::<AU64>()`, so this should always succeed.
+ test_new_helper_slice(Ref::<_, [AU64]>::new_slice(&mut buf.t[..]).unwrap(), 2);
+ buf.t = [0xFFu8; 16];
+ test_new_helper_slice(Ref::<_, [AU64]>::new_slice_zeroed(&mut buf.t[..]).unwrap(), 2);
{
- buf.clear_buf();
- let (lv, suffix) =
- LayoutVerified::<_, [u64]>::new_slice_from_prefix(&mut buf.buf[..], 1).unwrap();
+ buf.set_default();
+ let (r, suffix) = Ref::<_, [AU64]>::new_slice_from_prefix(&mut buf.t[..], 1).unwrap();
assert_eq!(suffix, [0; 8]);
- test_new_helper_slice(lv, 1);
+ test_new_helper_slice(r, 1);
}
{
- buf.buf = [0xFFu8; 16];
- let (lv, suffix) =
- LayoutVerified::<_, [u64]>::new_slice_from_prefix_zeroed(&mut buf.buf[..], 1)
- .unwrap();
+ buf.t = [0xFFu8; 16];
+ let (r, suffix) =
+ Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[..], 1).unwrap();
assert_eq!(suffix, [0xFF; 8]);
- test_new_helper_slice(lv, 1);
+ test_new_helper_slice(r, 1);
}
{
- buf.clear_buf();
- let (prefix, lv) =
- LayoutVerified::<_, [u64]>::new_slice_from_suffix(&mut buf.buf[..], 1).unwrap();
+ buf.set_default();
+ let (prefix, r) = Ref::<_, [AU64]>::new_slice_from_suffix(&mut buf.t[..], 1).unwrap();
assert_eq!(prefix, [0; 8]);
- test_new_helper_slice(lv, 1);
+ test_new_helper_slice(r, 1);
}
{
- buf.buf = [0xFFu8; 16];
- let (prefix, lv) =
- LayoutVerified::<_, [u64]>::new_slice_from_suffix_zeroed(&mut buf.buf[..], 1)
- .unwrap();
+ buf.t = [0xFFu8; 16];
+ let (prefix, r) =
+ Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[..], 1).unwrap();
assert_eq!(prefix, [0xFF; 8]);
- test_new_helper_slice(lv, 1);
+ test_new_helper_slice(r, 1);
}
}
#[test]
fn test_new_unaligned_sized() {
// Test that an unaligned, properly-sized buffer works for
- // new_unaligned, new_unaligned_from_prefix, and
- // new_unaligned_from_suffix, and that new_unaligned_from_prefix
- // new_unaligned_from_suffix return empty slices. Test that an unaligned
- // buffer whose length is a multiple of the element size works for
- // new_slice. Test that xxx_zeroed behaves the same, and zeroes the
- // memory.
+ // `new_unaligned`, `new_unaligned_from_prefix`, and
+ // `new_unaligned_from_suffix`, and that `new_unaligned_from_prefix` and
+ // `new_unaligned_from_suffix` return empty slices. Test that an
+ // unaligned buffer whose length is a multiple of the element size works
+ // for `new_slice_unaligned`. Test that `xxx_zeroed` behaves the same,
+ // and zeroes the memory.
let mut buf = [0u8; 8];
- test_new_helper_unaligned(
- LayoutVerified::<_, [u8; 8]>::new_unaligned(&mut buf[..]).unwrap(),
- );
+ test_new_helper_unaligned(Ref::<_, [u8; 8]>::new_unaligned(&mut buf[..]).unwrap());
buf = [0xFFu8; 8];
- test_new_helper_unaligned(
- LayoutVerified::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf[..]).unwrap(),
- );
+ test_new_helper_unaligned(Ref::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf[..]).unwrap());
{
- // in a block so that lv and suffix don't live too long
+ // In a block so that `r` and `suffix` don't live too long.
buf = [0u8; 8];
- let (lv, suffix) =
- LayoutVerified::<_, [u8; 8]>::new_unaligned_from_prefix(&mut buf[..]).unwrap();
+ let (r, suffix) = Ref::<_, [u8; 8]>::new_unaligned_from_prefix(&mut buf[..]).unwrap();
assert!(suffix.is_empty());
- test_new_helper_unaligned(lv);
+ test_new_helper_unaligned(r);
}
{
buf = [0xFFu8; 8];
- let (lv, suffix) =
- LayoutVerified::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf[..])
- .unwrap();
+ let (r, suffix) =
+ Ref::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf[..]).unwrap();
assert!(suffix.is_empty());
- test_new_helper_unaligned(lv);
+ test_new_helper_unaligned(r);
}
{
buf = [0u8; 8];
- let (prefix, lv) =
- LayoutVerified::<_, [u8; 8]>::new_unaligned_from_suffix(&mut buf[..]).unwrap();
+ let (prefix, r) = Ref::<_, [u8; 8]>::new_unaligned_from_suffix(&mut buf[..]).unwrap();
assert!(prefix.is_empty());
- test_new_helper_unaligned(lv);
+ test_new_helper_unaligned(r);
}
{
buf = [0xFFu8; 8];
- let (prefix, lv) =
- LayoutVerified::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf[..])
- .unwrap();
+ let (prefix, r) =
+ Ref::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf[..]).unwrap();
assert!(prefix.is_empty());
- test_new_helper_unaligned(lv);
+ test_new_helper_unaligned(r);
}
let mut buf = [0u8; 16];
- // buf.buf should be aligned to 8 and have a length which is a multiple
- // of size_of::<u64>(), so this should always succeed
+ // `buf` should have a length which is a multiple of the element size,
+ // so this should always succeed; alignment is irrelevant for
+ // `new_slice_unaligned`.
test_new_helper_slice_unaligned(
- LayoutVerified::<_, [u8]>::new_slice_unaligned(&mut buf[..]).unwrap(),
+ Ref::<_, [u8]>::new_slice_unaligned(&mut buf[..]).unwrap(),
16,
);
buf = [0xFFu8; 16];
test_new_helper_slice_unaligned(
- LayoutVerified::<_, [u8]>::new_slice_unaligned_zeroed(&mut buf[..]).unwrap(),
+ Ref::<_, [u8]>::new_slice_unaligned_zeroed(&mut buf[..]).unwrap(),
16,
);
{
buf = [0u8; 16];
- let (lv, suffix) =
- LayoutVerified::<_, [u8]>::new_slice_unaligned_from_prefix(&mut buf[..], 8)
- .unwrap();
+ let (r, suffix) =
+ Ref::<_, [u8]>::new_slice_unaligned_from_prefix(&mut buf[..], 8).unwrap();
assert_eq!(suffix, [0; 8]);
- test_new_helper_slice_unaligned(lv, 8);
+ test_new_helper_slice_unaligned(r, 8);
}
{
buf = [0xFFu8; 16];
- let (lv, suffix) =
- LayoutVerified::<_, [u8]>::new_slice_unaligned_from_prefix_zeroed(&mut buf[..], 8)
- .unwrap();
+ let (r, suffix) =
+ Ref::<_, [u8]>::new_slice_unaligned_from_prefix_zeroed(&mut buf[..], 8).unwrap();
assert_eq!(suffix, [0xFF; 8]);
- test_new_helper_slice_unaligned(lv, 8);
+ test_new_helper_slice_unaligned(r, 8);
}
{
buf = [0u8; 16];
- let (prefix, lv) =
- LayoutVerified::<_, [u8]>::new_slice_unaligned_from_suffix(&mut buf[..], 8)
- .unwrap();
+ let (prefix, r) =
+ Ref::<_, [u8]>::new_slice_unaligned_from_suffix(&mut buf[..], 8).unwrap();
assert_eq!(prefix, [0; 8]);
- test_new_helper_slice_unaligned(lv, 8);
+ test_new_helper_slice_unaligned(r, 8);
}
{
buf = [0xFFu8; 16];
- let (prefix, lv) =
- LayoutVerified::<_, [u8]>::new_slice_unaligned_from_suffix_zeroed(&mut buf[..], 8)
- .unwrap();
+ let (prefix, r) =
+ Ref::<_, [u8]>::new_slice_unaligned_from_suffix_zeroed(&mut buf[..], 8).unwrap();
assert_eq!(prefix, [0xFF; 8]);
- test_new_helper_slice_unaligned(lv, 8);
+ test_new_helper_slice_unaligned(r, 8);
}
}
#[test]
fn test_new_oversized() {
// Test that a properly-aligned, overly-sized buffer works for
- // new_from_prefix and new_from_suffix, and that they return the
- // remainder and prefix of the slice respectively. Test that xxx_zeroed
- // behaves the same, and zeroes the memory.
+ // `new_from_prefix` and `new_from_suffix`, and that they return the
+ // remainder and prefix of the slice respectively. Test that
+ // `xxx_zeroed` behaves the same, and zeroes the memory.
- let mut buf = AlignedBuffer::<u64, [u8; 16]>::default();
+ let mut buf = Align::<[u8; 16], AU64>::default();
{
- // in a block so that lv and suffix don't live too long
- // buf.buf should be aligned to 8, so this should always succeed
- let (lv, suffix) = LayoutVerified::<_, u64>::new_from_prefix(&mut buf.buf[..]).unwrap();
+ // In a block so that `r` and `suffix` don't live too long. `buf.t`
+ // should be aligned to 8, so this should always succeed.
+ let (r, suffix) = Ref::<_, AU64>::new_from_prefix(&mut buf.t[..]).unwrap();
assert_eq!(suffix.len(), 8);
- test_new_helper(lv);
+ test_new_helper(r);
}
{
- buf.buf = [0xFFu8; 16];
- // buf.buf should be aligned to 8, so this should always succeed
- let (lv, suffix) =
- LayoutVerified::<_, u64>::new_from_prefix_zeroed(&mut buf.buf[..]).unwrap();
- // assert that the suffix wasn't zeroed
+ buf.t = [0xFFu8; 16];
+ // `buf.t` should be aligned to 8, so this should always succeed.
+ let (r, suffix) = Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[..]).unwrap();
+ // Assert that the suffix wasn't zeroed.
assert_eq!(suffix, &[0xFFu8; 8]);
- test_new_helper(lv);
+ test_new_helper(r);
}
{
- buf.clear_buf();
- // buf.buf should be aligned to 8, so this should always succeed
- let (prefix, lv) = LayoutVerified::<_, u64>::new_from_suffix(&mut buf.buf[..]).unwrap();
+ buf.set_default();
+ // `buf.t` should be aligned to 8, so this should always succeed.
+ let (prefix, r) = Ref::<_, AU64>::new_from_suffix(&mut buf.t[..]).unwrap();
assert_eq!(prefix.len(), 8);
- test_new_helper(lv);
+ test_new_helper(r);
}
{
- buf.buf = [0xFFu8; 16];
- // buf.buf should be aligned to 8, so this should always succeed
- let (prefix, lv) =
- LayoutVerified::<_, u64>::new_from_suffix_zeroed(&mut buf.buf[..]).unwrap();
- // assert that the prefix wasn't zeroed
+ buf.t = [0xFFu8; 16];
+ // `buf.t` should be aligned to 8, so this should always succeed.
+ let (prefix, r) = Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).unwrap();
+ // Assert that the prefix wasn't zeroed.
assert_eq!(prefix, &[0xFFu8; 8]);
- test_new_helper(lv);
+ test_new_helper(r);
}
}
#[test]
fn test_new_unaligned_oversized() {
// Test that an unaligned, overly-sized buffer works for
- // new_unaligned_from_prefix and new_unaligned_from_suffix, and that
+ // `new_unaligned_from_prefix` and `new_unaligned_from_suffix`, and that
// they return the remainder and prefix of the slice respectively. Test
- // that xxx_zeroed behaves the same, and zeroes the memory.
+ // that `xxx_zeroed` behaves the same, and zeroes the memory.
let mut buf = [0u8; 16];
{
- // in a block so that lv and suffix don't live too long
- let (lv, suffix) =
- LayoutVerified::<_, [u8; 8]>::new_unaligned_from_prefix(&mut buf[..]).unwrap();
+ // In a block so that `r` and `suffix` don't live too long.
+ let (r, suffix) = Ref::<_, [u8; 8]>::new_unaligned_from_prefix(&mut buf[..]).unwrap();
assert_eq!(suffix.len(), 8);
- test_new_helper_unaligned(lv);
+ test_new_helper_unaligned(r);
}
{
buf = [0xFFu8; 16];
- let (lv, suffix) =
- LayoutVerified::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf[..])
- .unwrap();
- // assert that the suffix wasn't zeroed
+ let (r, suffix) =
+ Ref::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf[..]).unwrap();
+ // Assert that the suffix wasn't zeroed.
assert_eq!(suffix, &[0xFF; 8]);
- test_new_helper_unaligned(lv);
+ test_new_helper_unaligned(r);
}
{
buf = [0u8; 16];
- let (prefix, lv) =
- LayoutVerified::<_, [u8; 8]>::new_unaligned_from_suffix(&mut buf[..]).unwrap();
+ let (prefix, r) = Ref::<_, [u8; 8]>::new_unaligned_from_suffix(&mut buf[..]).unwrap();
assert_eq!(prefix.len(), 8);
- test_new_helper_unaligned(lv);
+ test_new_helper_unaligned(r);
}
{
buf = [0xFFu8; 16];
- let (prefix, lv) =
- LayoutVerified::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf[..])
- .unwrap();
- // assert that the prefix wasn't zeroed
+ let (prefix, r) =
+ Ref::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf[..]).unwrap();
+ // Assert that the prefix wasn't zeroed.
assert_eq!(prefix, &[0xFF; 8]);
- test_new_helper_unaligned(lv);
+ test_new_helper_unaligned(r);
}
}
#[test]
#[allow(clippy::cognitive_complexity)]
fn test_new_error() {
- // fail because the buffer is too large
-
- // a buffer with an alignment of 8
- let mut buf = AlignedBuffer::<u64, [u8; 16]>::default();
- // buf.buf should be aligned to 8, so only the length check should fail
- assert!(LayoutVerified::<_, u64>::new(&buf.buf[..]).is_none());
- assert!(LayoutVerified::<_, u64>::new_zeroed(&mut buf.buf[..]).is_none());
- assert!(LayoutVerified::<_, [u8; 8]>::new_unaligned(&buf.buf[..]).is_none());
- assert!(LayoutVerified::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf.buf[..]).is_none());
-
- // fail because the buffer is too small
-
- // a buffer with an alignment of 8
- let mut buf = AlignedBuffer::<u64, [u8; 4]>::default();
- // buf.buf should be aligned to 8, so only the length check should fail
- assert!(LayoutVerified::<_, u64>::new(&buf.buf[..]).is_none());
- assert!(LayoutVerified::<_, u64>::new_zeroed(&mut buf.buf[..]).is_none());
- assert!(LayoutVerified::<_, [u8; 8]>::new_unaligned(&buf.buf[..]).is_none());
- assert!(LayoutVerified::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf.buf[..]).is_none());
- assert!(LayoutVerified::<_, u64>::new_from_prefix(&buf.buf[..]).is_none());
- assert!(LayoutVerified::<_, u64>::new_from_prefix_zeroed(&mut buf.buf[..]).is_none());
- assert!(LayoutVerified::<_, u64>::new_from_suffix(&buf.buf[..]).is_none());
- assert!(LayoutVerified::<_, u64>::new_from_suffix_zeroed(&mut buf.buf[..]).is_none());
- assert!(LayoutVerified::<_, [u8; 8]>::new_unaligned_from_prefix(&buf.buf[..]).is_none());
- assert!(LayoutVerified::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf.buf[..])
+ // Fail because the buffer is too large.
+
+ // A buffer with an alignment of 8.
+ let mut buf = Align::<[u8; 16], AU64>::default();
+ // `buf.t` should be aligned to 8, so only the length check should fail.
+ assert!(Ref::<_, AU64>::new(&buf.t[..]).is_none());
+ assert!(Ref::<_, AU64>::new_zeroed(&mut buf.t[..]).is_none());
+ assert!(Ref::<_, [u8; 8]>::new_unaligned(&buf.t[..]).is_none());
+ assert!(Ref::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf.t[..]).is_none());
+
+ // Fail because the buffer is too small.
+
+ // A buffer with an alignment of 8.
+ let mut buf = Align::<[u8; 4], AU64>::default();
+ // `buf.t` should be aligned to 8, so only the length check should fail.
+ assert!(Ref::<_, AU64>::new(&buf.t[..]).is_none());
+ assert!(Ref::<_, AU64>::new_zeroed(&mut buf.t[..]).is_none());
+ assert!(Ref::<_, [u8; 8]>::new_unaligned(&buf.t[..]).is_none());
+ assert!(Ref::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf.t[..]).is_none());
+ assert!(Ref::<_, AU64>::new_from_prefix(&buf.t[..]).is_none());
+ assert!(Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[..]).is_none());
+ assert!(Ref::<_, AU64>::new_from_suffix(&buf.t[..]).is_none());
+ assert!(Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).is_none());
+ assert!(Ref::<_, [u8; 8]>::new_unaligned_from_prefix(&buf.t[..]).is_none());
+ assert!(Ref::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf.t[..]).is_none());
+ assert!(Ref::<_, [u8; 8]>::new_unaligned_from_suffix(&buf.t[..]).is_none());
+ assert!(Ref::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf.t[..]).is_none());
+
+ // Fail because the length is not a multiple of the element size.
+
+ let mut buf = Align::<[u8; 12], AU64>::default();
+ // `buf.t` has length 12, but element size is 8.
+ assert!(Ref::<_, [AU64]>::new_slice(&buf.t[..]).is_none());
+ assert!(Ref::<_, [AU64]>::new_slice_zeroed(&mut buf.t[..]).is_none());
+ assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned(&buf.t[..]).is_none());
+ assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_zeroed(&mut buf.t[..]).is_none());
+
+ // Fail because the buffer is too short.
+ let mut buf = Align::<[u8; 12], AU64>::default();
+ // `buf.t` has length 12, but the element size is 8 (and we're expecting
+ // two of them).
+ assert!(Ref::<_, [AU64]>::new_slice_from_prefix(&buf.t[..], 2).is_none());
+ assert!(Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[..], 2).is_none());
+ assert!(Ref::<_, [AU64]>::new_slice_from_suffix(&buf.t[..], 2).is_none());
+ assert!(Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[..], 2).is_none());
+ assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix(&buf.t[..], 2).is_none());
+ assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix_zeroed(&mut buf.t[..], 2)
.is_none());
- assert!(LayoutVerified::<_, [u8; 8]>::new_unaligned_from_suffix(&buf.buf[..]).is_none());
- assert!(LayoutVerified::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf.buf[..])
+ assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix(&buf.t[..], 2).is_none());
+ assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix_zeroed(&mut buf.t[..], 2)
.is_none());
- // fail because the length is not a multiple of the element size
-
- let mut buf = AlignedBuffer::<u64, [u8; 12]>::default();
- // buf.buf has length 12, but element size is 8
- assert!(LayoutVerified::<_, [u64]>::new_slice(&buf.buf[..]).is_none());
- assert!(LayoutVerified::<_, [u64]>::new_slice_zeroed(&mut buf.buf[..]).is_none());
- assert!(LayoutVerified::<_, [[u8; 8]]>::new_slice_unaligned(&buf.buf[..]).is_none());
- assert!(
- LayoutVerified::<_, [[u8; 8]]>::new_slice_unaligned_zeroed(&mut buf.buf[..]).is_none()
- );
-
- // fail beacuse the buffer is too short.
- let mut buf = AlignedBuffer::<u64, [u8; 12]>::default();
- // buf.buf has length 12, but the element size is 8 (and we're expecting two of them).
- assert!(LayoutVerified::<_, [u64]>::new_slice_from_prefix(&buf.buf[..], 2).is_none());
- assert!(
- LayoutVerified::<_, [u64]>::new_slice_from_prefix_zeroed(&mut buf.buf[..], 2).is_none()
- );
- assert!(LayoutVerified::<_, [u64]>::new_slice_from_suffix(&buf.buf[..], 2).is_none());
- assert!(
- LayoutVerified::<_, [u64]>::new_slice_from_suffix_zeroed(&mut buf.buf[..], 2).is_none()
- );
- assert!(LayoutVerified::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix(&buf.buf[..], 2)
+ // Fail because the alignment is insufficient.
+
+ // A buffer with an alignment of 8. An odd buffer size is chosen so that
+ // the last byte of the buffer has odd alignment.
+ let mut buf = Align::<[u8; 13], AU64>::default();
+ // Slicing from 1, we get a buffer with size 12 (so the length check
+ // should succeed) but an alignment of only 1, which is insufficient.
+ assert!(Ref::<_, AU64>::new(&buf.t[1..]).is_none());
+ assert!(Ref::<_, AU64>::new_zeroed(&mut buf.t[1..]).is_none());
+ assert!(Ref::<_, AU64>::new_from_prefix(&buf.t[1..]).is_none());
+ assert!(Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[1..]).is_none());
+ assert!(Ref::<_, [AU64]>::new_slice(&buf.t[1..]).is_none());
+ assert!(Ref::<_, [AU64]>::new_slice_zeroed(&mut buf.t[1..]).is_none());
+ assert!(Ref::<_, [AU64]>::new_slice_from_prefix(&buf.t[1..], 1).is_none());
+ assert!(Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[1..], 1).is_none());
+ assert!(Ref::<_, [AU64]>::new_slice_from_suffix(&buf.t[1..], 1).is_none());
+ assert!(Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[1..], 1).is_none());
+ // Slicing is unnecessary here because `new_from_suffix[_zeroed]` use
+ // the suffix of the slice, which has odd alignment.
+ assert!(Ref::<_, AU64>::new_from_suffix(&buf.t[..]).is_none());
+ assert!(Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).is_none());
+
+ // Fail due to arithmetic overflow.
+
+ let mut buf = Align::<[u8; 16], AU64>::default();
+ let unreasonable_len = usize::MAX / mem::size_of::<AU64>() + 1;
+ assert!(Ref::<_, [AU64]>::new_slice_from_prefix(&buf.t[..], unreasonable_len).is_none());
+ assert!(Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[..], unreasonable_len)
.is_none());
- assert!(LayoutVerified::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix_zeroed(
- &mut buf.buf[..],
- 2
- )
- .is_none());
- assert!(LayoutVerified::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix(&buf.buf[..], 2)
+ assert!(Ref::<_, [AU64]>::new_slice_from_suffix(&buf.t[..], unreasonable_len).is_none());
+ assert!(Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[..], unreasonable_len)
.is_none());
- assert!(LayoutVerified::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix_zeroed(
- &mut buf.buf[..],
- 2
- )
- .is_none());
-
- // fail because the alignment is insufficient
-
- // a buffer with an alignment of 8
- let mut buf = AlignedBuffer::<u64, [u8; 12]>::default();
- // slicing from 4, we get a buffer with size 8 (so the length check
- // should succeed) but an alignment of only 4, which is insufficient
- assert!(LayoutVerified::<_, u64>::new(&buf.buf[4..]).is_none());
- assert!(LayoutVerified::<_, u64>::new_zeroed(&mut buf.buf[4..]).is_none());
- assert!(LayoutVerified::<_, u64>::new_from_prefix(&buf.buf[4..]).is_none());
- assert!(LayoutVerified::<_, u64>::new_from_prefix_zeroed(&mut buf.buf[4..]).is_none());
- assert!(LayoutVerified::<_, [u64]>::new_slice(&buf.buf[4..]).is_none());
- assert!(LayoutVerified::<_, [u64]>::new_slice_zeroed(&mut buf.buf[4..]).is_none());
- assert!(LayoutVerified::<_, [u64]>::new_slice_from_prefix(&buf.buf[4..], 1).is_none());
- assert!(LayoutVerified::<_, [u64]>::new_slice_from_prefix_zeroed(&mut buf.buf[4..], 1)
- .is_none());
- assert!(LayoutVerified::<_, [u64]>::new_slice_from_suffix(&buf.buf[4..], 1).is_none());
- assert!(LayoutVerified::<_, [u64]>::new_slice_from_suffix_zeroed(&mut buf.buf[4..], 1)
- .is_none());
- // slicing from 4 should be unnecessary because new_from_suffix[_zeroed]
- // use the suffix of the slice
- assert!(LayoutVerified::<_, u64>::new_from_suffix(&buf.buf[..]).is_none());
- assert!(LayoutVerified::<_, u64>::new_from_suffix_zeroed(&mut buf.buf[..]).is_none());
-
- // fail due to arithmetic overflow
-
- let mut buf = AlignedBuffer::<u64, [u8; 16]>::default();
- let unreasonable_len = std::usize::MAX / mem::size_of::<u64>() + 1;
- assert!(LayoutVerified::<_, [u64]>::new_slice_from_prefix(&buf.buf[..], unreasonable_len)
+ assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix(&buf.t[..], unreasonable_len)
.is_none());
- assert!(LayoutVerified::<_, [u64]>::new_slice_from_prefix_zeroed(
- &mut buf.buf[..],
+ assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix_zeroed(
+ &mut buf.t[..],
unreasonable_len
)
.is_none());
- assert!(LayoutVerified::<_, [u64]>::new_slice_from_suffix(&buf.buf[..], unreasonable_len)
+ assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix(&buf.t[..], unreasonable_len)
.is_none());
- assert!(LayoutVerified::<_, [u64]>::new_slice_from_suffix_zeroed(
- &mut buf.buf[..],
- unreasonable_len
- )
- .is_none());
- assert!(LayoutVerified::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix(
- &buf.buf[..],
- unreasonable_len
- )
- .is_none());
- assert!(LayoutVerified::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix_zeroed(
- &mut buf.buf[..],
- unreasonable_len
- )
- .is_none());
- assert!(LayoutVerified::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix(
- &buf.buf[..],
- unreasonable_len
- )
- .is_none());
- assert!(LayoutVerified::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix_zeroed(
- &mut buf.buf[..],
+ assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix_zeroed(
+ &mut buf.t[..],
unreasonable_len
)
.is_none());
}
- // Tests for ensuring that, if a ZST is passed into a slice-like function, we always
- // panic. Since these tests need to be separate per-function, and they tend to take
- // up a lot of space, we generate them using a macro in a submodule instead. The
- // submodule ensures that we can just re-use the name of the function under test for
- // the name of the test itself.
+ // Tests for ensuring that, if a ZST is passed into a slice-like function,
+ // we always panic. Since these tests need to be separate per-function, and
+ // they tend to take up a lot of space, we generate them using a macro in a
+ // submodule instead. The submodule ensures that we can just re-use the name
+ // of the function under test for the name of the test itself.
mod test_zst_panics {
macro_rules! zst_test {
- ($name:ident($($tt:tt)*)) => {
+ ($name:ident($($tt:tt)*), $constructor_in_panic_msg:tt) => {
#[test]
- #[should_panic = "assertion failed"]
+ #[should_panic = concat!("Ref::", $constructor_in_panic_msg, " called on a zero-sized type")]
fn $name() {
let mut buffer = [0u8];
- let lv = $crate::LayoutVerified::<_, [()]>::$name(&mut buffer[..], $($tt)*);
- unreachable!("should have panicked, got {:?}", lv);
+ let r = $crate::Ref::<_, [()]>::$name(&mut buffer[..], $($tt)*);
+ unreachable!("should have panicked, got {:?}", r);
}
}
}
- zst_test!(new_slice());
- zst_test!(new_slice_zeroed());
- zst_test!(new_slice_from_prefix(1));
- zst_test!(new_slice_from_prefix_zeroed(1));
- zst_test!(new_slice_from_suffix(1));
- zst_test!(new_slice_from_suffix_zeroed(1));
- zst_test!(new_slice_unaligned());
- zst_test!(new_slice_unaligned_zeroed());
- zst_test!(new_slice_unaligned_from_prefix(1));
- zst_test!(new_slice_unaligned_from_prefix_zeroed(1));
- zst_test!(new_slice_unaligned_from_suffix(1));
- zst_test!(new_slice_unaligned_from_suffix_zeroed(1));
+ zst_test!(new_slice(), "new_slice");
+ zst_test!(new_slice_zeroed(), "new_slice");
+ zst_test!(new_slice_from_prefix(1), "new_slice");
+ zst_test!(new_slice_from_prefix_zeroed(1), "new_slice");
+ zst_test!(new_slice_from_suffix(1), "new_slice");
+ zst_test!(new_slice_from_suffix_zeroed(1), "new_slice");
+ zst_test!(new_slice_unaligned(), "new_slice_unaligned");
+ zst_test!(new_slice_unaligned_zeroed(), "new_slice_unaligned");
+ zst_test!(new_slice_unaligned_from_prefix(1), "new_slice_unaligned");
+ zst_test!(new_slice_unaligned_from_prefix_zeroed(1), "new_slice_unaligned");
+ zst_test!(new_slice_unaligned_from_suffix(1), "new_slice_unaligned");
+ zst_test!(new_slice_unaligned_from_suffix_zeroed(1), "new_slice_unaligned");
}
#[test]
fn test_as_bytes_methods() {
- #[derive(Debug, Eq, PartialEq, FromBytes, AsBytes)]
+ /// Run a series of tests by calling `AsBytes` methods on `t`.
+ ///
+ /// `bytes` is the expected byte sequence returned from `t.as_bytes()`
+ /// before `t` has been modified. `post_mutation` is the expected
+ /// sequence returned from `t.as_bytes()` after `t.as_bytes_mut()[0]`
+ /// has had its bits flipped (by applying `^= 0xFF`).
+ ///
+ /// `N` is the size of `t` in bytes.
+ fn test<T: FromBytes + AsBytes + Debug + Eq + ?Sized, const N: usize>(
+ t: &mut T,
+ bytes: &[u8],
+ post_mutation: &T,
+ ) {
+ // Test that we can access the underlying bytes, and that we get the
+ // right bytes and the right number of bytes.
+ assert_eq!(t.as_bytes(), bytes);
+
+ // Test that changes to the underlying byte slices are reflected in
+ // the original object.
+ t.as_bytes_mut()[0] ^= 0xFF;
+ assert_eq!(t, post_mutation);
+ t.as_bytes_mut()[0] ^= 0xFF;
+
+ // `write_to` rejects slices that are too small or too large.
+ assert_eq!(t.write_to(&mut vec![0; N - 1][..]), None);
+ assert_eq!(t.write_to(&mut vec![0; N + 1][..]), None);
+
+ // `write_to` works as expected.
+ let mut bytes = [0; N];
+ assert_eq!(t.write_to(&mut bytes[..]), Some(()));
+ assert_eq!(bytes, t.as_bytes());
+
+ // `write_to_prefix` rejects slices that are too small.
+ assert_eq!(t.write_to_prefix(&mut vec![0; N - 1][..]), None);
+
+ // `write_to_prefix` works with exact-sized slices.
+ let mut bytes = [0; N];
+ assert_eq!(t.write_to_prefix(&mut bytes[..]), Some(()));
+ assert_eq!(bytes, t.as_bytes());
+
+ // `write_to_prefix` works with too-large slices, and any bytes past
+ // the prefix aren't modified.
+ let mut too_many_bytes = vec![0; N + 1];
+ too_many_bytes[N] = 123;
+ assert_eq!(t.write_to_prefix(&mut too_many_bytes[..]), Some(()));
+ assert_eq!(&too_many_bytes[..N], t.as_bytes());
+ assert_eq!(too_many_bytes[N], 123);
+
+ // `write_to_suffix` rejects slices that are too small.
+ assert_eq!(t.write_to_suffix(&mut vec![0; N - 1][..]), None);
+
+ // `write_to_suffix` works with exact-sized slices.
+ let mut bytes = [0; N];
+ assert_eq!(t.write_to_suffix(&mut bytes[..]), Some(()));
+ assert_eq!(bytes, t.as_bytes());
+
+ // `write_to_suffix` works with too-large slices, and any bytes
+ // before the suffix aren't modified.
+ let mut too_many_bytes = vec![0; N + 1];
+ too_many_bytes[0] = 123;
+ assert_eq!(t.write_to_suffix(&mut too_many_bytes[..]), Some(()));
+ assert_eq!(&too_many_bytes[1..], t.as_bytes());
+ assert_eq!(too_many_bytes[0], 123);
+ }
+
+ #[derive(Debug, Eq, PartialEq, FromZeroes, FromBytes, AsBytes)]
#[repr(C)]
struct Foo {
a: u32,
- b: u32,
+ b: Wrapping<u32>,
+ c: Option<NonZeroU32>,
}
- let mut foo = Foo { a: 1, b: 2 };
- // Test that we can access the underlying bytes, and that we get the
- // right bytes and the right number of bytes.
- assert_eq!(foo.as_bytes(), [1, 0, 0, 0, 2, 0, 0, 0]);
- // Test that changes to the underlying byte slices are reflected in the
- // original object.
- foo.as_bytes_mut()[0] = 3;
- assert_eq!(foo, Foo { a: 3, b: 2 });
-
- // Do the same tests for a slice, which ensures that this logic works
- // for unsized types as well.
- let foo = &mut [Foo { a: 1, b: 2 }, Foo { a: 3, b: 4 }];
- assert_eq!(foo.as_bytes(), [1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 4, 0, 0, 0]);
- foo.as_bytes_mut()[8] = 5;
- assert_eq!(foo, &mut [Foo { a: 1, b: 2 }, Foo { a: 5, b: 4 }]);
+ let expected_bytes: Vec<u8> = if cfg!(target_endian = "little") {
+ vec![1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0]
+ } else {
+ vec![0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0]
+ };
+ let post_mutation_expected_a =
+ if cfg!(target_endian = "little") { 0x00_00_00_FE } else { 0xFF_00_00_01 };
+ test::<_, 12>(
+ &mut Foo { a: 1, b: Wrapping(2), c: None },
+ expected_bytes.as_bytes(),
+ &Foo { a: post_mutation_expected_a, b: Wrapping(2), c: None },
+ );
+ test::<_, 3>(
+ Unsized::from_mut_slice(&mut [1, 2, 3]),
+ &[1, 2, 3],
+ Unsized::from_mut_slice(&mut [0xFE, 2, 3]),
+ );
}
#[test]
fn test_array() {
- // This is a hack, as per above in `test_as_bytes_methods`.
- mod zerocopy {
- pub use crate::*;
- }
- #[derive(FromBytes, AsBytes)]
+ #[derive(FromZeroes, FromBytes, AsBytes)]
#[repr(C)]
struct Foo {
a: [u16; 33],
@@ -2645,209 +3915,172 @@ mod tests {
#[test]
fn test_display_debug() {
- let buf = AlignedBuffer::<u64, [u8; 8]>::default();
- let lv = LayoutVerified::<_, u64>::new(&buf.buf[..]).unwrap();
- assert_eq!(format!("{}", lv), "0");
- assert_eq!(format!("{:?}", lv), "LayoutVerified(0)");
+ let buf = Align::<[u8; 8], u64>::default();
+ let r = Ref::<_, u64>::new(&buf.t[..]).unwrap();
+ assert_eq!(format!("{}", r), "0");
+ assert_eq!(format!("{:?}", r), "Ref(0)");
- let buf = AlignedBuffer::<u64, [u8; 8]>::default();
- let lv = LayoutVerified::<_, [u64]>::new_slice(&buf.buf[..]).unwrap();
- assert_eq!(format!("{:?}", lv), "LayoutVerified([0])");
+ let buf = Align::<[u8; 8], u64>::default();
+ let r = Ref::<_, [u64]>::new_slice(&buf.t[..]).unwrap();
+ assert_eq!(format!("{:?}", r), "Ref([0])");
}
#[test]
fn test_eq() {
- let buf = [0u8; 8];
- let lv1 = LayoutVerified::<_, u64>::new(&buf[..]).unwrap();
- let lv2 = LayoutVerified::<_, u64>::new(&buf[..]).unwrap();
- assert_eq!(lv1, lv2);
+ let buf1 = 0_u64;
+ let r1 = Ref::<_, u64>::new(buf1.as_bytes()).unwrap();
+ let buf2 = 0_u64;
+ let r2 = Ref::<_, u64>::new(buf2.as_bytes()).unwrap();
+ assert_eq!(r1, r2);
}
#[test]
fn test_ne() {
- let buf1 = [0u8; 8];
- let lv1 = LayoutVerified::<_, u64>::new(&buf1[..]).unwrap();
- let buf2 = [1u8; 8];
- let lv2 = LayoutVerified::<_, u64>::new(&buf2[..]).unwrap();
- assert_ne!(lv1, lv2);
+ let buf1 = 0_u64;
+ let r1 = Ref::<_, u64>::new(buf1.as_bytes()).unwrap();
+ let buf2 = 1_u64;
+ let r2 = Ref::<_, u64>::new(buf2.as_bytes()).unwrap();
+ assert_ne!(r1, r2);
}
#[test]
fn test_ord() {
- let buf1 = [0u8; 8];
- let lv1 = LayoutVerified::<_, u64>::new(&buf1[..]).unwrap();
- let buf2 = [1u8; 8];
- let lv2 = LayoutVerified::<_, u64>::new(&buf2[..]).unwrap();
- assert!(lv1 < lv2);
+ let buf1 = 0_u64;
+ let r1 = Ref::<_, u64>::new(buf1.as_bytes()).unwrap();
+ let buf2 = 1_u64;
+ let r2 = Ref::<_, u64>::new(buf2.as_bytes()).unwrap();
+ assert!(r1 < r2);
}
#[test]
fn test_new_zeroed() {
+ assert!(!bool::new_zeroed());
assert_eq!(u64::new_zeroed(), 0);
- assert_eq!(<()>::new_zeroed(), ());
- }
-
- #[test]
- fn test_new_box_zeroed() {
- assert_eq!(*u64::new_box_zeroed(), 0);
- }
-
- #[test]
- fn test_new_box_zeroed_array() {
- drop(<[u32; 0x1000]>::new_box_zeroed());
- }
-
- #[test]
- fn test_new_box_zeroed_zst() {
- assert_eq!(*<()>::new_box_zeroed(), ());
- }
-
- #[test]
- fn test_new_box_slice_zeroed() {
- let mut s: Box<[u64]> = u64::new_box_slice_zeroed(3);
- assert_eq!(s.len(), 3);
- assert_eq!(&*s, &[0, 0, 0]);
- s[1] = 3;
- assert_eq!(&*s, &[0, 3, 0]);
- }
-
- #[test]
- fn test_new_box_slice_zeroed_empty() {
- let s: Box<[u64]> = u64::new_box_slice_zeroed(0);
- assert_eq!(s.len(), 0);
+ // This test exists in order to exercise unsafe code, especially when
+ // running under Miri.
+ #[allow(clippy::unit_cmp)]
+ {
+ assert_eq!(<()>::new_zeroed(), ());
+ }
}
#[test]
- fn test_new_box_slice_zeroed_zst() {
- let mut s: Box<[()]> = <()>::new_box_slice_zeroed(3);
- assert_eq!(s.len(), 3);
- assert!(s.get(10).is_none());
- assert_eq!(s[1], ());
- s[2] = ();
- }
+ fn test_transparent_packed_generic_struct() {
+ #[derive(AsBytes, FromZeroes, FromBytes, Unaligned)]
+ #[repr(transparent)]
+ struct Foo<T> {
+ _t: T,
+ _phantom: PhantomData<()>,
+ }
- #[test]
- fn test_new_box_slice_zeroed_zst_empty() {
- let s: Box<[()]> = <()>::new_box_slice_zeroed(0);
- assert_eq!(s.len(), 0);
- }
+ assert_impl_all!(Foo<u32>: FromZeroes, FromBytes, AsBytes);
+ assert_impl_all!(Foo<u8>: Unaligned);
- #[test]
- fn test_extend_vec_zeroed() {
- // test extending when there is an existing allocation
- let mut v: Vec<u64> = Vec::with_capacity(3);
- v.push(100);
- v.push(200);
- v.push(300);
- extend_vec_zeroed(&mut v, 3);
- assert_eq!(v.len(), 6);
- assert_eq!(&*v, &[100, 200, 300, 0, 0, 0]);
- drop(v);
-
- // test extending when there is no existing allocation
- let mut v: Vec<u64> = Vec::new();
- extend_vec_zeroed(&mut v, 3);
- assert_eq!(v.len(), 3);
- assert_eq!(&*v, &[0, 0, 0]);
- drop(v);
- }
+ #[derive(AsBytes, FromZeroes, FromBytes, Unaligned)]
+ #[repr(packed)]
+ struct Bar<T, U> {
+ _t: T,
+ _u: U,
+ }
- #[test]
- fn test_extend_vec_zeroed_zst() {
- // test extending when there is an existing (fake) allocation
- let mut v: Vec<()> = Vec::with_capacity(3);
- v.push(());
- v.push(());
- v.push(());
- extend_vec_zeroed(&mut v, 3);
- assert_eq!(v.len(), 6);
- assert_eq!(&*v, &[(), (), (), (), (), ()]);
- drop(v);
-
- // test extending when there is no existing (fake) allocation
- let mut v: Vec<()> = Vec::new();
- extend_vec_zeroed(&mut v, 3);
- assert_eq!(&*v, &[(), (), ()]);
- drop(v);
+ assert_impl_all!(Bar<u8, AU64>: FromZeroes, FromBytes, AsBytes, Unaligned);
}
#[test]
- fn test_insert_vec_zeroed() {
- // insert at start (no existing allocation)
- let mut v: Vec<u64> = Vec::new();
- insert_vec_zeroed(&mut v, 0, 2);
- assert_eq!(v.len(), 2);
- assert_eq!(&*v, &[0, 0]);
- drop(v);
-
- // insert at start
- let mut v: Vec<u64> = Vec::with_capacity(3);
- v.push(100);
- v.push(200);
- v.push(300);
- insert_vec_zeroed(&mut v, 0, 2);
- assert_eq!(v.len(), 5);
- assert_eq!(&*v, &[0, 0, 100, 200, 300]);
- drop(v);
-
- // insert at middle
- let mut v: Vec<u64> = Vec::with_capacity(3);
- v.push(100);
- v.push(200);
- v.push(300);
- insert_vec_zeroed(&mut v, 1, 1);
- assert_eq!(v.len(), 4);
- assert_eq!(&*v, &[100, 0, 200, 300]);
- drop(v);
-
- // insert at end
- let mut v: Vec<u64> = Vec::with_capacity(3);
- v.push(100);
- v.push(200);
- v.push(300);
- insert_vec_zeroed(&mut v, 3, 1);
- assert_eq!(v.len(), 4);
- assert_eq!(&*v, &[100, 200, 300, 0]);
- drop(v);
- }
+ fn test_impls() {
+ // Asserts that `$ty` implements any `$trait` and doesn't implement any
+ // `!$trait`. Note that all `$trait`s must come before any `!$trait`s.
+ macro_rules! assert_impls {
+ ($ty:ty: $trait:ident) => {
+ #[allow(dead_code)]
+ const _: () = { static_assertions::assert_impl_all!($ty: $trait); };
+ };
+ ($ty:ty: !$trait:ident) => {
+ #[allow(dead_code)]
+ const _: () = { static_assertions::assert_not_impl_any!($ty: $trait); };
+ };
+ ($ty:ty: $($trait:ident),* $(,)? $(!$negative_trait:ident),*) => {
+ $(
+ assert_impls!($ty: $trait);
+ )*
+
+ $(
+ assert_impls!($ty: !$negative_trait);
+ )*
+ };
+ }
- #[test]
- fn test_insert_vec_zeroed_zst() {
- // insert at start (no existing fake allocation)
- let mut v: Vec<()> = Vec::new();
- insert_vec_zeroed(&mut v, 0, 2);
- assert_eq!(v.len(), 2);
- assert_eq!(&*v, &[(), ()]);
- drop(v);
-
- // insert at start
- let mut v: Vec<()> = Vec::with_capacity(3);
- v.push(());
- v.push(());
- v.push(());
- insert_vec_zeroed(&mut v, 0, 2);
- assert_eq!(v.len(), 5);
- assert_eq!(&*v, &[(), (), (), (), ()]);
- drop(v);
-
- // insert at middle
- let mut v: Vec<()> = Vec::with_capacity(3);
- v.push(());
- v.push(());
- v.push(());
- insert_vec_zeroed(&mut v, 1, 1);
- assert_eq!(v.len(), 4);
- assert_eq!(&*v, &[(), (), (), ()]);
- drop(v);
-
- // insert at end
- let mut v: Vec<()> = Vec::with_capacity(3);
- v.push(());
- v.push(());
- v.push(());
- insert_vec_zeroed(&mut v, 3, 1);
- assert_eq!(v.len(), 4);
- assert_eq!(&*v, &[(), (), (), ()]);
- drop(v);
+ assert_impls!((): FromZeroes, FromBytes, AsBytes, Unaligned);
+ assert_impls!(u8: FromZeroes, FromBytes, AsBytes, Unaligned);
+ assert_impls!(i8: FromZeroes, FromBytes, AsBytes, Unaligned);
+ assert_impls!(u16: FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(i16: FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(u32: FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(i32: FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(u64: FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(i64: FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(u128: FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(i128: FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(usize: FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(isize: FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(f32: FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(f64: FromZeroes, FromBytes, AsBytes, !Unaligned);
+
+ assert_impls!(bool: FromZeroes, AsBytes, Unaligned, !FromBytes);
+ assert_impls!(char: FromZeroes, AsBytes, !FromBytes, !Unaligned);
+ assert_impls!(str: FromZeroes, AsBytes, Unaligned, !FromBytes);
+
+ assert_impls!(NonZeroU8: AsBytes, Unaligned, !FromZeroes, !FromBytes);
+ assert_impls!(NonZeroI8: AsBytes, Unaligned, !FromZeroes, !FromBytes);
+ assert_impls!(NonZeroU16: AsBytes, !FromZeroes, !FromBytes, !Unaligned);
+ assert_impls!(NonZeroI16: AsBytes, !FromZeroes, !FromBytes, !Unaligned);
+ assert_impls!(NonZeroU32: AsBytes, !FromZeroes, !FromBytes, !Unaligned);
+ assert_impls!(NonZeroI32: AsBytes, !FromZeroes, !FromBytes, !Unaligned);
+ assert_impls!(NonZeroU64: AsBytes, !FromZeroes, !FromBytes, !Unaligned);
+ assert_impls!(NonZeroI64: AsBytes, !FromZeroes, !FromBytes, !Unaligned);
+ assert_impls!(NonZeroU128: AsBytes, !FromZeroes, !FromBytes, !Unaligned);
+ assert_impls!(NonZeroI128: AsBytes, !FromZeroes, !FromBytes, !Unaligned);
+ assert_impls!(NonZeroUsize: AsBytes, !FromZeroes, !FromBytes, !Unaligned);
+ assert_impls!(NonZeroIsize: AsBytes, !FromZeroes, !FromBytes, !Unaligned);
+
+ assert_impls!(Option<NonZeroU8>: FromZeroes, FromBytes, AsBytes, Unaligned);
+ assert_impls!(Option<NonZeroI8>: FromZeroes, FromBytes, AsBytes, Unaligned);
+ assert_impls!(Option<NonZeroU16>: FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(Option<NonZeroI16>: FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(Option<NonZeroU32>: FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(Option<NonZeroI32>: FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(Option<NonZeroU64>: FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(Option<NonZeroI64>: FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(Option<NonZeroU128>: FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(Option<NonZeroI128>: FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(Option<NonZeroUsize>: FromZeroes, FromBytes, AsBytes, !Unaligned);
+ assert_impls!(Option<NonZeroIsize>: FromZeroes, FromBytes, AsBytes, !Unaligned);
+
+ // Implements none of the ZC traits.
+ struct NotZerocopy;
+
+ assert_impls!(PhantomData<NotZerocopy>: FromZeroes, FromBytes, AsBytes, Unaligned);
+ assert_impls!(PhantomData<[u8]>: FromZeroes, FromBytes, AsBytes, Unaligned);
+
+ assert_impls!(ManuallyDrop<u8>: FromZeroes, FromBytes, AsBytes, Unaligned);
+ assert_impls!(ManuallyDrop<[u8]>: FromZeroes, FromBytes, AsBytes, Unaligned);
+ assert_impls!(ManuallyDrop<NotZerocopy>: !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
+ assert_impls!(ManuallyDrop<[NotZerocopy]>: !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
+
+ assert_impls!(MaybeUninit<u8>: FromZeroes, FromBytes, Unaligned, !AsBytes);
+ assert_impls!(MaybeUninit<NotZerocopy>: !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
+
+ assert_impls!(Wrapping<u8>: FromZeroes, FromBytes, AsBytes, Unaligned);
+ assert_impls!(Wrapping<NotZerocopy>: !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
+
+ assert_impls!(Unalign<u8>: FromZeroes, FromBytes, AsBytes, Unaligned);
+ assert_impls!(Unalign<NotZerocopy>: Unaligned, !FromZeroes, !FromBytes, !AsBytes);
+
+ assert_impls!([u8]: FromZeroes, FromBytes, AsBytes, Unaligned);
+ assert_impls!([NotZerocopy]: !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
+ assert_impls!([u8; 0]: FromZeroes, FromBytes, AsBytes, Unaligned);
+ assert_impls!([NotZerocopy; 0]: !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
+ assert_impls!([u8; 1]: FromZeroes, FromBytes, AsBytes, Unaligned);
+ assert_impls!([NotZerocopy; 1]: !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
}
}
diff --git a/src/macros.rs b/src/macros.rs
new file mode 100644
index 0000000..aebc8d6
--- /dev/null
+++ b/src/macros.rs
@@ -0,0 +1,250 @@
+// Copyright 2023 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/// Documents multiple unsafe blocks with a single safety comment.
+///
+/// Invoked as:
+///
+/// ```rust,ignore
+/// safety_comment! {
+/// // Non-doc comments come first.
+/// /// SAFETY:
+/// /// Safety comment starts on its own line.
+/// macro_1!(args);
+/// macro_2! { args };
+/// /// SAFETY:
+/// /// Subsequent safety comments are allowed but not required.
+/// macro_3! { args };
+/// }
+/// ```
+///
+/// The macro invocations are emitted, each decorated with the following
+/// attribute: `#[allow(clippy::undocumented_unsafe_blocks)]`.
+macro_rules! safety_comment {
+ (#[doc = r" SAFETY:"] $($(#[doc = $_doc:literal])* $macro:ident!$args:tt;)*) => {
+ #[allow(clippy::undocumented_unsafe_blocks)]
+ const _: () = { $($macro!$args;)* };
+ }
+}
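+
+// For reference, the invocation shown in the doc comment above expands to
+// roughly the following (a sketch; the `SAFETY` doc comments are consumed
+// by the matcher):
+//
+//     #[allow(clippy::undocumented_unsafe_blocks)]
+//     const _: () = { macro_1!(args); macro_2! { args }; macro_3! { args }; };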
+
+/// Unsafely implements trait(s) for a type.
+macro_rules! unsafe_impl {
+ // Implement `$trait` for `$ty` with no bounds.
+ ($ty:ty: $trait:ty) => {
+ unsafe impl $trait for $ty { #[allow(clippy::missing_inline_in_public_items)] fn only_derive_is_allowed_to_implement_this_trait() {} }
+ };
+ // Implement all `$traits` for `$ty` with no bounds.
+ ($ty:ty: $($traits:ty),*) => {
+ $( unsafe_impl!($ty: $traits); )*
+ };
+ // This arm is identical to the following one, except it contains a
+ // preceding `const`. If we attempt to handle these with a single arm, there
+ // is an inherent ambiguity between `const` (the keyword) and `const` (the
+ // ident match for `$tyvar:ident`).
+ //
+ // To explain how this works, consider the following invocation:
+ //
+ // unsafe_impl!(const N: usize, T: ?Sized + Copy => Clone for Foo<T>);
+ //
+ // In this invocation, here are the assignments to meta-variables:
+ //
+ // |---------------|------------|
+ // | Meta-variable | Assignment |
+ // |---------------|------------|
+ // | $constname | N |
+ // | $constty | usize |
+ // | $tyvar | T |
+ // | $optbound | Sized |
+ // | $bound | Copy |
+ // | $trait | Clone |
+ // | $ty | Foo<T> |
+ // |---------------|------------|
+ //
+ // The following arm has the same behavior with the exception of the lack of
+ // support for a leading `const` parameter.
+ (
+ const $constname:ident : $constty:ident $(,)?
+ $($tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?),*
+ => $trait:ident for $ty:ty
+ ) => {
+ unsafe_impl!(
+ @inner
+ @const $constname: $constty,
+ $($tyvar $(: $(? $optbound +)* + $($bound +)*)?,)*
+ => $trait for $ty
+ );
+ };
+ (
+ $($tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?),*
+ => $trait:ident for $ty:ty
+ ) => {
+ unsafe_impl!(
+ @inner
+ $($tyvar $(: $(? $optbound +)* + $($bound +)*)?,)*
+ => $trait for $ty
+ );
+ };
+ (
+ @inner
+ $(@const $constname:ident : $constty:ident,)*
+ $($tyvar:ident $(: $(? $optbound:ident +)* + $($bound:ident +)* )?,)*
+ => $trait:ident for $ty:ty
+ ) => {
+ unsafe impl<$(const $constname: $constty,)* $($tyvar $(: $(? $optbound +)* $($bound +)*)?),*> $trait for $ty {
+ #[allow(clippy::missing_inline_in_public_items)]
+ fn only_derive_is_allowed_to_implement_this_trait() {}
+ }
+ };
+}
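+
+// Hypothetical usage sketches, one per arm shape above (the real call sites
+// live in `lib.rs`, inside `safety_comment!` blocks):
+//
+//     unsafe_impl!(u8: FromZeroes, FromBytes, AsBytes, Unaligned);
+//     unsafe_impl!(T: FromZeroes => FromZeroes for Wrapping<T>);
+//     unsafe_impl!(const N: usize, T: FromBytes => FromBytes for [T; N]);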
+
+/// Implements trait(s) for a type or verifies the given implementation by
+/// referencing an existing (derived) implementation.
+///
+/// This macro exists so that we can provide zerocopy-derive as an optional
+/// dependency and still get the benefit of using its derives to validate that
+/// our trait impls are sound.
+///
+/// When compiling without `--cfg 'feature = "derive"` and without `--cfg test`,
+/// `impl_or_verify!` emits the provided trait impl. When compiling with either
+/// of those cfgs, it is expected that the type in question is deriving the
+/// traits instead. In this case, `impl_or_verify!` emits code which validates
+/// that the given trait impl is at least as restrictive as the impl emitted
+/// by the custom derive. This has the effect of confirming that the impl which
+/// is emitted when the `derive` feature is disabled is actually sound (on the
+/// assumption that the impl emitted by the custom derive is sound).
+///
+/// The caller is still required to provide a safety comment (e.g. using the
+/// `safety_comment!` macro). The reason for this restriction is that, while
+/// `impl_or_verify!` can guarantee that the provided impl is sound when it is
+/// compiled with the appropriate cfgs, there is no way to guarantee that it is
+/// ever compiled with those cfgs. In particular, it would be possible to
+/// accidentally place an `impl_or_verify!` call in a context that is only ever
+/// compiled when the `derive` feature is disabled. If that were to happen,
+/// there would be nothing to prevent an unsound trait impl from being emitted.
+/// Requiring a safety comment reduces the likelihood of emitting an unsound
+/// impl in this case, and also provides useful documentation for readers of the
+/// code.
+///
+/// ## Example
+///
+/// ```rust,ignore
+/// // Note that these derives are gated by `feature = "derive"`
+/// #[cfg_attr(any(feature = "derive", test), derive(FromZeroes, FromBytes, AsBytes, Unaligned))]
+/// #[repr(transparent)]
+/// struct Wrapper<T>(T);
+///
+/// safety_comment! {
+/// /// SAFETY:
+/// /// `Wrapper<T>` is `repr(transparent)`, so it is sound to implement any
+/// /// zerocopy trait if `T` implements that trait.
+/// impl_or_verify!(T: FromZeroes => FromZeroes for Wrapper<T>);
+/// impl_or_verify!(T: FromBytes => FromBytes for Wrapper<T>);
+/// impl_or_verify!(T: AsBytes => AsBytes for Wrapper<T>);
+/// impl_or_verify!(T: Unaligned => Unaligned for Wrapper<T>);
+/// }
+/// ```
+macro_rules! impl_or_verify {
+ // The following two match arms follow the same pattern as their
+ // counterparts in `unsafe_impl!`; see the documentation on those arms for
+ // more details.
+ (
+ const $constname:ident : $constty:ident $(,)?
+ $($tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?),*
+ => $trait:ident for $ty:ty
+ ) => {
+ impl_or_verify!(@impl { unsafe_impl!(
+ const $constname: $constty, $($tyvar $(: $(? $optbound +)* $($bound +)*)?),* => $trait for $ty
+ ); });
+ impl_or_verify!(@verify $trait, {
+ impl<const $constname: $constty, $($tyvar $(: $(? $optbound +)* $($bound +)*)?),*> Subtrait for $ty {}
+ });
+ };
+ (
+ $($tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?),*
+ => $trait:ident for $ty:ty
+ ) => {
+ impl_or_verify!(@impl { unsafe_impl!(
+ $($tyvar $(: $(? $optbound +)* $($bound +)*)?),* => $trait for $ty
+ ); });
+ impl_or_verify!(@verify $trait, {
+ impl<$($tyvar $(: $(? $optbound +)* $($bound +)*)?),*> Subtrait for $ty {}
+ });
+ };
+ (
+ $($tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?),*
+ => $trait:ident for $ty:ty
+ ) => {
+ unsafe_impl!(
+ @inner
+ $($tyvar $(: $(? $optbound +)* + $($bound +)*)?,)*
+ => $trait for $ty
+ );
+ };
+ (@impl $impl_block:tt) => {
+ #[cfg(not(any(feature = "derive", test)))]
+ const _: () = { $impl_block };
+ };
+ (@verify $trait:ident, $impl_block:tt) => {
+ #[cfg(any(feature = "derive", test))]
+ const _: () = {
+ trait Subtrait: $trait {}
+ $impl_block
+ };
+ };
+}
+
+/// Implements `KnownLayout` for a sized type.
+macro_rules! impl_known_layout {
+ ($(const $constvar:ident : $constty:ty, $tyvar:ident $(: ?$optbound:ident)? => $ty:ty),* $(,)?) => {
+ $(impl_known_layout!(@inner const $constvar: $constty, $tyvar $(: ?$optbound)? => $ty);)*
+ };
+ ($($tyvar:ident $(: ?$optbound:ident)? => $ty:ty),* $(,)?) => {
+ $(impl_known_layout!(@inner , $tyvar $(: ?$optbound)? => $ty);)*
+ };
+ ($($ty:ty),*) => { $(impl_known_layout!(@inner , => $ty);)* };
+ (@inner $(const $constvar:ident : $constty:ty)? , $($tyvar:ident $(: ?$optbound:ident)?)? => $ty:ty) => {
+ impl<$(const $constvar : $constty,)? $($tyvar $(: ?$optbound)?)?> sealed::KnownLayoutSealed for $ty {}
+ // SAFETY: Delegates safety to `DstLayout::for_type`.
+ unsafe impl<$(const $constvar : $constty,)? $($tyvar $(: ?$optbound)?)?> KnownLayout for $ty {
+ const LAYOUT: DstLayout = DstLayout::for_type::<$ty>();
+ }
+ };
+}
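+
+// Hypothetical usage sketches, one per public arm above:
+//
+//     impl_known_layout!(u8, AU64);
+//     impl_known_layout!(T: ?Sized => PhantomData<T>);
+//     impl_known_layout!(const N: usize, T => [T; N]);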
+
+/// Implements `KnownLayout` for a type in terms of the implementation of
+/// another type with the same representation.
+///
+/// # Safety
+///
+/// - `$ty` and `$repr` must have the same:
+/// - Fixed prefix size
+/// - Alignment
+/// - (For DSTs) trailing slice element size
+/// - It must be valid to perform an `as` cast from `*mut $repr` to `*mut $ty`,
+/// and this operation must preserve referent size (i.e., `size_of_val_raw`).
+macro_rules! unsafe_impl_known_layout {
+ ($($tyvar:ident: ?Sized + KnownLayout =>)? #[repr($repr:ty)] $ty:ty) => {
+ impl<$($tyvar: ?Sized + KnownLayout)?> sealed::KnownLayoutSealed for $ty {}
+ unsafe impl<$($tyvar: ?Sized + KnownLayout)?> KnownLayout for $ty {
+ const LAYOUT: DstLayout = <$repr as KnownLayout>::LAYOUT;
+ }
+ };
+}
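+
+// Hypothetical usage sketch (upholding the safety rules above is the
+// caller's responsibility):
+//
+//     unsafe_impl_known_layout!(#[repr([u8])] str);
+//     unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] ManuallyDrop<T>);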
+
+/// Uses `align_of` to confirm that a type or set of types have alignment 1.
+///
+/// Note that `align_of::<T>()` requires `T: Sized`, so this macro doesn't work for
+/// unsized types.
+macro_rules! assert_unaligned {
+ ($ty:ty) => {
+ // We only compile this assertion under `cfg(test)` to avoid taking an
+ // extra non-dev dependency (and making this crate more expensive to
+ // compile for our dependents).
+ #[cfg(test)]
+ static_assertions::const_assert_eq!(core::mem::align_of::<$ty>(), 1);
+ };
+ ($($ty:ty),*) => {
+ $(assert_unaligned!($ty);)*
+ };
+}
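+
+// Hypothetical usage sketch; all of these types have alignment 1:
+//
+//     assert_unaligned!(u8, i8, Unalign<u64>);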
diff --git a/src/util.rs b/src/util.rs
new file mode 100644
index 0000000..ed810dc
--- /dev/null
+++ b/src/util.rs
@@ -0,0 +1,105 @@
+// Copyright 2023 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use core::mem;
+
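+/// A type from which a memory address can be extracted as a `usize`.
+///
+/// This is implemented for references and raw pointers so that `aligned_to`
+/// can accept any of them.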
+pub(crate) trait AsAddress {
+ fn addr(self) -> usize;
+}
+
+impl<'a, T: ?Sized> AsAddress for &'a T {
+ #[inline(always)]
+ fn addr(self) -> usize {
+ let ptr: *const T = self;
+ AsAddress::addr(ptr)
+ }
+}
+
+impl<'a, T: ?Sized> AsAddress for &'a mut T {
+ #[inline(always)]
+ fn addr(self) -> usize {
+ let ptr: *const T = self;
+ AsAddress::addr(ptr)
+ }
+}
+
+impl<T: ?Sized> AsAddress for *const T {
+ #[inline(always)]
+ fn addr(self) -> usize {
+ // TODO(https://github.com/rust-lang/rust/issues/95228): Use `.addr()`
+ // instead of `as usize` once it's stable, and get rid of this `allow`.
+ // Currently, `as usize` is the only way to accomplish this.
+ #[allow(clippy::as_conversions)]
+ return self.cast::<()>() as usize;
+ }
+}
+
+impl<T: ?Sized> AsAddress for *mut T {
+ #[inline(always)]
+ fn addr(self) -> usize {
+ let ptr: *const T = self;
+ AsAddress::addr(ptr)
+ }
+}
+
+/// Is `t` aligned to `mem::align_of::<U>()`?
+#[inline(always)]
+pub(crate) fn aligned_to<T: AsAddress, U>(t: T) -> bool {
+ // `mem::align_of::<U>()` is guaranteed to return a non-zero value, which in
+ // turn guarantees that this mod operation will not panic.
+ #[allow(clippy::arithmetic_side_effects)]
+ let remainder = t.addr() % mem::align_of::<U>();
+ remainder == 0
+}
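+
+// A sketch of the intended use (illustrative): thanks to `AsAddress`,
+// `aligned_to` accepts references and raw pointers alike.
+//
+//     let x = 0u64;
+//     assert!(aligned_to::<_, u8>(&x)); // alignment 1 is always satisfied
+//     assert!(aligned_to::<_, u64>(&x)); // `&x` meets `u64`'s alignment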
+
+#[cfg(test)]
+pub(crate) mod testutil {
+ use core::fmt::{self, Display, Formatter};
+
+ use crate::*;
+
+ /// A `T` which is aligned to at least `align_of::<A>()`.
+ #[derive(Default)]
+ pub(crate) struct Align<T, A> {
+ pub(crate) t: T,
+ _a: [A; 0],
+ }
+
+ impl<T: Default, A> Align<T, A> {
+ pub(crate) fn set_default(&mut self) {
+ self.t = T::default();
+ }
+ }
+
+ impl<T, A> Align<T, A> {
+ pub(crate) const fn new(t: T) -> Align<T, A> {
+ Align { t, _a: [] }
+ }
+ }
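+
+ // Usage sketch (illustrative): this is how the tests in `lib.rs` obtain
+ // a byte buffer with a guaranteed alignment of 8.
+ //
+ //     let buf = Align::<[u8; 16], AU64>::default();
+ //     assert_eq!(buf.t.as_ptr() as usize % 8, 0);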
+
+ // A `u64` with alignment 8.
+ //
+ // Though `u64` has alignment 8 on some platforms, it's not guaranteed.
+ // By contrast, `AU64` is guaranteed to have alignment 8.
+ #[derive(
+ FromZeroes, FromBytes, AsBytes, Eq, PartialEq, Ord, PartialOrd, Default, Debug, Copy, Clone,
+ )]
+ #[repr(C, align(8))]
+ pub(crate) struct AU64(pub(crate) u64);
+
+ impl AU64 {
+ // Converts this `AU64` to bytes using this platform's endianness.
+ pub(crate) fn to_bytes(self) -> [u8; 8] {
+ crate::transmute!(self)
+ }
+ }
+
+ impl Display for AU64 {
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ Display::fmt(&self.0, f)
+ }
+ }
+
+ impl_known_layout!(AU64);
+}
diff --git a/src/wrappers.rs b/src/wrappers.rs
new file mode 100644
index 0000000..a0e6ac7
--- /dev/null
+++ b/src/wrappers.rs
@@ -0,0 +1,497 @@
+// Copyright 2023 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use core::{
+ cmp::Ordering,
+ fmt::{self, Debug, Display, Formatter},
+ hash::Hash,
+ mem::{self, ManuallyDrop},
+ ops::{Deref, DerefMut},
+ ptr,
+};
+
+use super::*;
+
+/// A type with no alignment requirement.
+///
+/// An `Unalign` wraps a `T`, removing any alignment requirement. `Unalign<T>`
+/// has the same size and bit validity as `T`, but not necessarily the same
+/// alignment [or ABI]. This is useful if a type with an alignment requirement
+/// needs to be read from a chunk of memory which provides no alignment
+/// guarantees.
+///
+/// Since `Unalign` has no alignment requirement, the inner `T` may not be
+/// properly aligned in memory. There are five ways to access the inner `T`:
+/// - by value, using [`get`] or [`into_inner`]
+/// - by reference inside of a callback, using [`update`]
+/// - fallibly by reference, using [`try_deref`] or [`try_deref_mut`]; these can
+/// fail if the `Unalign` does not satisfy `T`'s alignment requirement at
+/// runtime
+/// - unsafely by reference, using [`deref_unchecked`] or
+/// [`deref_mut_unchecked`]; it is the caller's responsibility to ensure that
+/// the `Unalign` satisfies `T`'s alignment requirement
+/// - (where `T: Unaligned`) infallibly by reference, using [`Deref::deref`] or
+/// [`DerefMut::deref_mut`]
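+///
+/// A brief sketch of these access paths appears just after the type
+/// definition below.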
+///
+/// [or ABI]: https://github.com/google/zerocopy/issues/164
+/// [`get`]: Unalign::get
+/// [`into_inner`]: Unalign::into_inner
+/// [`update`]: Unalign::update
+/// [`try_deref`]: Unalign::try_deref
+/// [`try_deref_mut`]: Unalign::try_deref_mut
+/// [`deref_unchecked`]: Unalign::deref_unchecked
+/// [`deref_mut_unchecked`]: Unalign::deref_mut_unchecked
+// NOTE: This type is sound to use with types that need to be dropped. The
+// reason is that the compiler-generated drop code automatically moves all
+// values to aligned memory slots before dropping them in-place. This is not
+// well-documented, but it's hinted at in places like [1] and [2]. However, this
+// also means that `T` must be `Sized`; unless something changes, we can never
+// support unsized `T`. [3]
+//
+// [1] https://github.com/rust-lang/rust/issues/54148#issuecomment-420529646
+// [2] https://github.com/google/zerocopy/pull/126#discussion_r1018512323
+// [3] https://github.com/google/zerocopy/issues/209
+#[allow(missing_debug_implementations)]
+#[derive(Default, Copy)]
+#[cfg_attr(any(feature = "derive", test), derive(FromZeroes, FromBytes, AsBytes, Unaligned))]
+#[repr(C, packed)]
+pub struct Unalign<T>(T);
+
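+// A brief sketch of the access paths listed in the doc comment above
+// (illustrative; `u64` stands in for any `T`):
+//
+//     let mut u = Unalign::new(123u64);
+//     let v: u64 = u.get();                // by value (requires `T: Copy`)
+//     u.update(|t| *t += 1);               // by reference, via a callback
+//     let r: Option<&u64> = u.try_deref(); // fallible; `None` if misaligned
+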
+safety_comment! {
+ /// SAFETY:
+ /// - `Unalign<T>` is `repr(packed)`, so it is unaligned regardless of the
+ /// alignment of `T`, and so we don't require that `T: Unaligned`
+ /// - `Unalign<T>` has the same bit validity as `T`, and so it is
+ /// `FromZeroes`, `FromBytes`, or `AsBytes` exactly when `T` is as well.
+ impl_or_verify!(T => Unaligned for Unalign<T>);
+ impl_or_verify!(T: FromZeroes => FromZeroes for Unalign<T>);
+ impl_or_verify!(T: FromBytes => FromBytes for Unalign<T>);
+ impl_or_verify!(T: AsBytes => AsBytes for Unalign<T>);
+}
+
+// Note that `Unalign<T>` implements `Clone` only if `T: Copy`. Since the
+// inner `T` may not be aligned, there's no way to safely call `T::clone`, and
+// so a `T: Clone` bound is not sufficient to implement `Clone` for `Unalign`.
+impl<T: Copy> Clone for Unalign<T> {
+ #[inline(always)]
+ fn clone(&self) -> Unalign<T> {
+ *self
+ }
+}
+
+impl<T> Unalign<T> {
+ /// Constructs a new `Unalign`.
+ #[inline(always)]
+ pub const fn new(val: T) -> Unalign<T> {
+ Unalign(val)
+ }
+
+ /// Consumes `self`, returning the inner `T`.
+ #[inline(always)]
+ pub const fn into_inner(self) -> T {
+ // Use this instead of `mem::transmute` since the latter can't tell
+ // that `Unalign<T>` and `T` have the same size.
+ #[repr(C)]
+ union Transmute<T> {
+ u: ManuallyDrop<Unalign<T>>,
+ t: ManuallyDrop<T>,
+ }
+
+ // SAFETY: Since `Unalign` is `#[repr(C, packed)]`, it has the same
+ // layout as `T`. `ManuallyDrop<U>` is guaranteed to have the same
+ // layout as `U`, and so `ManuallyDrop<Unalign<T>>` has the same layout
+ // as `ManuallyDrop<T>`. Since `Transmute<T>` is `#[repr(C)]`, its `t`
+ // and `u` fields both start at the same offset (namely, 0) within the
+ // union.
+ //
+ // We do this instead of just destructuring in order to prevent
+ // `Unalign`'s `Drop::drop` from being run, since dropping is not
+ // supported in `const fn`s.
+ //
+ // TODO(https://github.com/rust-lang/rust/issues/73255): Destructure
+ // instead of using unsafe.
+ unsafe { ManuallyDrop::into_inner(Transmute { u: ManuallyDrop::new(self) }.t) }
+ }
+
+ /// Attempts to return a reference to the wrapped `T`, failing if `self` is
+ /// not properly aligned.
+ ///
+ /// If `self` does not satisfy `mem::align_of::<T>()`, then it is unsound to
+ /// return a reference to the wrapped `T`, and `try_deref` returns `None`.
+ ///
+ /// If `T: Unaligned`, then `Unalign<T>` implements [`Deref`], and callers
+ /// may prefer [`Deref::deref`], which is infallible.
+ #[inline(always)]
+ pub fn try_deref(&self) -> Option<&T> {
+ if !crate::util::aligned_to::<_, T>(self) {
+ return None;
+ }
+
+ // SAFETY: `deref_unchecked`'s safety requirement is that `self` is
+ // aligned to `align_of::<T>()`, which we just checked.
+ unsafe { Some(self.deref_unchecked()) }
+ }
+
+ /// Attempts to return a mutable reference to the wrapped `T`, failing if
+ /// `self` is not properly aligned.
+ ///
+ /// If `self` does not satisfy `mem::align_of::<T>()`, then it is unsound to
+ /// return a reference to the wrapped `T`, and `try_deref_mut` returns
+ /// `None`.
+ ///
+ /// If `T: Unaligned`, then `Unalign<T>` implements [`DerefMut`], and
+ /// callers may prefer [`DerefMut::deref_mut`], which is infallible.
+ #[inline(always)]
+ pub fn try_deref_mut(&mut self) -> Option<&mut T> {
+ if !crate::util::aligned_to::<_, T>(&*self) {
+ return None;
+ }
+
+ // SAFETY: `deref_mut_unchecked`'s safety requirement is that `self` is
+ // aligned to `align_of::<T>()`, which we just checked.
+ unsafe { Some(self.deref_mut_unchecked()) }
+ }
+
+ /// Returns a reference to the wrapped `T` without checking alignment.
+ ///
+ /// If `T: Unaligned`, then `Unalign<T>` implements [`Deref`], and callers
+ /// may prefer [`Deref::deref`], which is safe.
+ ///
+ /// # Safety
+ ///
+ /// If `self` does not satisfy `mem::align_of::<T>()`, then
+ /// `self.deref_unchecked()` may cause undefined behavior.
+ #[inline(always)]
+ pub const unsafe fn deref_unchecked(&self) -> &T {
+ // SAFETY: `Unalign<T>` is `repr(C, packed)`, so there is a valid `T`
+ // at the same memory location as `self`. It has no alignment guarantee,
+ // but the caller has promised that `self` is properly aligned, so we
+ // know that it is sound to create a reference to `T` at this memory
+ // location.
+ //
+ // We use `mem::transmute` instead of `&*self.get_ptr()` because
+ // dereferencing pointers is not stable in `const` on our current MSRV
+ // (1.56 as of this writing).
+ unsafe { mem::transmute(self) }
+ }
+
+ /// Returns a mutable reference to the wrapped `T` without checking
+ /// alignment.
+ ///
+ /// If `T: Unaligned`, then `Unalign<T>` implements [`DerefMut`], and
+ /// callers may prefer [`DerefMut::deref_mut`], which is safe.
+ ///
+ /// # Safety
+ ///
+ /// If `self` does not satisfy `mem::align_of::<T>()`, then
+ /// `self.deref_mut_unchecked()` may cause undefined behavior.
+ #[inline(always)]
+ pub unsafe fn deref_mut_unchecked(&mut self) -> &mut T {
+ // SAFETY: `self.get_mut_ptr()` returns a raw pointer to a valid `T` at
+ // the same memory location as `self`. It has no alignment guarantee,
+ // but the caller has promised that `self` is properly aligned, so we
+ // know that the pointer itself is aligned, and thus that it is sound to
+ // create a reference to a `T` at this memory location.
+ unsafe { &mut *self.get_mut_ptr() }
+ }
+
+ /// Gets an unaligned raw pointer to the inner `T`.
+ ///
+ /// # Safety
+ ///
+ /// The returned raw pointer is not necessarily aligned to
+ /// `align_of::<T>()`. Most functions which operate on raw pointers require
+ /// those pointers to be aligned, so calling those functions with the result
+ /// of `get_ptr` will be undefined behavior if alignment is not guaranteed
+ /// using some out-of-band mechanism. In general, the only functions which
+ /// are safe to call with this pointer are those which are explicitly
+ /// documented as being sound to use with an unaligned pointer, such as
+ /// [`read_unaligned`].
+ ///
+ /// [`read_unaligned`]: core::ptr::read_unaligned
+ #[inline(always)]
+ pub const fn get_ptr(&self) -> *const T {
+ ptr::addr_of!(self.0)
+ }
+
+ /// Gets an unaligned mutable raw pointer to the inner `T`.
+ ///
+ /// # Safety
+ ///
+ /// The returned raw pointer is not necessarily aligned to
+ /// `align_of::<T>()`. Most functions which operate on raw pointers require
+ /// those pointers to be aligned, so calling those functions with the result
+ /// of `get_ptr` will be undefined behavior if alignment is not guaranteed
+ /// using some out-of-band mechanism. In general, the only functions which
+ /// are safe to call with this pointer are those which are explicitly
+ /// documented as being sound to use with an unaligned pointer, such as
+ /// [`read_unaligned`].
+ ///
+ /// [`read_unaligned`]: core::ptr::read_unaligned
+ // TODO(https://github.com/rust-lang/rust/issues/57349): Make this `const`.
+ #[inline(always)]
+ pub fn get_mut_ptr(&mut self) -> *mut T {
+ ptr::addr_of_mut!(self.0)
+ }
+
+ /// Sets the inner `T`, dropping the previous value.
+ // TODO(https://github.com/rust-lang/rust/issues/57349): Make this `const`.
+ #[inline(always)]
+ pub fn set(&mut self, t: T) {
+ *self = Unalign::new(t);
+ }
+
+ /// Updates the inner `T` by calling a function on it.
+ ///
+ /// If [`T: Unaligned`], then `Unalign<T>` implements [`DerefMut`], and that
+ /// impl should be preferred over this method when performing updates, as it
+ /// will usually be faster and more ergonomic.
+ ///
+ /// For large types, this method may be expensive, as it requires copying
+ /// `2 * size_of::<T>()` bytes. \[1\]
+ ///
+ /// \[1\] Since the inner `T` may not be aligned, it would not be sound to
+ /// invoke `f` on it directly. Instead, `update` moves it into a
+ /// properly-aligned location in the local stack frame, calls `f` on it, and
+ /// then moves it back to its original location in `self`.
+ ///
+ /// [`T: Unaligned`]: Unaligned
+ #[inline]
+ pub fn update<O, F: FnOnce(&mut T) -> O>(&mut self, f: F) -> O {
+ // On drop, this moves `copy` out of itself and uses `ptr::write` to
+ // overwrite `slf`.
+ struct WriteBackOnDrop<T> {
+ copy: ManuallyDrop<T>,
+ slf: *mut Unalign<T>,
+ }
+
+ impl<T> Drop for WriteBackOnDrop<T> {
+ fn drop(&mut self) {
+ // SAFETY: See inline comments.
+ unsafe {
+ // SAFETY: We never use `copy` again as required by
+ // `ManuallyDrop::take`.
+ let copy = ManuallyDrop::take(&mut self.copy);
+ // SAFETY: `slf` is the raw pointer value of `self`. We know
+ // it is valid for writes and properly aligned because
+ // `self` is a mutable reference, which guarantees both of
+ // these properties.
+ ptr::write(self.slf, Unalign::new(copy));
+ }
+ }
+ }
+
+ // SAFETY: We know that `self` is valid for reads, properly aligned, and
+ // points to an initialized `Unalign<T>` because it is a mutable
+ // reference, which guarantees all of these properties.
+ //
+ // Since `T: !Copy`, it would be unsound in the general case to allow
+ // both the original `Unalign<T>` and the copy to be used by safe code.
+ // We guarantee that the copy is used to overwrite the original in the
+ // `Drop::drop` impl of `WriteBackOnDrop`. So long as this `drop` is
+ // called before any other safe code executes, soundness is upheld.
+ // While this method can terminate in two ways (by returning normally or
+ // by unwinding due to a panic in `f`), in both cases, `write_back` is
+ // dropped - and its `drop` called - before any other safe code can
+ // execute.
+ let copy = unsafe { ptr::read(self) }.into_inner();
+ let mut write_back = WriteBackOnDrop { copy: ManuallyDrop::new(copy), slf: self };
+
+ let ret = f(&mut write_back.copy);
+
+ drop(write_back);
+ ret
+ }
+}
+
+impl<T: Copy> Unalign<T> {
+ /// Gets a copy of the inner `T`.
+ // TODO(https://github.com/rust-lang/rust/issues/57349): Make this `const`.
+ #[inline(always)]
+ pub fn get(&self) -> T {
+ let Unalign(val) = *self;
+ val
+ }
+}
+
+impl<T: Unaligned> Deref for Unalign<T> {
+ type Target = T;
+
+ #[inline(always)]
+ fn deref(&self) -> &T {
+ // SAFETY: `deref_unchecked`'s safety requirement is that `self` is
+ // aligned to `align_of::<T>()`. `T: Unaligned` guarantees that
+ // `align_of::<T>() == 1`, and all pointers are one-aligned because all
+ // addresses are divisible by 1.
+ unsafe { self.deref_unchecked() }
+ }
+}
+
+impl<T: Unaligned> DerefMut for Unalign<T> {
+ #[inline(always)]
+ fn deref_mut(&mut self) -> &mut T {
+ // SAFETY: `deref_mut_unchecked`'s safety requirement is that `self` is
+ // aligned to `align_of::<T>()`. `T: Unaligned` guarantees that
+ // `align_of::<T>() == 1`, and all pointers are one-aligned because all
+ // addresses are divisible by 1.
+ unsafe { self.deref_mut_unchecked() }
+ }
+}
+
+impl<T: Unaligned + PartialOrd> PartialOrd<Unalign<T>> for Unalign<T> {
+ #[inline(always)]
+ fn partial_cmp(&self, other: &Unalign<T>) -> Option<Ordering> {
+ PartialOrd::partial_cmp(self.deref(), other.deref())
+ }
+}
+
+impl<T: Unaligned + Ord> Ord for Unalign<T> {
+ #[inline(always)]
+ fn cmp(&self, other: &Unalign<T>) -> Ordering {
+ Ord::cmp(self.deref(), other.deref())
+ }
+}
+
+impl<T: Unaligned + PartialEq> PartialEq<Unalign<T>> for Unalign<T> {
+ #[inline(always)]
+ fn eq(&self, other: &Unalign<T>) -> bool {
+ PartialEq::eq(self.deref(), other.deref())
+ }
+}
+
+impl<T: Unaligned + Eq> Eq for Unalign<T> {}
+
+impl<T: Unaligned + Hash> Hash for Unalign<T> {
+ #[inline(always)]
+ fn hash<H>(&self, state: &mut H)
+ where
+ H: Hasher,
+ {
+ self.deref().hash(state);
+ }
+}
+
+impl<T: Unaligned + Debug> Debug for Unalign<T> {
+ #[inline(always)]
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ Debug::fmt(self.deref(), f)
+ }
+}
+
+impl<T: Unaligned + Display> Display for Unalign<T> {
+ #[inline(always)]
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ Display::fmt(self.deref(), f)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use core::panic::AssertUnwindSafe;
+
+ use super::*;
+ use crate::util::testutil::*;
+
+ /// A `T` which is guaranteed not to satisfy `align_of::<A>()`.
+ ///
+ /// It must be the case that `align_of::<T>() < align_of::<A>()` in order
+ /// for this type to work properly.
+ #[repr(C)]
+ struct ForceUnalign<T, A> {
+ // The outer struct is aligned to `A`, and, thanks to `repr(C)`, `t` is
+ // placed at the minimum offset that guarantees its alignment. If
+ // `align_of::<T>() < align_of::<A>()`, then that offset will be
+ // guaranteed *not* to satisfy `align_of::<A>()`.
+ _u: u8,
+ t: T,
+ _a: [A; 0],
+ }
+
+ impl<T, A> ForceUnalign<T, A> {
+ const fn new(t: T) -> ForceUnalign<T, A> {
+ ForceUnalign { _u: 0, t, _a: [] }
+ }
+ }
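+
+    // Worked example (illustrative): in `ForceUnalign<Unalign<AU64>, AU64>`,
+    // the struct is aligned to `align_of::<AU64>()` (8), `_u` occupies offset
+    // 0, and, because `align_of::<Unalign<AU64>>() == 1`, `repr(C)` places
+    // `t` at offset 1. An 8-aligned base address plus 1 is never 8-aligned,
+    // so:
+    //
+    //     let f = ForceUnalign::<Unalign<AU64>, AU64>::new(Unalign::new(AU64(0)));
+    //     assert_ne!(&f.t as *const _ as usize % mem::align_of::<AU64>(), 0);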
+
+ #[test]
+ fn test_unalign() {
+ // Test methods that don't depend on alignment.
+ let mut u = Unalign::new(AU64(123));
+ assert_eq!(u.get(), AU64(123));
+ assert_eq!(u.into_inner(), AU64(123));
+ assert_eq!(u.get_ptr(), <*const _>::cast::<AU64>(&u));
+ assert_eq!(u.get_mut_ptr(), <*mut _>::cast::<AU64>(&mut u));
+ u.set(AU64(321));
+ assert_eq!(u.get(), AU64(321));
+
+ // Test methods that depend on alignment (when alignment is satisfied).
+ let mut u: Align<_, AU64> = Align::new(Unalign::new(AU64(123)));
+ assert_eq!(u.t.try_deref(), Some(&AU64(123)));
+ assert_eq!(u.t.try_deref_mut(), Some(&mut AU64(123)));
+ // SAFETY: The `Align<_, AU64>` guarantees proper alignment.
+ assert_eq!(unsafe { u.t.deref_unchecked() }, &AU64(123));
+ // SAFETY: The `Align<_, AU64>` guarantees proper alignment.
+ assert_eq!(unsafe { u.t.deref_mut_unchecked() }, &mut AU64(123));
+ *u.t.try_deref_mut().unwrap() = AU64(321);
+ assert_eq!(u.t.get(), AU64(321));
+
+ // Test methods that depend on alignment (when alignment is not
+ // satisfied).
+ let mut u: ForceUnalign<_, AU64> = ForceUnalign::new(Unalign::new(AU64(123)));
+ assert_eq!(u.t.try_deref(), None);
+ assert_eq!(u.t.try_deref_mut(), None);
+
+ // Test methods that depend on `T: Unaligned`.
+ let mut u = Unalign::new(123u8);
+ assert_eq!(u.try_deref(), Some(&123));
+ assert_eq!(u.try_deref_mut(), Some(&mut 123));
+ assert_eq!(u.deref(), &123);
+ assert_eq!(u.deref_mut(), &mut 123);
+ *u = 21;
+ assert_eq!(u.get(), 21);
+
+ // Test that some `Unalign` functions and methods are `const`.
+ const _UNALIGN: Unalign<u64> = Unalign::new(0);
+ const _UNALIGN_PTR: *const u64 = _UNALIGN.get_ptr();
+ const _U64: u64 = _UNALIGN.into_inner();
+ // Make sure all code is considered "used".
+ //
+ // TODO(https://github.com/rust-lang/rust/issues/104084): Remove this
+ // attribute.
+ #[allow(dead_code)]
+ const _: () = {
+ let x: Align<_, AU64> = Align::new(Unalign::new(AU64(123)));
+ // Make sure that `deref_unchecked` is `const`.
+ //
+ // SAFETY: The `Align<_, AU64>` guarantees proper alignment.
+ let au64 = unsafe { x.t.deref_unchecked() };
+ match au64 {
+ AU64(123) => {}
+ _ => unreachable!(),
+ }
+ };
+ }
+
+ #[test]
+ fn test_unalign_update() {
+ let mut u = Unalign::new(AU64(123));
+ u.update(|a| a.0 += 1);
+ assert_eq!(u.get(), AU64(124));
+
+        // Test that, even if the callback panics, the original is still
+        // correctly overwritten. Use a `Box` so that any unsoundness is more
+        // likely to manifest as two `Box`es owning the same heap allocation,
+        // which is exactly the sort of bug that Miri catches.
+ let mut u = Unalign::new(Box::new(AU64(123)));
+ let res = std::panic::catch_unwind(AssertUnwindSafe(|| {
+ u.update(|a| {
+ a.0 += 1;
+ panic!();
+ })
+ }));
+ assert!(res.is_err());
+ assert_eq!(u.into_inner(), Box::new(AU64(124)));
+ }
+}
diff --git a/tests/trybuild.rs b/tests/trybuild.rs
new file mode 100644
index 0000000..4ed01f7
--- /dev/null
+++ b/tests/trybuild.rs
@@ -0,0 +1,45 @@
+// Copyright 2019 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// UI tests depend on the exact error messages emitted by rustc, but those error
+// messages are not stable, and sometimes change between Rust versions. Thus, we
+// maintain one set of UI tests for each Rust version that we test in CI, and we
+// pin to specific versions in CI (a specific stable version, a specific date of
+// the nightly compiler, and a specific MSRV). Updating those pinned versions
+// may also require updating these tests.
+// - `tests/ui-nightly` - Contains the source of truth for our UI test source
+//   files (`.rs`), and contains `.stderr` files for nightly
+// - `tests/ui-stable` - Contains symlinks to the `.rs` files in
+//   `tests/ui-nightly`, and contains `.stderr` files for stable
+// - `tests/ui-msrv` - Contains symlinks to the `.rs` files in
+//   `tests/ui-nightly`, and contains `.stderr` files for MSRV
+
+#[rustversion::nightly]
+const SOURCE_FILES_DIR: &str = "tests/ui-nightly";
+#[rustversion::stable(1.69.0)]
+const SOURCE_FILES_DIR: &str = "tests/ui-stable";
+#[rustversion::stable(1.61.0)]
+const SOURCE_FILES_DIR: &str = "tests/ui-msrv";
+
+#[test]
+fn ui() {
+ let t = trybuild::TestCases::new();
+ t.compile_fail(format!("{SOURCE_FILES_DIR}/*.rs"));
+}
+
+// The file `invalid-impls.rs` directly includes `src/macros.rs` in order to
+// test the `impl_or_verify!` macro which is defined in that file. Specifically,
+// it tests the verification portion of that macro, which is enabled when
+// `cfg(any(feature = "derive", test))`. While `--cfg test` is of course passed
+// to the code in the file you're reading right now, `trybuild` does not pass
+// `--cfg test` when it invokes Cargo. As a result, this `trybuild` test only
+// tests the correct behavior when the "derive" feature is enabled.
+#[cfg(feature = "derive")]
+#[test]
+fn ui_invalid_impls() {
+ let t = trybuild::TestCases::new();
+ t.compile_fail(format!("{SOURCE_FILES_DIR}/invalid-impls/*.rs"));
+}
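+
+// For reference, a rough sketch of what the verification arm of
+// `impl_or_verify!` expands to (the real definition lives in
+// `src/macros.rs`; the shape below mirrors the `.stderr` output in
+// `tests/ui-*/invalid-impls`):
+//
+//     const _: () = {
+//         trait Subtrait: zerocopy::FromBytes {}
+//         impl<T> Subtrait for Foo<T> {} // E0277 unless `Foo<T>: FromBytes`
+//     };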
diff --git a/tests/ui-msrv/invalid-impls/invalid-impls.rs b/tests/ui-msrv/invalid-impls/invalid-impls.rs
new file mode 100644
index 0000000..b9a60bd
--- /dev/null
+++ b/tests/ui-msrv/invalid-impls/invalid-impls.rs
@@ -0,0 +1,25 @@
+// Copyright 2022 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Since some macros from `macros.rs` are unused.
+#![allow(unused)]
+
+extern crate zerocopy;
+extern crate zerocopy_derive;
+
+include!("../../../src/macros.rs");
+
+use zerocopy::*;
+use zerocopy_derive::*;
+
+fn main() {}
+
+#[derive(FromZeroes, FromBytes, AsBytes, Unaligned)]
+#[repr(transparent)]
+struct Foo<T>(T);
+
+impl_or_verify!(T => FromZeroes for Foo<T>);
+impl_or_verify!(T => FromBytes for Foo<T>);
+impl_or_verify!(T => AsBytes for Foo<T>);
+impl_or_verify!(T => Unaligned for Foo<T>);
diff --git a/tests/ui-msrv/invalid-impls/invalid-impls.stderr b/tests/ui-msrv/invalid-impls/invalid-impls.stderr
new file mode 100644
index 0000000..fee0cd9
--- /dev/null
+++ b/tests/ui-msrv/invalid-impls/invalid-impls.stderr
@@ -0,0 +1,127 @@
+error[E0277]: the trait bound `T: zerocopy::FromZeroes` is not satisfied
+ --> tests/ui-msrv/invalid-impls/../../../src/macros.rs
+ |
+ | impl<$($tyvar $(: $(? $optbound +)* $($bound +)*)?),*> Subtrait for $ty {}
+ | ^^^^^^^^ the trait `zerocopy::FromZeroes` is not implemented for `T`
+ |
+ ::: tests/ui-msrv/invalid-impls/invalid-impls.rs:22:1
+ |
+22 | impl_or_verify!(T => FromZeroes for Foo<T>);
+ | ------------------------------------------- in this macro invocation
+ |
+note: required because of the requirements on the impl of `zerocopy::FromZeroes` for `Foo<T>`
+ --> tests/ui-msrv/invalid-impls/invalid-impls.rs:18:10
+ |
+18 | #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)]
+ | ^^^^^^^^^^
+note: required by a bound in `_::Subtrait`
+ --> tests/ui-msrv/invalid-impls/../../../src/macros.rs
+ |
+ | trait Subtrait: $trait {}
+ | ^^^^^^ required by this bound in `_::Subtrait`
+ |
+ ::: tests/ui-msrv/invalid-impls/invalid-impls.rs:22:1
+ |
+22 | impl_or_verify!(T => FromZeroes for Foo<T>);
+ | ------------------------------------------- in this macro invocation
+ = note: this error originates in the macro `impl_or_verify` (in Nightly builds, run with -Z macro-backtrace for more info)
+help: consider restricting type parameter `T`
+ |
+22 | impl_or_verify!(T: zerocopy::FromZeroes => FromZeroes for Foo<T>);
+ | ++++++++++++++++++++++
+
+error[E0277]: the trait bound `T: zerocopy::FromBytes` is not satisfied
+ --> tests/ui-msrv/invalid-impls/../../../src/macros.rs
+ |
+ | impl<$($tyvar $(: $(? $optbound +)* $($bound +)*)?),*> Subtrait for $ty {}
+ | ^^^^^^^^ the trait `zerocopy::FromBytes` is not implemented for `T`
+ |
+ ::: tests/ui-msrv/invalid-impls/invalid-impls.rs:23:1
+ |
+23 | impl_or_verify!(T => FromBytes for Foo<T>);
+ | ------------------------------------------ in this macro invocation
+ |
+note: required because of the requirements on the impl of `zerocopy::FromBytes` for `Foo<T>`
+ --> tests/ui-msrv/invalid-impls/invalid-impls.rs:18:22
+ |
+18 | #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)]
+ | ^^^^^^^^^
+note: required by a bound in `_::Subtrait`
+ --> tests/ui-msrv/invalid-impls/../../../src/macros.rs
+ |
+ | trait Subtrait: $trait {}
+ | ^^^^^^ required by this bound in `_::Subtrait`
+ |
+ ::: tests/ui-msrv/invalid-impls/invalid-impls.rs:23:1
+ |
+23 | impl_or_verify!(T => FromBytes for Foo<T>);
+ | ------------------------------------------ in this macro invocation
+ = note: this error originates in the macro `impl_or_verify` (in Nightly builds, run with -Z macro-backtrace for more info)
+help: consider restricting type parameter `T`
+ |
+23 | impl_or_verify!(T: zerocopy::FromBytes => FromBytes for Foo<T>);
+ | +++++++++++++++++++++
+
+error[E0277]: the trait bound `T: zerocopy::AsBytes` is not satisfied
+ --> tests/ui-msrv/invalid-impls/../../../src/macros.rs
+ |
+ | impl<$($tyvar $(: $(? $optbound +)* $($bound +)*)?),*> Subtrait for $ty {}
+ | ^^^^^^^^ the trait `zerocopy::AsBytes` is not implemented for `T`
+ |
+ ::: tests/ui-msrv/invalid-impls/invalid-impls.rs:24:1
+ |
+24 | impl_or_verify!(T => AsBytes for Foo<T>);
+ | ---------------------------------------- in this macro invocation
+ |
+note: required because of the requirements on the impl of `zerocopy::AsBytes` for `Foo<T>`
+ --> tests/ui-msrv/invalid-impls/invalid-impls.rs:18:33
+ |
+18 | #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)]
+ | ^^^^^^^
+note: required by a bound in `_::Subtrait`
+ --> tests/ui-msrv/invalid-impls/../../../src/macros.rs
+ |
+ | trait Subtrait: $trait {}
+ | ^^^^^^ required by this bound in `_::Subtrait`
+ |
+ ::: tests/ui-msrv/invalid-impls/invalid-impls.rs:24:1
+ |
+24 | impl_or_verify!(T => AsBytes for Foo<T>);
+ | ---------------------------------------- in this macro invocation
+ = note: this error originates in the macro `impl_or_verify` (in Nightly builds, run with -Z macro-backtrace for more info)
+help: consider restricting type parameter `T`
+ |
+24 | impl_or_verify!(T: zerocopy::AsBytes => AsBytes for Foo<T>);
+ | +++++++++++++++++++
+
+error[E0277]: the trait bound `T: zerocopy::Unaligned` is not satisfied
+ --> tests/ui-msrv/invalid-impls/../../../src/macros.rs
+ |
+ | impl<$($tyvar $(: $(? $optbound +)* $($bound +)*)?),*> Subtrait for $ty {}
+ | ^^^^^^^^ the trait `zerocopy::Unaligned` is not implemented for `T`
+ |
+ ::: tests/ui-msrv/invalid-impls/invalid-impls.rs:25:1
+ |
+25 | impl_or_verify!(T => Unaligned for Foo<T>);
+ | ------------------------------------------ in this macro invocation
+ |
+note: required because of the requirements on the impl of `zerocopy::Unaligned` for `Foo<T>`
+ --> tests/ui-msrv/invalid-impls/invalid-impls.rs:18:42
+ |
+18 | #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)]
+ | ^^^^^^^^^
+note: required by a bound in `_::Subtrait`
+ --> tests/ui-msrv/invalid-impls/../../../src/macros.rs
+ |
+ | trait Subtrait: $trait {}
+ | ^^^^^^ required by this bound in `_::Subtrait`
+ |
+ ::: tests/ui-msrv/invalid-impls/invalid-impls.rs:25:1
+ |
+25 | impl_or_verify!(T => Unaligned for Foo<T>);
+ | ------------------------------------------ in this macro invocation
+ = note: this error originates in the macro `impl_or_verify` (in Nightly builds, run with -Z macro-backtrace for more info)
+help: consider restricting type parameter `T`
+ |
+25 | impl_or_verify!(T: zerocopy::Unaligned => Unaligned for Foo<T>);
+ | +++++++++++++++++++++
diff --git a/tests/ui-msrv/transmute-illegal.rs b/tests/ui-msrv/transmute-illegal.rs
new file mode 100644
index 0000000..74b8439
--- /dev/null
+++ b/tests/ui-msrv/transmute-illegal.rs
@@ -0,0 +1,10 @@
+// Copyright 2022 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+extern crate zerocopy;
+
+fn main() {}
+
+// It is unsound to inspect the usize value of a pointer during const eval.
+const POINTER_VALUE: usize = zerocopy::transmute!(&0usize as *const usize);
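+
+// By contrast, a transmute from an `AsBytes` source to a `FromBytes`
+// destination is accepted, including in const contexts, e.g. (sketch, kept
+// in a comment so it does not affect this compile-fail test):
+//
+//     const BYTES: [u8; 4] = zerocopy::transmute!(123456789u32);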
diff --git a/tests/ui-msrv/transmute-illegal.stderr b/tests/ui-msrv/transmute-illegal.stderr
new file mode 100644
index 0000000..37c124a
--- /dev/null
+++ b/tests/ui-msrv/transmute-illegal.stderr
@@ -0,0 +1,18 @@
+error[E0277]: the trait bound `*const usize: AsBytes` is not satisfied
+ --> tests/ui-msrv/transmute-illegal.rs:10:30
+ |
+10 | const POINTER_VALUE: usize = zerocopy::transmute!(&0usize as *const usize);
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `AsBytes` is not implemented for `*const usize`
+ |
+ = help: the following implementations were found:
+ <usize as AsBytes>
+ <f32 as AsBytes>
+ <f64 as AsBytes>
+ <i128 as AsBytes>
+ and $N others
+note: required by a bound in `POINTER_VALUE::transmute`
+ --> tests/ui-msrv/transmute-illegal.rs:10:30
+ |
+10 | const POINTER_VALUE: usize = zerocopy::transmute!(&0usize as *const usize);
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `POINTER_VALUE::transmute`
+ = note: this error originates in the macro `zerocopy::transmute` (in Nightly builds, run with -Z macro-backtrace for more info)
diff --git a/tests/ui-nightly/invalid-impls/invalid-impls.rs b/tests/ui-nightly/invalid-impls/invalid-impls.rs
new file mode 100644
index 0000000..b9a60bd
--- /dev/null
+++ b/tests/ui-nightly/invalid-impls/invalid-impls.rs
@@ -0,0 +1,25 @@
+// Copyright 2022 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Since some macros from `macros.rs` are unused.
+#![allow(unused)]
+
+extern crate zerocopy;
+extern crate zerocopy_derive;
+
+include!("../../../src/macros.rs");
+
+use zerocopy::*;
+use zerocopy_derive::*;
+
+fn main() {}
+
+#[derive(FromZeroes, FromBytes, AsBytes, Unaligned)]
+#[repr(transparent)]
+struct Foo<T>(T);
+
+impl_or_verify!(T => FromZeroes for Foo<T>);
+impl_or_verify!(T => FromBytes for Foo<T>);
+impl_or_verify!(T => AsBytes for Foo<T>);
+impl_or_verify!(T => Unaligned for Foo<T>);
diff --git a/tests/ui-nightly/invalid-impls/invalid-impls.stderr b/tests/ui-nightly/invalid-impls/invalid-impls.stderr
new file mode 100644
index 0000000..7d839ac
--- /dev/null
+++ b/tests/ui-nightly/invalid-impls/invalid-impls.stderr
@@ -0,0 +1,107 @@
+error[E0277]: the trait bound `T: zerocopy::FromZeroes` is not satisfied
+ --> tests/ui-nightly/invalid-impls/invalid-impls.rs:22:37
+ |
+22 | impl_or_verify!(T => FromZeroes for Foo<T>);
+ | ^^^^^^ the trait `zerocopy::FromZeroes` is not implemented for `T`
+ |
+note: required for `Foo<T>` to implement `zerocopy::FromZeroes`
+ --> tests/ui-nightly/invalid-impls/invalid-impls.rs:18:10
+ |
+18 | #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)]
+ | ^^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro
+note: required by a bound in `_::Subtrait`
+ --> tests/ui-nightly/invalid-impls/../../../src/macros.rs
+ |
+ | trait Subtrait: $trait {}
+ | ^^^^^^ required by this bound in `Subtrait`
+ |
+ ::: tests/ui-nightly/invalid-impls/invalid-impls.rs:22:1
+ |
+22 | impl_or_verify!(T => FromZeroes for Foo<T>);
+ | ------------------------------------------- in this macro invocation
+ = note: this error originates in the derive macro `FromZeroes` which comes from the expansion of the macro `impl_or_verify` (in Nightly builds, run with -Z macro-backtrace for more info)
+help: consider restricting type parameter `T`
+ |
+22 | impl_or_verify!(T: zerocopy::FromZeroes => FromZeroes for Foo<T>);
+ | ++++++++++++++++++++++
+
+error[E0277]: the trait bound `T: zerocopy::FromBytes` is not satisfied
+ --> tests/ui-nightly/invalid-impls/invalid-impls.rs:23:36
+ |
+23 | impl_or_verify!(T => FromBytes for Foo<T>);
+ | ^^^^^^ the trait `zerocopy::FromBytes` is not implemented for `T`
+ |
+note: required for `Foo<T>` to implement `zerocopy::FromBytes`
+ --> tests/ui-nightly/invalid-impls/invalid-impls.rs:18:22
+ |
+18 | #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)]
+ | ^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro
+note: required by a bound in `_::Subtrait`
+ --> tests/ui-nightly/invalid-impls/../../../src/macros.rs
+ |
+ | trait Subtrait: $trait {}
+ | ^^^^^^ required by this bound in `Subtrait`
+ |
+ ::: tests/ui-nightly/invalid-impls/invalid-impls.rs:23:1
+ |
+23 | impl_or_verify!(T => FromBytes for Foo<T>);
+ | ------------------------------------------ in this macro invocation
+ = note: this error originates in the derive macro `FromBytes` which comes from the expansion of the macro `impl_or_verify` (in Nightly builds, run with -Z macro-backtrace for more info)
+help: consider restricting type parameter `T`
+ |
+23 | impl_or_verify!(T: zerocopy::FromBytes => FromBytes for Foo<T>);
+ | +++++++++++++++++++++
+
+error[E0277]: the trait bound `T: zerocopy::AsBytes` is not satisfied
+ --> tests/ui-nightly/invalid-impls/invalid-impls.rs:24:34
+ |
+24 | impl_or_verify!(T => AsBytes for Foo<T>);
+ | ^^^^^^ the trait `zerocopy::AsBytes` is not implemented for `T`
+ |
+note: required for `Foo<T>` to implement `zerocopy::AsBytes`
+ --> tests/ui-nightly/invalid-impls/invalid-impls.rs:18:33
+ |
+18 | #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)]
+ | ^^^^^^^ unsatisfied trait bound introduced in this `derive` macro
+note: required by a bound in `_::Subtrait`
+ --> tests/ui-nightly/invalid-impls/../../../src/macros.rs
+ |
+ | trait Subtrait: $trait {}
+ | ^^^^^^ required by this bound in `Subtrait`
+ |
+ ::: tests/ui-nightly/invalid-impls/invalid-impls.rs:24:1
+ |
+24 | impl_or_verify!(T => AsBytes for Foo<T>);
+ | ---------------------------------------- in this macro invocation
+ = note: this error originates in the derive macro `AsBytes` which comes from the expansion of the macro `impl_or_verify` (in Nightly builds, run with -Z macro-backtrace for more info)
+help: consider restricting type parameter `T`
+ |
+24 | impl_or_verify!(T: zerocopy::AsBytes => AsBytes for Foo<T>);
+ | +++++++++++++++++++
+
+error[E0277]: the trait bound `T: zerocopy::Unaligned` is not satisfied
+ --> tests/ui-nightly/invalid-impls/invalid-impls.rs:25:36
+ |
+25 | impl_or_verify!(T => Unaligned for Foo<T>);
+ | ^^^^^^ the trait `zerocopy::Unaligned` is not implemented for `T`
+ |
+note: required for `Foo<T>` to implement `zerocopy::Unaligned`
+ --> tests/ui-nightly/invalid-impls/invalid-impls.rs:18:42
+ |
+18 | #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)]
+ | ^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro
+note: required by a bound in `_::Subtrait`
+ --> tests/ui-nightly/invalid-impls/../../../src/macros.rs
+ |
+ | trait Subtrait: $trait {}
+ | ^^^^^^ required by this bound in `Subtrait`
+ |
+ ::: tests/ui-nightly/invalid-impls/invalid-impls.rs:25:1
+ |
+25 | impl_or_verify!(T => Unaligned for Foo<T>);
+ | ------------------------------------------ in this macro invocation
+ = note: this error originates in the derive macro `Unaligned` which comes from the expansion of the macro `impl_or_verify` (in Nightly builds, run with -Z macro-backtrace for more info)
+help: consider restricting type parameter `T`
+ |
+25 | impl_or_verify!(T: zerocopy::Unaligned => Unaligned for Foo<T>);
+ | +++++++++++++++++++++
diff --git a/tests/ui-nightly/transmute-illegal.rs b/tests/ui-nightly/transmute-illegal.rs
new file mode 100644
index 0000000..74b8439
--- /dev/null
+++ b/tests/ui-nightly/transmute-illegal.rs
@@ -0,0 +1,10 @@
+// Copyright 2022 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+extern crate zerocopy;
+
+fn main() {}
+
+// It is unsound to inspect the usize value of a pointer during const eval.
+const POINTER_VALUE: usize = zerocopy::transmute!(&0usize as *const usize);
diff --git a/tests/ui-nightly/transmute-illegal.stderr b/tests/ui-nightly/transmute-illegal.stderr
new file mode 100644
index 0000000..a57544b
--- /dev/null
+++ b/tests/ui-nightly/transmute-illegal.stderr
@@ -0,0 +1,16 @@
+error[E0277]: the trait bound `*const usize: AsBytes` is not satisfied
+ --> tests/ui-nightly/transmute-illegal.rs:10:30
+ |
+10 | const POINTER_VALUE: usize = zerocopy::transmute!(&0usize as *const usize);
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ | |
+ | the trait `AsBytes` is not implemented for `*const usize`
+ | required by a bound introduced by this call
+ |
+ = help: the trait `AsBytes` is implemented for `usize`
+note: required by a bound in `POINTER_VALUE::transmute`
+ --> tests/ui-nightly/transmute-illegal.rs:10:30
+ |
+10 | const POINTER_VALUE: usize = zerocopy::transmute!(&0usize as *const usize);
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `transmute`
+ = note: this error originates in the macro `zerocopy::transmute` (in Nightly builds, run with -Z macro-backtrace for more info)
diff --git a/tests/ui-stable/invalid-impls/invalid-impls.rs b/tests/ui-stable/invalid-impls/invalid-impls.rs
new file mode 100644
index 0000000..b9a60bd
--- /dev/null
+++ b/tests/ui-stable/invalid-impls/invalid-impls.rs
@@ -0,0 +1,25 @@
+// Copyright 2022 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Since some macros from `macros.rs` are unused.
+#![allow(unused)]
+
+extern crate zerocopy;
+extern crate zerocopy_derive;
+
+include!("../../../src/macros.rs");
+
+use zerocopy::*;
+use zerocopy_derive::*;
+
+fn main() {}
+
+#[derive(FromZeroes, FromBytes, AsBytes, Unaligned)]
+#[repr(transparent)]
+struct Foo<T>(T);
+
+impl_or_verify!(T => FromZeroes for Foo<T>);
+impl_or_verify!(T => FromBytes for Foo<T>);
+impl_or_verify!(T => AsBytes for Foo<T>);
+impl_or_verify!(T => Unaligned for Foo<T>);
diff --git a/tests/ui-stable/invalid-impls/invalid-impls.stderr b/tests/ui-stable/invalid-impls/invalid-impls.stderr
new file mode 100644
index 0000000..f613377
--- /dev/null
+++ b/tests/ui-stable/invalid-impls/invalid-impls.stderr
@@ -0,0 +1,107 @@
+error[E0277]: the trait bound `T: zerocopy::FromZeroes` is not satisfied
+ --> tests/ui-stable/invalid-impls/invalid-impls.rs:22:37
+ |
+22 | impl_or_verify!(T => FromZeroes for Foo<T>);
+ | ^^^^^^ the trait `zerocopy::FromZeroes` is not implemented for `T`
+ |
+note: required for `Foo<T>` to implement `zerocopy::FromZeroes`
+ --> tests/ui-stable/invalid-impls/invalid-impls.rs:18:10
+ |
+18 | #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)]
+ | ^^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro
+note: required by a bound in `_::Subtrait`
+ --> tests/ui-stable/invalid-impls/../../../src/macros.rs
+ |
+ | trait Subtrait: $trait {}
+ | ^^^^^^ required by this bound in `Subtrait`
+ |
+ ::: tests/ui-stable/invalid-impls/invalid-impls.rs:22:1
+ |
+22 | impl_or_verify!(T => FromZeroes for Foo<T>);
+ | ------------------------------------------- in this macro invocation
+ = note: this error originates in the derive macro `FromZeroes` which comes from the expansion of the macro `impl_or_verify` (in Nightly builds, run with -Z macro-backtrace for more info)
+help: consider restricting type parameter `T`
+ |
+22 | impl_or_verify!(T: zerocopy::FromZeroes => FromZeroes for Foo<T>);
+ | ++++++++++++++++++++++
+
+error[E0277]: the trait bound `T: zerocopy::FromBytes` is not satisfied
+ --> tests/ui-stable/invalid-impls/invalid-impls.rs:23:36
+ |
+23 | impl_or_verify!(T => FromBytes for Foo<T>);
+ | ^^^^^^ the trait `zerocopy::FromBytes` is not implemented for `T`
+ |
+note: required for `Foo<T>` to implement `zerocopy::FromBytes`
+ --> tests/ui-stable/invalid-impls/invalid-impls.rs:18:22
+ |
+18 | #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)]
+ | ^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro
+note: required by a bound in `_::Subtrait`
+ --> tests/ui-stable/invalid-impls/../../../src/macros.rs
+ |
+ | trait Subtrait: $trait {}
+ | ^^^^^^ required by this bound in `Subtrait`
+ |
+ ::: tests/ui-stable/invalid-impls/invalid-impls.rs:23:1
+ |
+23 | impl_or_verify!(T => FromBytes for Foo<T>);
+ | ------------------------------------------ in this macro invocation
+ = note: this error originates in the derive macro `FromBytes` which comes from the expansion of the macro `impl_or_verify` (in Nightly builds, run with -Z macro-backtrace for more info)
+help: consider restricting type parameter `T`
+ |
+23 | impl_or_verify!(T: zerocopy::FromBytes => FromBytes for Foo<T>);
+ | +++++++++++++++++++++
+
+error[E0277]: the trait bound `T: zerocopy::AsBytes` is not satisfied
+ --> tests/ui-stable/invalid-impls/invalid-impls.rs:24:34
+ |
+24 | impl_or_verify!(T => AsBytes for Foo<T>);
+ | ^^^^^^ the trait `zerocopy::AsBytes` is not implemented for `T`
+ |
+note: required for `Foo<T>` to implement `zerocopy::AsBytes`
+ --> tests/ui-stable/invalid-impls/invalid-impls.rs:18:33
+ |
+18 | #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)]
+ | ^^^^^^^ unsatisfied trait bound introduced in this `derive` macro
+note: required by a bound in `_::Subtrait`
+ --> tests/ui-stable/invalid-impls/../../../src/macros.rs
+ |
+ | trait Subtrait: $trait {}
+ | ^^^^^^ required by this bound in `Subtrait`
+ |
+ ::: tests/ui-stable/invalid-impls/invalid-impls.rs:24:1
+ |
+24 | impl_or_verify!(T => AsBytes for Foo<T>);
+ | ---------------------------------------- in this macro invocation
+ = note: this error originates in the derive macro `AsBytes` which comes from the expansion of the macro `impl_or_verify` (in Nightly builds, run with -Z macro-backtrace for more info)
+help: consider restricting type parameter `T`
+ |
+24 | impl_or_verify!(T: zerocopy::AsBytes => AsBytes for Foo<T>);
+ | +++++++++++++++++++
+
+error[E0277]: the trait bound `T: zerocopy::Unaligned` is not satisfied
+ --> tests/ui-stable/invalid-impls/invalid-impls.rs:25:36
+ |
+25 | impl_or_verify!(T => Unaligned for Foo<T>);
+ | ^^^^^^ the trait `zerocopy::Unaligned` is not implemented for `T`
+ |
+note: required for `Foo<T>` to implement `zerocopy::Unaligned`
+ --> tests/ui-stable/invalid-impls/invalid-impls.rs:18:42
+ |
+18 | #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)]
+ | ^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro
+note: required by a bound in `_::Subtrait`
+ --> tests/ui-stable/invalid-impls/../../../src/macros.rs
+ |
+ | trait Subtrait: $trait {}
+ | ^^^^^^ required by this bound in `Subtrait`
+ |
+ ::: tests/ui-stable/invalid-impls/invalid-impls.rs:25:1
+ |
+25 | impl_or_verify!(T => Unaligned for Foo<T>);
+ | ------------------------------------------ in this macro invocation
+ = note: this error originates in the derive macro `Unaligned` which comes from the expansion of the macro `impl_or_verify` (in Nightly builds, run with -Z macro-backtrace for more info)
+help: consider restricting type parameter `T`
+ |
+25 | impl_or_verify!(T: zerocopy::Unaligned => Unaligned for Foo<T>);
+ | +++++++++++++++++++++
diff --git a/tests/ui-stable/transmute-illegal.rs b/tests/ui-stable/transmute-illegal.rs
new file mode 100644
index 0000000..74b8439
--- /dev/null
+++ b/tests/ui-stable/transmute-illegal.rs
@@ -0,0 +1,10 @@
+// Copyright 2022 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+extern crate zerocopy;
+
+fn main() {}
+
+// It is unsound to inspect the usize value of a pointer during const eval.
+const POINTER_VALUE: usize = zerocopy::transmute!(&0usize as *const usize);
diff --git a/tests/ui-stable/transmute-illegal.stderr b/tests/ui-stable/transmute-illegal.stderr
new file mode 100644
index 0000000..e9ac240
--- /dev/null
+++ b/tests/ui-stable/transmute-illegal.stderr
@@ -0,0 +1,16 @@
+error[E0277]: the trait bound `*const usize: AsBytes` is not satisfied
+ --> tests/ui-stable/transmute-illegal.rs:10:30
+ |
+10 | const POINTER_VALUE: usize = zerocopy::transmute!(&0usize as *const usize);
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ | |
+ | the trait `AsBytes` is not implemented for `*const usize`
+ | required by a bound introduced by this call
+ |
+ = help: the trait `AsBytes` is implemented for `usize`
+note: required by a bound in `POINTER_VALUE::transmute`
+ --> tests/ui-stable/transmute-illegal.rs:10:30
+ |
+10 | const POINTER_VALUE: usize = zerocopy::transmute!(&0usize as *const usize);
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `transmute`
+ = note: this error originates in the macro `zerocopy::transmute` (in Nightly builds, run with -Z macro-backtrace for more info)