author     Alice Ryhl <aliceryhl@google.com>    2023-11-01 10:29:24 +0000
committer  Alice Ryhl <aliceryhl@google.com>    2023-11-01 12:09:37 +0000
commit     98c70049ece2a1ef8feb2cee1523e12bf980fbf9 (patch)
tree       a55449e24e272fabf9ced9b4ca2ad3a4d70c49d9
parent     e80b3d6e5fffc67d4e978f24b3e7e4135bff4d60 (diff)
Update tokio-macros to v2.1.0

Test: ran Tokio's tests
Change-Id: I8b3045bf350e3fa3f27fd176bfc11a92cdbed86f
-rw-r--r--  .cargo_vcs_info.json    2
-rw-r--r--  Android.bp              2
-rw-r--r--  CHANGELOG.md            8
-rw-r--r--  Cargo.toml              2
-rw-r--r--  Cargo.toml.orig         2
-rw-r--r--  METADATA                8
-rw-r--r--  TEST_MAPPING            6
-rw-r--r--  src/entry.rs          185
-rw-r--r--  src/lib.rs             11

9 files changed, 170 insertions, 56 deletions
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
index 8ef4c84..da31265 100644
--- a/.cargo_vcs_info.json
+++ b/.cargo_vcs_info.json
@@ -1,6 +1,6 @@
{
"git": {
- "sha1": "614fe357fc4a91bc6d1c904fbbfe3c5959f690f6"
+ "sha1": "66c62a4b74f9da7be3198bb7ac77d8d18967d51e"
},
"path_in_vcs": "tokio-macros"
}
\ No newline at end of file
diff --git a/Android.bp b/Android.bp
index f0df369..846146a 100644
--- a/Android.bp
+++ b/Android.bp
@@ -22,7 +22,7 @@ rust_proc_macro {
name: "libtokio_macros",
crate_name: "tokio_macros",
cargo_env_compat: true,
- cargo_pkg_version: "2.0.0",
+ cargo_pkg_version: "2.1.0",
srcs: ["src/lib.rs"],
edition: "2018",
rustlibs: [
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2acf402..e9d58db 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,11 @@
+# 2.1.0 (April 25th, 2023)
+
+- macros: fix typo in `#[tokio::test]` docs ([#5636])
+- macros: make entrypoints more efficient ([#5621])
+
+[#5621]: https://github.com/tokio-rs/tokio/pull/5621
+[#5636]: https://github.com/tokio-rs/tokio/pull/5636
+
# 2.0.0 (March 24th, 2023)
This major release updates the dependency on the syn crate to 2.0.0, and
diff --git a/Cargo.toml b/Cargo.toml
index 0dd94fb..6bd8b81 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -13,7 +13,7 @@
edition = "2018"
rust-version = "1.56"
name = "tokio-macros"
-version = "2.0.0"
+version = "2.1.0"
authors = ["Tokio Contributors <team@tokio.rs>"]
description = """
Tokio's proc macros.
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
index 291da3c..4ca789d 100644
--- a/Cargo.toml.orig
+++ b/Cargo.toml.orig
@@ -4,7 +4,7 @@ name = "tokio-macros"
# - Remove path dependencies
# - Update CHANGELOG.md.
# - Create "tokio-macros-1.x.y" git tag.
-version = "2.0.0"
+version = "2.1.0"
edition = "2018"
rust-version = "1.56"
authors = ["Tokio Contributors <team@tokio.rs>"]
diff --git a/METADATA b/METADATA
index 118d743..d71aedd 100644
--- a/METADATA
+++ b/METADATA
@@ -11,13 +11,13 @@ third_party {
}
url {
type: ARCHIVE
- value: "https://static.crates.io/crates/tokio-macros/tokio-macros-2.0.0.crate"
+ value: "https://static.crates.io/crates/tokio-macros/tokio-macros-2.1.0.crate"
}
- version: "2.0.0"
+ version: "2.1.0"
license_type: NOTICE
last_upgrade_date {
year: 2023
- month: 4
- day: 3
+ month: 11
+ day: 1
}
}
diff --git a/TEST_MAPPING b/TEST_MAPPING
index 170da8f..a32d61c 100644
--- a/TEST_MAPPING
+++ b/TEST_MAPPING
@@ -18,6 +18,12 @@
},
{
"path": "packages/modules/DnsResolver"
+ },
+ {
+ "path": "system/security/keystore2"
+ },
+ {
+ "path": "system/security/keystore2/legacykeystore"
}
]
}
diff --git a/src/entry.rs b/src/entry.rs
index 3b124cf..edac530 100644
--- a/src/entry.rs
+++ b/src/entry.rs
@@ -1,7 +1,7 @@
-use proc_macro::TokenStream;
-use proc_macro2::Span;
+use proc_macro2::{Span, TokenStream, TokenTree};
use quote::{quote, quote_spanned, ToTokens};
-use syn::{parse::Parser, Ident, Path};
+use syn::parse::{Parse, ParseStream, Parser};
+use syn::{braced, Attribute, Ident, Path, Signature, Visibility};
// syn::AttributeArgs does not implement syn::Parse
type AttributeArgs = syn::punctuated::Punctuated<syn::Meta, syn::Token![,]>;
@@ -230,7 +230,7 @@ fn parse_bool(bool: syn::Lit, span: Span, field: &str) -> Result<bool, syn::Erro
}
fn build_config(
- input: syn::ItemFn,
+ input: &ItemFn,
args: AttributeArgs,
is_test: bool,
rt_multi_thread: bool,
@@ -324,18 +324,13 @@ fn build_config(
config.build()
}
-fn parse_knobs(mut input: syn::ItemFn, is_test: bool, config: FinalConfig) -> TokenStream {
+fn parse_knobs(mut input: ItemFn, is_test: bool, config: FinalConfig) -> TokenStream {
input.sig.asyncness = None;
// If type mismatch occurs, the current rustc points to the last statement.
let (last_stmt_start_span, last_stmt_end_span) = {
- let mut last_stmt = input
- .block
- .stmts
- .last()
- .map(ToTokens::into_token_stream)
- .unwrap_or_default()
- .into_iter();
+ let mut last_stmt = input.stmts.last().cloned().unwrap_or_default().into_iter();
+
// `Span` on stable Rust has a limitation that only points to the first
// token, not the whole tokens. We can work around this limitation by
// using the first/last span of the tokens like
@@ -373,10 +368,8 @@ fn parse_knobs(mut input: syn::ItemFn, is_test: bool, config: FinalConfig) -> To
quote! {}
};
- let body = &input.block;
- let brace_token = input.block.brace_token;
let body_ident = quote! { body };
- let block_expr = quote_spanned! {last_stmt_end_span=>
+ let last_block = quote_spanned! {last_stmt_end_span=>
#[allow(clippy::expect_used, clippy::diverging_sub_expression)]
{
return #rt
@@ -387,6 +380,8 @@ fn parse_knobs(mut input: syn::ItemFn, is_test: bool, config: FinalConfig) -> To
}
};
+ let body = input.body();
+
// For test functions pin the body to the stack and use `Pin<&mut dyn
// Future>` to reduce the amount of `Runtime::block_on` (and related
// functions) copies we generate during compilation due to the generic
@@ -415,25 +410,11 @@ fn parse_knobs(mut input: syn::ItemFn, is_test: bool, config: FinalConfig) -> To
}
};
- input.block = syn::parse2(quote! {
- {
- #body
- #block_expr
- }
- })
- .expect("Parsing failure");
- input.block.brace_token = brace_token;
-
- let result = quote! {
- #header
- #input
- };
-
- result.into()
+ input.into_tokens(header, body, last_block)
}
fn token_stream_with_error(mut tokens: TokenStream, error: syn::Error) -> TokenStream {
- tokens.extend(TokenStream::from(error.into_compile_error()));
+ tokens.extend(error.into_compile_error());
tokens
}
@@ -442,7 +423,7 @@ pub(crate) fn main(args: TokenStream, item: TokenStream, rt_multi_thread: bool)
// If any of the steps for this macro fail, we still want to expand to an item that is as close
// to the expected output as possible. This helps out IDEs such that completions and other
// related features keep working.
- let input: syn::ItemFn = match syn::parse(item.clone()) {
+ let input: ItemFn = match syn::parse2(item.clone()) {
Ok(it) => it,
Err(e) => return token_stream_with_error(item, e),
};
@@ -452,8 +433,8 @@ pub(crate) fn main(args: TokenStream, item: TokenStream, rt_multi_thread: bool)
Err(syn::Error::new_spanned(&input.sig.ident, msg))
} else {
AttributeArgs::parse_terminated
- .parse(args)
- .and_then(|args| build_config(input.clone(), args, false, rt_multi_thread))
+ .parse2(args)
+ .and_then(|args| build_config(&input, args, false, rt_multi_thread))
};
match config {
@@ -466,21 +447,17 @@ pub(crate) fn test(args: TokenStream, item: TokenStream, rt_multi_thread: bool)
// If any of the steps for this macro fail, we still want to expand to an item that is as close
// to the expected output as possible. This helps out IDEs such that completions and other
// related features keep working.
- let input: syn::ItemFn = match syn::parse(item.clone()) {
+ let input: ItemFn = match syn::parse2(item.clone()) {
Ok(it) => it,
Err(e) => return token_stream_with_error(item, e),
};
- let config = if let Some(attr) = input
- .attrs
- .iter()
- .find(|attr| attr.meta.path().is_ident("test"))
- {
+ let config = if let Some(attr) = input.attrs().find(|attr| attr.meta.path().is_ident("test")) {
let msg = "second test attribute is supplied";
Err(syn::Error::new_spanned(attr, msg))
} else {
AttributeArgs::parse_terminated
- .parse(args)
- .and_then(|args| build_config(input.clone(), args, true, rt_multi_thread))
+ .parse2(args)
+ .and_then(|args| build_config(&input, args, true, rt_multi_thread))
};
match config {
@@ -488,3 +465,127 @@ pub(crate) fn test(args: TokenStream, item: TokenStream, rt_multi_thread: bool)
Err(e) => token_stream_with_error(parse_knobs(input, true, DEFAULT_ERROR_CONFIG), e),
}
}
+
+struct ItemFn {
+ outer_attrs: Vec<Attribute>,
+ vis: Visibility,
+ sig: Signature,
+ brace_token: syn::token::Brace,
+ inner_attrs: Vec<Attribute>,
+ stmts: Vec<proc_macro2::TokenStream>,
+}
+
+impl ItemFn {
+ /// Access all attributes of the function item.
+ fn attrs(&self) -> impl Iterator<Item = &Attribute> {
+ self.outer_attrs.iter().chain(self.inner_attrs.iter())
+ }
+
+ /// Get the body of the function item in a manner so that it can be
+ /// conveniently used with the `quote!` macro.
+ fn body(&self) -> Body<'_> {
+ Body {
+ brace_token: self.brace_token,
+ stmts: &self.stmts,
+ }
+ }
+
+ /// Convert our local function item into a token stream.
+ fn into_tokens(
+ self,
+ header: proc_macro2::TokenStream,
+ body: proc_macro2::TokenStream,
+ last_block: proc_macro2::TokenStream,
+ ) -> TokenStream {
+ let mut tokens = proc_macro2::TokenStream::new();
+ header.to_tokens(&mut tokens);
+
+ // Outer attributes are simply streamed as-is.
+ for attr in self.outer_attrs {
+ attr.to_tokens(&mut tokens);
+ }
+
+ // Inner attributes require extra care, since they're not supported on
+ // blocks (which is what we're expanded into) we instead lift them
+ // outside of the function. This matches the behaviour of `syn`.
+ for mut attr in self.inner_attrs {
+ attr.style = syn::AttrStyle::Outer;
+ attr.to_tokens(&mut tokens);
+ }
+
+ self.vis.to_tokens(&mut tokens);
+ self.sig.to_tokens(&mut tokens);
+
+ self.brace_token.surround(&mut tokens, |tokens| {
+ body.to_tokens(tokens);
+ last_block.to_tokens(tokens);
+ });
+
+ tokens
+ }
+}
+
+impl Parse for ItemFn {
+ #[inline]
+ fn parse(input: ParseStream<'_>) -> syn::Result<Self> {
+ // This parse implementation has been largely lifted from `syn`, with
+ // the exception of:
+ // * We don't have access to the plumbing necessary to parse inner
+ // attributes in-place.
+ // * We do our own statements parsing to avoid recursively parsing
+ // entire statements and only look for the parts we're interested in.
+
+ let outer_attrs = input.call(Attribute::parse_outer)?;
+ let vis: Visibility = input.parse()?;
+ let sig: Signature = input.parse()?;
+
+ let content;
+ let brace_token = braced!(content in input);
+ let inner_attrs = Attribute::parse_inner(&content)?;
+
+ let mut buf = proc_macro2::TokenStream::new();
+ let mut stmts = Vec::new();
+
+ while !content.is_empty() {
+ if let Some(semi) = content.parse::<Option<syn::Token![;]>>()? {
+ semi.to_tokens(&mut buf);
+ stmts.push(buf);
+ buf = proc_macro2::TokenStream::new();
+ continue;
+ }
+
+ // Parse a single token tree and extend our current buffer with it.
+ // This avoids parsing the entire content of the sub-tree.
+ buf.extend([content.parse::<TokenTree>()?]);
+ }
+
+ if !buf.is_empty() {
+ stmts.push(buf);
+ }
+
+ Ok(Self {
+ outer_attrs,
+ vis,
+ sig,
+ brace_token,
+ inner_attrs,
+ stmts,
+ })
+ }
+}
+
+struct Body<'a> {
+ brace_token: syn::token::Brace,
+ // Statements, with terminating `;`.
+ stmts: &'a [TokenStream],
+}
+
+impl ToTokens for Body<'_> {
+ fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) {
+ self.brace_token.surround(tokens, |tokens| {
+ for stmt in self.stmts {
+ stmt.to_tokens(tokens);
+ }
+ })
+ }
+}
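
Note: the ItemFn/Body machinery added above is the heart of the "make
entrypoints more efficient" change ([#5621]): instead of having syn fully
parse every statement in the function body (as syn::ItemFn does), the body is
kept as raw token trees and split into statements only at top-level `;`
tokens. Below is a minimal, self-contained sketch of that splitting strategy,
assuming only the syn/proc-macro2/quote crates this crate already depends on;
the name `split_stmts` is illustrative, not part of the source.

    use proc_macro2::{TokenStream, TokenTree};
    use quote::ToTokens;
    use syn::parse::{ParseStream, Parser};

    // Split a token stream into "statements" by cutting at each top-level
    // `;`. Semicolons nested inside `{...}`, `(...)`, or `[...]` live inside
    // a single TokenTree::Group, so they never reach the check below and the
    // whole group is copied verbatim without being descended into.
    fn split_stmts(input: ParseStream<'_>) -> syn::Result<Vec<TokenStream>> {
        let mut stmts = Vec::new();
        let mut buf = TokenStream::new();
        while !input.is_empty() {
            if let Some(semi) = input.parse::<Option<syn::Token![;]>>()? {
                semi.to_tokens(&mut buf);
                stmts.push(std::mem::take(&mut buf));
                continue;
            }
            // One token tree at a time; no recursive statement parsing.
            buf.extend([input.parse::<TokenTree>()?]);
        }
        if !buf.is_empty() {
            stmts.push(buf); // trailing expression without a `;`
        }
        Ok(stmts)
    }

    fn main() -> syn::Result<()> {
        let body: TokenStream = "let x = 1; x + 1".parse().unwrap();
        let stmts = split_stmts.parse2(body)?;
        assert_eq!(stmts.len(), 2); // `let x = 1;` and `x + 1`
        Ok(())
    }

This is also why parse_knobs keeps its span workaround cheaply: the first and
last tokens of input.stmts.last() still carry the spans that the
quote_spanned! blocks need, without a fully typed syn::Stmt in between.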
diff --git a/src/lib.rs b/src/lib.rs
index 11bbbae..1d024f5 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -204,7 +204,7 @@ use proc_macro::TokenStream;
#[proc_macro_attribute]
#[cfg(not(test))] // Work around for rust-lang/rust#62127
pub fn main(args: TokenStream, item: TokenStream) -> TokenStream {
- entry::main(args, item, true)
+ entry::main(args.into(), item.into(), true).into()
}
/// Marks async function to be executed by selected runtime. This macro helps set up a `Runtime`
@@ -269,7 +269,7 @@ pub fn main(args: TokenStream, item: TokenStream) -> TokenStream {
#[proc_macro_attribute]
#[cfg(not(test))] // Work around for rust-lang/rust#62127
pub fn main_rt(args: TokenStream, item: TokenStream) -> TokenStream {
- entry::main(args, item, false)
+ entry::main(args.into(), item.into(), false).into()
}
/// Marks async function to be executed by runtime, suitable to test environment.
@@ -295,8 +295,7 @@ pub fn main_rt(args: TokenStream, item: TokenStream) -> TokenStream {
/// ```
///
/// The `worker_threads` option configures the number of worker threads, and
-/// defaults to the number of cpus on the system. This is the default
-/// flavor.
+/// defaults to the number of cpus on the system.
///
/// Note: The multi-threaded runtime requires the `rt-multi-thread` feature
/// flag.
@@ -427,7 +426,7 @@ pub fn main_rt(args: TokenStream, item: TokenStream) -> TokenStream {
/// ```
#[proc_macro_attribute]
pub fn test(args: TokenStream, item: TokenStream) -> TokenStream {
- entry::test(args, item, true)
+ entry::test(args.into(), item.into(), true).into()
}
/// Marks async function to be executed by runtime, suitable to test environment
@@ -442,7 +441,7 @@ pub fn test(args: TokenStream, item: TokenStream) -> TokenStream {
/// ```
#[proc_macro_attribute]
pub fn test_rt(args: TokenStream, item: TokenStream) -> TokenStream {
- entry::test(args, item, false)
+ entry::test(args.into(), item.into(), false).into()
}
/// Always fails with the error message below.
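
Note: every src/lib.rs hunk applies the same boundary pattern: the
#[proc_macro_attribute] shims now convert proc_macro::TokenStream to
proc_macro2::TokenStream on entry (`args.into(), item.into()`) and back with a
final `.into()`, so all of entry.rs operates on proc-macro2 types, which work
outside a compiler-provided proc-macro context (this is also why
token_stream_with_error no longer needs a TokenStream::from round-trip). A
minimal sketch of the pattern with hypothetical names (`my_attr`, `expand`);
it only compiles in a crate with `proc-macro = true` in Cargo.toml.

    use proc_macro::TokenStream;

    #[proc_macro_attribute]
    pub fn my_attr(args: TokenStream, item: TokenStream) -> TokenStream {
        // The only lines that touch proc_macro types; `From` impls exist
        // in both directions between the two TokenStream types.
        expand(args.into(), item.into()).into()
    }

    // Compiler-independent: callable from ordinary #[cfg(test)] unit tests,
    // which is the point of the #[cfg(not(test))] workaround for
    // rust-lang/rust#62127 seen on the shims above.
    fn expand(
        _args: proc_macro2::TokenStream,
        item: proc_macro2::TokenStream,
    ) -> proc_macro2::TokenStream {
        item // pass-through placeholder for illustration
    }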