author    Jeff Vander Stoep <jeffv@google.com>  2023-02-03 12:08:31 +0100
committer Jeff Vander Stoep <jeffv@google.com>  2023-02-03 12:08:31 +0100
commit    8affe0a1c21bbce615d65c02c1371c4e3c4a007b (patch)
tree      b422816a3105a3ca0224abb2c0c1a0f1639037ad
parent    f7698963524eb363f4c643166d1a95d2a9c63211 (diff)
download  pest_generator-8affe0a1c21bbce615d65c02c1371c4e3c4a007b.tar.gz
Upgrade pest_generator to 2.5.4
This project was upgraded with external_updater.
Usage: tools/external_updater/updater.sh update rust/crates/pest_generator
For more info, check https://cs.android.com/android/platform/superproject/+/master:tools/external_updater/README.md

Test: TreeHugger
Change-Id: I5adf6ac238e68dab251099461c39fd74e6f3ffc1
-rw-r--r--  Android.bp       |   6
-rw-r--r--  Cargo.toml       |   6
-rw-r--r--  Cargo.toml.orig  |   6
-rw-r--r--  METADATA         |  10
-rw-r--r--  src/docs.rs      | 122
-rw-r--r--  src/generator.rs | 116
-rw-r--r--  src/lib.rs       | 168
-rw-r--r--  tests/test.pest  |  20
8 files changed, 363 insertions, 91 deletions
diff --git a/Android.bp b/Android.bp
index f64f23b..bb1c5bb 100644
--- a/Android.bp
+++ b/Android.bp
@@ -43,7 +43,7 @@ rust_library_host {
name: "libpest_generator",
crate_name: "pest_generator",
cargo_env_compat: true,
- cargo_pkg_version: "2.5.1",
+ cargo_pkg_version: "2.5.4",
srcs: ["src/lib.rs"],
edition: "2021",
features: [
@@ -58,8 +58,4 @@ rust_library_host {
"libsyn",
],
compile_multilib: "first",
- apex_available: [
- "//apex_available:platform",
- "//apex_available:anyapex",
- ],
}
diff --git a/Cargo.toml b/Cargo.toml
index e21784d..8289ad8 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -13,7 +13,7 @@
edition = "2021"
rust-version = "1.56"
name = "pest_generator"
-version = "2.5.1"
+version = "2.5.4"
authors = ["DragoČ™ Tiselice <dragostiselice@gmail.com>"]
description = "pest code generator"
homepage = "https://pest.rs/"
@@ -28,11 +28,11 @@ license = "MIT/Apache-2.0"
repository = "https://github.com/pest-parser/pest"
[dependencies.pest]
-version = "2.5.1"
+version = "2.5.4"
default-features = false
[dependencies.pest_meta]
-version = "2.5.1"
+version = "2.5.4"
[dependencies.proc-macro2]
version = "1.0"
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
index 1836fe0..df4b4da 100644
--- a/Cargo.toml.orig
+++ b/Cargo.toml.orig
@@ -1,7 +1,7 @@
[package]
name = "pest_generator"
description = "pest code generator"
-version = "2.5.1"
+version = "2.5.4"
edition = "2021"
authors = ["DragoČ™ Tiselice <dragostiselice@gmail.com>"]
homepage = "https://pest.rs/"
@@ -18,8 +18,8 @@ default = ["std"]
std = ["pest/std"]
[dependencies]
-pest = { path = "../pest", version = "2.5.1", default-features = false }
-pest_meta = { path = "../meta", version = "2.5.1" }
+pest = { path = "../pest", version = "2.5.4", default-features = false }
+pest_meta = { path = "../meta", version = "2.5.4" }
proc-macro2 = "1.0"
quote = "1.0"
syn = "1.0"
diff --git a/METADATA b/METADATA
index ce1af20..6295625 100644
--- a/METADATA
+++ b/METADATA
@@ -11,13 +11,13 @@ third_party {
}
url {
type: ARCHIVE
- value: "https://static.crates.io/crates/pest_generator/pest_generator-2.5.1.crate"
+ value: "https://static.crates.io/crates/pest_generator/pest_generator-2.5.4.crate"
}
- version: "2.5.1"
+ version: "2.5.4"
license_type: NOTICE
last_upgrade_date {
- year: 2022
- month: 12
- day: 13
+ year: 2023
+ month: 2
+ day: 3
}
}
diff --git a/src/docs.rs b/src/docs.rs
new file mode 100644
index 0000000..8660e53
--- /dev/null
+++ b/src/docs.rs
@@ -0,0 +1,122 @@
+use pest::iterators::Pairs;
+use pest_meta::parser::Rule;
+use std::collections::HashMap;
+
+#[derive(Debug)]
+pub(crate) struct DocComment {
+ pub grammar_doc: String,
+
+ /// HashMap for store all doc_comments for rules.
+ /// key is rule name, value is doc_comment.
+ pub line_docs: HashMap<String, String>,
+}
+
+/// Consume pairs to matches `Rule::grammar_doc`, `Rule::line_doc` into `DocComment`
+///
+/// e.g.
+///
+/// a pest file:
+///
+/// ```ignore
+/// //! This is a grammar doc
+/// /// line doc 1
+/// /// line doc 2
+/// foo = {}
+///
+/// /// line doc 3
+/// bar = {}
+/// ```
+///
+/// Then will get:
+///
+/// ```ignore
+/// grammar_doc = "This is a grammar doc"
+/// line_docs = { "foo": "line doc 1\nline doc 2", "bar": "line doc 3" }
+/// ```
+pub(crate) fn consume(pairs: Pairs<'_, Rule>) -> DocComment {
+ let mut grammar_doc = String::new();
+
+ let mut line_docs: HashMap<String, String> = HashMap::new();
+ let mut line_doc = String::new();
+
+ for pair in pairs {
+ match pair.as_rule() {
+ Rule::grammar_doc => {
+ // grammar_doc > inner_doc
+ let inner_doc = pair.into_inner().next().unwrap();
+ grammar_doc.push_str(inner_doc.as_str());
+ grammar_doc.push('\n');
+ }
+ Rule::grammar_rule => {
+ if let Some(inner) = pair.into_inner().next() {
+ // grammar_rule > line_doc | identifier
+ match inner.as_rule() {
+ Rule::line_doc => {
+ if let Some(inner_doc) = inner.into_inner().next() {
+ line_doc.push_str(inner_doc.as_str());
+ line_doc.push('\n');
+ }
+ }
+ Rule::identifier => {
+ if !line_doc.is_empty() {
+ let rule_name = inner.as_str().to_owned();
+
+ // Remove last \n
+ line_doc.pop();
+ line_docs.insert(rule_name, line_doc.clone());
+ line_doc.clear();
+ }
+ }
+ _ => (),
+ }
+ }
+ }
+ _ => (),
+ }
+ }
+
+ if !grammar_doc.is_empty() {
+ // Remove last \n
+ grammar_doc.pop();
+ }
+
+ DocComment {
+ grammar_doc,
+ line_docs,
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::collections::HashMap;
+
+ use pest_meta::parser;
+ use pest_meta::parser::Rule;
+
+ #[test]
+ fn test_doc_comment() {
+ let pairs = match parser::parse(Rule::grammar_rules, include_str!("../tests/test.pest")) {
+ Ok(pairs) => pairs,
+ Err(_) => panic!("error parsing tests/test.pest"),
+ };
+
+ let doc_comment = super::consume(pairs);
+
+ let mut expected = HashMap::new();
+ expected.insert("foo".to_owned(), "Matches foo str, e.g.: `foo`".to_owned());
+ expected.insert(
+ "bar".to_owned(),
+ "Matches bar str,\n Indent 2, e.g: `bar` or `foobar`".to_owned(),
+ );
+ expected.insert(
+ "dar".to_owned(),
+ "Matches dar\nMatch dar description".to_owned(),
+ );
+ assert_eq!(expected, doc_comment.line_docs);
+
+ assert_eq!(
+ "A parser for JSON file.\nAnd this is a example for JSON parser.\n\n indent-4-space",
+ doc_comment.grammar_doc
+ );
+ }
+}
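
The new docs module is crate-internal (pub(crate)), so the closest public driver is pest_meta's parser, just as the unit test above uses it. A minimal sketch of the data flow, with a hypothetical one-rule grammar (not part of the diff):

// Sketch only: `docs::consume` is not public API; this mirrors the unit test.
use pest_meta::parser::{self, Rule};

fn main() {
    let grammar = "//! Grammar-level doc\n/// Documents `word`\nword = { ASCII_ALPHA+ }\n";
    let pairs = parser::parse(Rule::grammar_rules, grammar).expect("parse failed");
    // Inside pest_generator, docs::consume(pairs) would yield:
    //   grammar_doc == "Grammar-level doc"
    //   line_docs   == { "word": "Documents `word`" }
    println!("parsed {} top-level pairs", pairs.count());
}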
diff --git a/src/generator.rs b/src/generator.rs
index f3da1ba..87d1f00 100644
--- a/src/generator.rs
+++ b/src/generator.rs
@@ -9,20 +9,23 @@
use std::path::PathBuf;
-use proc_macro2::{Span, TokenStream};
+use proc_macro2::TokenStream;
use quote::{ToTokens, TokenStreamExt};
use syn::{self, Generics, Ident};
+use pest::unicode::unicode_property_names;
use pest_meta::ast::*;
use pest_meta::optimizer::*;
-use pest_meta::UNICODE_PROPERTY_NAMES;
-pub fn generate(
+use crate::docs::DocComment;
+
+pub(crate) fn generate(
name: Ident,
generics: &Generics,
path: Option<PathBuf>,
rules: Vec<OptimizedRule>,
defaults: Vec<&str>,
+ doc_comment: &DocComment,
include_grammar: bool,
) -> TokenStream {
let uses_eoi = defaults.iter().any(|name| *name == "EOI");
@@ -36,7 +39,7 @@ pub fn generate(
} else {
quote!()
};
- let rule_enum = generate_enum(&rules, uses_eoi);
+ let rule_enum = generate_enum(&rules, doc_comment, uses_eoi);
let patterns = generate_patterns(&rules, uses_eoi);
let skip = generate_skip(&rules);
@@ -153,7 +156,7 @@ fn generate_builtin_rules() -> Vec<(&'static str, TokenStream)> {
let box_ty = box_type();
- for property in UNICODE_PROPERTY_NAMES {
+ for property in unicode_property_names() {
let property_ident: Ident = syn::parse_str(property).unwrap();
// insert manually for #property substitution
builtins.push((property, quote! {
@@ -169,7 +172,7 @@ fn generate_builtin_rules() -> Vec<(&'static str, TokenStream)> {
// Needed because Cargo doesn't watch for changes in grammars.
fn generate_include(name: &Ident, path: &str) -> TokenStream {
- let const_name = Ident::new(&format!("_PEST_GRAMMAR_{}", name), Span::call_site());
+ let const_name = format_ident!("_PEST_GRAMMAR_{}", name);
// Need to make this relative to the current directory since the path to the file
// is derived from the CARGO_MANIFEST_DIR environment variable
let mut current_dir = std::env::current_dir().expect("Unable to get current directory");
@@ -181,12 +184,25 @@ fn generate_include(name: &Ident, path: &str) -> TokenStream {
}
}
-fn generate_enum(rules: &[OptimizedRule], uses_eoi: bool) -> TokenStream {
- let rules = rules
- .iter()
- .map(|rule| Ident::new(rule.name.as_str(), Span::call_site()));
+fn generate_enum(rules: &[OptimizedRule], doc_comment: &DocComment, uses_eoi: bool) -> TokenStream {
+ let rules = rules.iter().map(|rule| {
+ let rule_name = format_ident!("r#{}", rule.name);
+
+ match doc_comment.line_docs.get(&rule.name) {
+ Some(doc) => quote! {
+ #[doc = #doc]
+ #rule_name
+ },
+ None => quote! {
+ #rule_name
+ },
+ }
+ });
+
+ let grammar_doc = &doc_comment.grammar_doc;
if uses_eoi {
quote! {
+ #[doc = #grammar_doc]
#[allow(dead_code, non_camel_case_types, clippy::upper_case_acronyms)]
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub enum Rule {
@@ -196,6 +212,7 @@ fn generate_enum(rules: &[OptimizedRule], uses_eoi: bool) -> TokenStream {
}
} else {
quote! {
+ #[doc = #grammar_doc]
#[allow(dead_code, non_camel_case_types, clippy::upper_case_acronyms)]
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub enum Rule {
@@ -209,7 +226,8 @@ fn generate_patterns(rules: &[OptimizedRule], uses_eoi: bool) -> TokenStream {
let mut rules: Vec<TokenStream> = rules
.iter()
.map(|rule| {
- let rule = Ident::new(rule.name.as_str(), Span::call_site());
+ let rule = format_ident!("r#{}", rule.name);
+
quote! {
Rule::#rule => rules::#rule(state)
}
@@ -228,10 +246,10 @@ fn generate_patterns(rules: &[OptimizedRule], uses_eoi: bool) -> TokenStream {
}
fn generate_rule(rule: OptimizedRule) -> TokenStream {
- let name = Ident::new(&rule.name, Span::call_site());
+ let name = format_ident!("r#{}", rule.name);
let expr = if rule.ty == RuleType::Atomic || rule.ty == RuleType::CompoundAtomic {
generate_expr_atomic(rule.expr)
- } else if name == "WHITESPACE" || name == "COMMENT" {
+ } else if rule.name == "WHITESPACE" || rule.name == "COMMENT" {
let atomic = generate_expr_atomic(rule.expr);
quote! {
@@ -364,7 +382,7 @@ fn generate_expr(expr: OptimizedExpr) -> TokenStream {
}
}
OptimizedExpr::Ident(ident) => {
- let ident = Ident::new(&ident, Span::call_site());
+ let ident = format_ident!("r#{}", ident);
quote! { self::#ident(state) }
}
OptimizedExpr::PeekSlice(start, end_) => {
@@ -510,7 +528,7 @@ fn generate_expr_atomic(expr: OptimizedExpr) -> TokenStream {
}
}
OptimizedExpr::Ident(ident) => {
- let ident = Ident::new(&ident, Span::call_site());
+ let ident = format_ident!("r#{}", ident);
quote! { self::#ident(state) }
}
OptimizedExpr::PeekSlice(start, end_) => {
@@ -661,6 +679,9 @@ fn option_type() -> TokenStream {
mod tests {
use super::*;
+ use proc_macro2::Span;
+ use std::collections::HashMap;
+
#[test]
fn rule_enum_simple() {
let rules = vec![OptimizedRule {
@@ -669,13 +690,23 @@ mod tests {
expr: OptimizedExpr::Ident("g".to_owned()),
}];
+ let mut line_docs = HashMap::new();
+ line_docs.insert("f".to_owned(), "This is rule comment".to_owned());
+
+ let doc_comment = &DocComment {
+ grammar_doc: "Rule doc\nhello".to_owned(),
+ line_docs,
+ };
+
assert_eq!(
- generate_enum(&rules, false).to_string(),
+ generate_enum(&rules, doc_comment, false).to_string(),
quote! {
+ #[doc = "Rule doc\nhello"]
#[allow(dead_code, non_camel_case_types, clippy::upper_case_acronyms)]
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub enum Rule {
- f
+ #[doc = "This is rule comment"]
+ r#f
}
}
.to_string()
@@ -863,7 +894,7 @@ mod tests {
assert_eq!(
generate_expr(expr).to_string(),
quote! {
- self::a(state).or_else(|state| {
+ self::r#a(state).or_else(|state| {
state.sequence(|state| {
state.match_range('a'..'b').and_then(|state| {
super::hidden::skip(state)
@@ -929,7 +960,7 @@ mod tests {
assert_eq!(
generate_expr_atomic(expr).to_string(),
quote! {
- self::a(state).or_else(|state| {
+ self::r#a(state).or_else(|state| {
state.sequence(|state| {
state.match_range('a'..'b').and_then(|state| {
state.lookahead(false, |state| {
@@ -957,14 +988,31 @@ mod tests {
}
#[test]
- fn generate_complete() {
+ fn test_generate_complete() {
let name = Ident::new("MyParser", Span::call_site());
let generics = Generics::default();
- let rules = vec![OptimizedRule {
- name: "a".to_owned(),
- ty: RuleType::Silent,
- expr: OptimizedExpr::Str("b".to_owned()),
- }];
+
+ let rules = vec![
+ OptimizedRule {
+ name: "a".to_owned(),
+ ty: RuleType::Silent,
+ expr: OptimizedExpr::Str("b".to_owned()),
+ },
+ OptimizedRule {
+ name: "if".to_owned(),
+ ty: RuleType::Silent,
+ expr: OptimizedExpr::Ident("a".to_owned()),
+ },
+ ];
+
+ let mut line_docs = HashMap::new();
+ line_docs.insert("if".to_owned(), "If statement".to_owned());
+
+ let doc_comment = &DocComment {
+ line_docs,
+ grammar_doc: "This is Rule doc\nThis is second line".to_owned(),
+ };
+
let defaults = vec!["ANY"];
let result = result_type();
let box_ty = box_type();
@@ -972,15 +1020,18 @@ mod tests {
current_dir.push("test.pest");
let test_path = current_dir.to_str().expect("path contains invalid unicode");
assert_eq!(
- generate(name, &generics, Some(PathBuf::from("test.pest")), rules, defaults, true).to_string(),
+ generate(name, &generics, Some(PathBuf::from("test.pest")), rules, defaults, doc_comment, true).to_string(),
quote! {
#[allow(non_upper_case_globals)]
const _PEST_GRAMMAR_MyParser: &'static str = include_str!(#test_path);
+ #[doc = "This is Rule doc\nThis is second line"]
#[allow(dead_code, non_camel_case_types, clippy::upper_case_acronyms)]
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub enum Rule {
- a
+ r#a,
+ #[doc = "If statement"]
+ r#if
}
#[allow(clippy::all)]
@@ -1009,11 +1060,17 @@ mod tests {
#[inline]
#[allow(non_snake_case, unused_variables)]
- pub fn a(state: #box_ty<::pest::ParserState<'_, Rule>>) -> ::pest::ParseResult<#box_ty<::pest::ParserState<'_, Rule>>> {
+ pub fn r#a(state: #box_ty<::pest::ParserState<'_, Rule>>) -> ::pest::ParseResult<#box_ty<::pest::ParserState<'_, Rule>>> {
state.match_string("b")
}
#[inline]
+ #[allow(non_snake_case, unused_variables)]
+ pub fn r#if(state: #box_ty<::pest::ParserState<'_, Rule>>) -> ::pest::ParseResult<#box_ty<::pest::ParserState<'_, Rule>>> {
+ self::r#a(state)
+ }
+
+ #[inline]
#[allow(dead_code, non_snake_case, unused_variables)]
pub fn ANY(state: #box_ty<::pest::ParserState<'_, Rule>>) -> ::pest::ParseResult<#box_ty<::pest::ParserState<'_, Rule>>> {
state.skip(1)
@@ -1025,7 +1082,8 @@ mod tests {
::pest::state(input, |state| {
match rule {
- Rule::a => rules::a(state)
+ Rule::r#a => rules::r#a(state),
+ Rule::r#if => rules::r#if(state)
}
})
}
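
The switch from Ident::new to format_ident!("r#{}", ...) throughout this file is what allows rule names that collide with Rust keywords. A standalone sketch of the mechanism (not part of the diff):

// Sketch: the r# prefix makes format_ident! emit a raw identifier, so a
// grammar rule named `if` becomes the legal variant `r#if`. A plain
// Ident::new("if", ..) would emit the bare keyword and the generated
// enum would not compile.
use quote::{format_ident, quote};

fn main() {
    let rule_name = "if"; // a rule name that is also a Rust keyword
    let ident = format_ident!("r#{}", rule_name);
    let tokens = quote! { pub enum Rule { #ident } };
    println!("{}", tokens); // pub enum Rule { r#if }
}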
diff --git a/src/lib.rs b/src/lib.rs
index 2a1203e..f9c118f 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -31,6 +31,7 @@ use syn::{Attribute, DeriveInput, Generics, Ident, Lit, Meta};
#[macro_use]
mod macros;
+mod docs;
mod generator;
use pest_meta::parser::{self, rename_meta_rule, Rule};
@@ -41,39 +42,49 @@ use pest_meta::{optimizer, unwrap_or_report, validator};
/// "include_str" statement (done in pest_derive, but turned off in the local bootstrap).
pub fn derive_parser(input: TokenStream, include_grammar: bool) -> TokenStream {
let ast: DeriveInput = syn::parse2(input).unwrap();
- let (name, generics, content) = parse_derive(ast);
-
- let (data, path) = match content {
- GrammarSource::File(ref path) => {
- let root = env::var("CARGO_MANIFEST_DIR").unwrap_or_else(|_| ".".into());
-
- // Check whether we can find a file at the path relative to the CARGO_MANIFEST_DIR
- // first.
- //
- // If we cannot find the expected file over there, fallback to the
- // `CARGO_MANIFEST_DIR/src`, which is the old default and kept for convenience
- // reasons.
- // TODO: This could be refactored once `std::path::absolute()` get's stabilized.
- // https://doc.rust-lang.org/std/path/fn.absolute.html
- let path = if Path::new(&root).join(path).exists() {
- Path::new(&root).join(path)
- } else {
- Path::new(&root).join("src/").join(path)
- };
-
- let file_name = match path.file_name() {
- Some(file_name) => file_name,
- None => panic!("grammar attribute should point to a file"),
- };
-
- let data = match read_file(&path) {
- Ok(data) => data,
- Err(error) => panic!("error opening {:?}: {}", file_name, error),
- };
- (data, Some(path.clone()))
+ let (name, generics, contents) = parse_derive(ast);
+
+ let mut data = String::new();
+ let mut path = None;
+
+ for content in contents {
+ let (_data, _path) = match content {
+ GrammarSource::File(ref path) => {
+ let root = env::var("CARGO_MANIFEST_DIR").unwrap_or_else(|_| ".".into());
+
+ // Check whether we can find a file at the path relative to the CARGO_MANIFEST_DIR
+ // first.
+ //
+ // If we cannot find the expected file over there, fallback to the
+ // `CARGO_MANIFEST_DIR/src`, which is the old default and kept for convenience
+ // reasons.
+ // TODO: This could be refactored once `std::path::absolute()` get's stabilized.
+ // https://doc.rust-lang.org/std/path/fn.absolute.html
+ let path = if Path::new(&root).join(path).exists() {
+ Path::new(&root).join(path)
+ } else {
+ Path::new(&root).join("src/").join(path)
+ };
+
+ let file_name = match path.file_name() {
+ Some(file_name) => file_name,
+ None => panic!("grammar attribute should point to a file"),
+ };
+
+ let data = match read_file(&path) {
+ Ok(data) => data,
+ Err(error) => panic!("error opening {:?}: {}", file_name, error),
+ };
+ (data, Some(path.clone()))
+ }
+ GrammarSource::Inline(content) => (content, None),
+ };
+
+ data.push_str(&_data);
+ if _path.is_some() {
+ path = _path;
}
- GrammarSource::Inline(content) => (content, None),
- };
+ }
let pairs = match parser::parse(Rule::grammar_rules, &data) {
Ok(pairs) => pairs,
@@ -81,10 +92,19 @@ pub fn derive_parser(input: TokenStream, include_grammar: bool) -> TokenStream {
};
let defaults = unwrap_or_report(validator::validate_pairs(pairs.clone()));
+ let doc_comment = docs::consume(pairs.clone());
let ast = unwrap_or_report(parser::consume_rules(pairs));
let optimized = optimizer::optimize(ast);
- generator::generate(name, &generics, path, optimized, defaults, include_grammar)
+ generator::generate(
+ name,
+ &generics,
+ path,
+ optimized,
+ defaults,
+ &doc_comment,
+ include_grammar,
+ )
}
fn read_file<P: AsRef<Path>>(path: P) -> io::Result<String> {
@@ -100,7 +120,7 @@ enum GrammarSource {
Inline(String),
}
-fn parse_derive(ast: DeriveInput) -> (Ident, Generics, GrammarSource) {
+fn parse_derive(ast: DeriveInput) -> (Ident, Generics, Vec<GrammarSource>) {
let name = ast.ident;
let generics = ast.generics;
@@ -115,13 +135,16 @@ fn parse_derive(ast: DeriveInput) -> (Ident, Generics, GrammarSource) {
})
.collect();
- let argument = match grammar.len() {
- 0 => panic!("a grammar file needs to be provided with the #[grammar = \"PATH\"] or #[grammar_inline = \"GRAMMAR CONTENTS\"] attribute"),
- 1 => get_attribute(grammar[0]),
- _ => panic!("only 1 grammar file can be provided"),
- };
+ if grammar.is_empty() {
+ panic!("a grammar file needs to be provided with the #[grammar = \"PATH\"] or #[grammar_inline = \"GRAMMAR CONTENTS\"] attribute");
+ }
- (name, generics, argument)
+ let mut grammar_sources = Vec::with_capacity(grammar.len());
+ for attr in grammar {
+ grammar_sources.push(get_attribute(attr))
+ }
+
+ (name, generics, grammar_sources)
}
fn get_attribute(attr: &Attribute) -> GrammarSource {
@@ -153,8 +176,8 @@ mod tests {
pub struct MyParser<'a, T>;
";
let ast = syn::parse_str(definition).unwrap();
- let (_, _, filename) = parse_derive(ast);
- assert_eq!(filename, GrammarSource::Inline("GRAMMAR".to_string()));
+ let (_, _, filenames) = parse_derive(ast);
+ assert_eq!(filenames, [GrammarSource::Inline("GRAMMAR".to_string())]);
}
#[test]
@@ -165,12 +188,11 @@ mod tests {
pub struct MyParser<'a, T>;
";
let ast = syn::parse_str(definition).unwrap();
- let (_, _, filename) = parse_derive(ast);
- assert_eq!(filename, GrammarSource::File("myfile.pest".to_string()));
+ let (_, _, filenames) = parse_derive(ast);
+ assert_eq!(filenames, [GrammarSource::File("myfile.pest".to_string())]);
}
#[test]
- #[should_panic(expected = "only 1 grammar file can be provided")]
fn derive_multiple_grammars() {
let definition = "
#[other_attr]
@@ -179,7 +201,14 @@ mod tests {
pub struct MyParser<'a, T>;
";
let ast = syn::parse_str(definition).unwrap();
- parse_derive(ast);
+ let (_, _, filenames) = parse_derive(ast);
+ assert_eq!(
+ filenames,
+ [
+ GrammarSource::File("myfile1.pest".to_string()),
+ GrammarSource::File("myfile2.pest".to_string())
+ ]
+ );
}
#[test]
@@ -193,4 +222,51 @@ mod tests {
let ast = syn::parse_str(definition).unwrap();
parse_derive(ast);
}
+
+ #[test]
+ #[should_panic(
+ expected = "a grammar file needs to be provided with the #[grammar = \"PATH\"] or #[grammar_inline = \"GRAMMAR CONTENTS\"] attribute"
+ )]
+ fn derive_no_grammar() {
+ let definition = "
+ #[other_attr]
+ pub struct MyParser<'a, T>;
+ ";
+ let ast = syn::parse_str(definition).unwrap();
+ parse_derive(ast);
+ }
+
+ #[test]
+ fn test_generate_doc() {
+ let input = quote! {
+ #[derive(Parser)]
+ #[grammar = "../tests/test.pest"]
+ pub struct TestParser;
+ };
+
+ let token = super::derive_parser(input, true);
+
+ let expected = quote! {
+ #[doc = "A parser for JSON file.\nAnd this is a example for JSON parser.\n\n indent-4-space"]
+ #[allow(dead_code, non_camel_case_types, clippy::upper_case_acronyms)]
+ #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+
+ pub enum Rule {
+ #[doc = "Matches foo str, e.g.: `foo`"]
+ r#foo,
+ #[doc = "Matches bar str,\n Indent 2, e.g: `bar` or `foobar`"]
+ r#bar,
+ r#bar1,
+ #[doc = "Matches dar\nMatch dar description"]
+ r#dar
+ }
+ };
+
+ assert!(
+ token.to_string().contains(expected.to_string().as_str()),
+ "{}\n\nExpected to contains:\n{}",
+ token,
+ expected
+ );
+ }
}
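
With parse_derive now returning Vec<GrammarSource>, several grammar attributes can be combined; derive_parser concatenates their contents into one grammar string before parsing, as the updated derive_multiple_grammars test exercises. A usage sketch with hypothetical file names:

// Sketch: rules from both files end up in a single generated Rule enum.
use pest_derive::Parser;

#[derive(Parser)]
#[grammar = "base.pest"]
#[grammar = "extensions.pest"]
pub struct CombinedParser;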
diff --git a/tests/test.pest b/tests/test.pest
new file mode 100644
index 0000000..c86a65f
--- /dev/null
+++ b/tests/test.pest
@@ -0,0 +1,20 @@
+//! A parser for JSON file.
+//! And this is a example for JSON parser.
+//!
+//!     indent-4-space
+
+/// Matches foo str, e.g.: `foo`
+foo = { "foo" }
+
+/// Matches bar str,
+///   Indent 2, e.g: `bar` or `foobar`
+
+bar = { "bar" | "foobar" }
+
+bar1 = { "bar1" }
+
+/// Matches dar
+
+/// Match dar description
+
+dar = { "da" } \ No newline at end of file
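
For reference, a sketch of using a parser derived from the grammar above (assuming it is saved as tests/test.pest in a crate depending on pest and pest_derive at this version):

// Sketch: the grammar's doc comments surface as rustdoc on Rule and its
// variants; parsing itself is unchanged.
use pest::Parser;
use pest_derive::Parser;

#[derive(Parser)]
#[grammar = "tests/test.pest"] // resolved relative to CARGO_MANIFEST_DIR
struct TestParser;

fn main() {
    let pairs = TestParser::parse(Rule::foo, "foo").expect("parse failed");
    for pair in pairs {
        println!("{:?} -> {:?}", pair.as_rule(), pair.as_str());
    }
}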