author    Jakub Kotur <qtr@google.com>  2020-12-21 17:28:15 +0100
committer Jakub Kotur <qtr@google.com>  2021-03-05 16:39:23 +0100
commit    041839ceabbc67165512fde0d33c91347b758487 (patch)
tree      13641ab7afc7aa43b586606c18d53084dedf7ace /tests
parent    a679e9b8b7e4ae27a19b81f216e375ea8a9cdb8e (diff)
download  rayon-041839ceabbc67165512fde0d33c91347b758487.tar.gz
Initial import of rayon-1.5.0.
Bug: 155309706
Change-Id: I6ff7de1cb89d093d7938abf78d586ed76da85b0d
Diffstat (limited to 'tests')
-rw-r--r--  tests/chars.rs               39
-rw-r--r--  tests/clones.rs             186
-rw-r--r--  tests/collect.rs            111
-rw-r--r--  tests/cross-pool.rs          21
-rw-r--r--  tests/debug.rs              209
-rw-r--r--  tests/intersperse.rs         60
-rw-r--r--  tests/issue671-unzip.rs      17
-rw-r--r--  tests/issue671.rs            16
-rw-r--r--  tests/iter_panic.rs          52
-rw-r--r--  tests/named-threads.rs       24
-rw-r--r--  tests/octillion.rs          130
-rw-r--r--  tests/producer_split_at.rs  344
-rw-r--r--  tests/sort-panic-safe.rs    162
-rw-r--r--  tests/str.rs                116
14 files changed, 1487 insertions, 0 deletions
diff --git a/tests/chars.rs b/tests/chars.rs
new file mode 100644
index 0000000..ac8e3f3
--- /dev/null
+++ b/tests/chars.rs
@@ -0,0 +1,39 @@
+use rayon::prelude::*;
+use std::char;
+
+#[test]
+fn half_open_correctness() {
+ let low = char::from_u32(0xD800 - 0x7).unwrap();
+ let high = char::from_u32(0xE000 + 0x7).unwrap();
+
+ let range = low..high;
+ let mut chars: Vec<char> = range.into_par_iter().collect();
+ chars.sort();
+
+ assert_eq!(
+ chars,
+ vec![
+ '\u{D7F9}', '\u{D7FA}', '\u{D7FB}', '\u{D7FC}', '\u{D7FD}', '\u{D7FE}', '\u{D7FF}',
+ '\u{E000}', '\u{E001}', '\u{E002}', '\u{E003}', '\u{E004}', '\u{E005}', '\u{E006}',
+ ]
+ );
+}
+
+#[test]
+fn closed_correctness() {
+ let low = char::from_u32(0xD800 - 0x7).unwrap();
+ let high = char::from_u32(0xE000 + 0x7).unwrap();
+
+ let range = low..=high;
+ let mut chars: Vec<char> = range.into_par_iter().collect();
+ chars.sort();
+
+ assert_eq!(
+ chars,
+ vec![
+ '\u{D7F9}', '\u{D7FA}', '\u{D7FB}', '\u{D7FC}', '\u{D7FD}', '\u{D7FE}', '\u{D7FF}',
+ '\u{E000}', '\u{E001}', '\u{E002}', '\u{E003}', '\u{E004}', '\u{E005}', '\u{E006}',
+ '\u{E007}',
+ ]
+ );
+}
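The expected vectors above jump from '\u{D7FF}' straight to '\u{E000}' because the surrogate code points in between are not valid `char` values. A minimal standalone sketch of that gap, separate from the patch itself and using only the same `std::char::from_u32` the tests call:

use std::char;

fn main() {
    // Surrogate code points (0xD800..=0xDFFF) are reserved for UTF-16 and
    // are not Unicode scalar values, so `from_u32` rejects them; the
    // parallel char ranges in the tests above simply skip that gap.
    assert!(char::from_u32(0xD7FF).is_some()); // last scalar before the gap
    assert!(char::from_u32(0xD800).is_none()); // start of the surrogate range
    assert!(char::from_u32(0xDFFF).is_none()); // end of the surrogate range
    assert!(char::from_u32(0xE000).is_some()); // first scalar after the gap
}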
diff --git a/tests/clones.rs b/tests/clones.rs
new file mode 100644
index 0000000..fa93f8a
--- /dev/null
+++ b/tests/clones.rs
@@ -0,0 +1,186 @@
+use rayon::prelude::*;
+
+fn check<I>(iter: I)
+where
+ I: ParallelIterator + Clone,
+ I::Item: std::fmt::Debug + PartialEq,
+{
+ let a: Vec<_> = iter.clone().collect();
+ let b: Vec<_> = iter.collect();
+ assert_eq!(a, b);
+}
+
+#[test]
+fn clone_binary_heap() {
+ use std::collections::BinaryHeap;
+ let heap: BinaryHeap<_> = (0..1000).collect();
+ check(heap.par_iter());
+ check(heap.into_par_iter());
+}
+
+#[test]
+fn clone_btree_map() {
+ use std::collections::BTreeMap;
+ let map: BTreeMap<_, _> = (0..1000).enumerate().collect();
+ check(map.par_iter());
+}
+
+#[test]
+fn clone_btree_set() {
+ use std::collections::BTreeSet;
+ let set: BTreeSet<_> = (0..1000).collect();
+ check(set.par_iter());
+}
+
+#[test]
+fn clone_hash_map() {
+ use std::collections::HashMap;
+ let map: HashMap<_, _> = (0..1000).enumerate().collect();
+ check(map.par_iter());
+}
+
+#[test]
+fn clone_hash_set() {
+ use std::collections::HashSet;
+ let set: HashSet<_> = (0..1000).collect();
+ check(set.par_iter());
+}
+
+#[test]
+fn clone_linked_list() {
+ use std::collections::LinkedList;
+ let list: LinkedList<_> = (0..1000).collect();
+ check(list.par_iter());
+ check(list.into_par_iter());
+}
+
+#[test]
+fn clone_vec_deque() {
+ use std::collections::VecDeque;
+ let deque: VecDeque<_> = (0..1000).collect();
+ check(deque.par_iter());
+ check(deque.into_par_iter());
+}
+
+#[test]
+fn clone_option() {
+ let option = Some(0);
+ check(option.par_iter());
+ check(option.into_par_iter());
+}
+
+#[test]
+fn clone_result() {
+ let result = Ok::<_, ()>(0);
+ check(result.par_iter());
+ check(result.into_par_iter());
+}
+
+#[test]
+fn clone_range() {
+ check((0..1000).into_par_iter());
+}
+
+#[test]
+fn clone_range_inclusive() {
+ check((0..=1000).into_par_iter());
+}
+
+#[test]
+fn clone_str() {
+ let s = include_str!("clones.rs");
+ check(s.par_chars());
+ check(s.par_lines());
+ check(s.par_split('\n'));
+ check(s.par_split_terminator('\n'));
+ check(s.par_split_whitespace());
+}
+
+#[test]
+fn clone_vec() {
+ let v: Vec<_> = (0..1000).collect();
+ check(v.par_iter());
+ check(v.par_chunks(42));
+ check(v.par_chunks_exact(42));
+ check(v.par_windows(42));
+ check(v.par_split(|x| x % 3 == 0));
+ check(v.into_par_iter());
+}
+
+#[test]
+fn clone_adaptors() {
+ let v: Vec<_> = (0..1000).map(Some).collect();
+ check(v.par_iter().chain(&v));
+ check(v.par_iter().cloned());
+ check(v.par_iter().copied());
+ check(v.par_iter().enumerate());
+ check(v.par_iter().filter(|_| true));
+ check(v.par_iter().filter_map(|x| *x));
+ check(v.par_iter().flat_map(|x| *x));
+ check(v.par_iter().flat_map_iter(|x| *x));
+ check(v.par_iter().flatten());
+ check(v.par_iter().flatten_iter());
+ check(v.par_iter().with_max_len(1).fold(|| 0, |x, _| x));
+ check(v.par_iter().with_max_len(1).fold_with(0, |x, _| x));
+ check(v.par_iter().with_max_len(1).try_fold(|| 0, |_, &x| x));
+ check(v.par_iter().with_max_len(1).try_fold_with(0, |_, &x| x));
+ check(v.par_iter().inspect(|_| ()));
+ check(v.par_iter().update(|_| ()));
+ check(v.par_iter().interleave(&v));
+ check(v.par_iter().interleave_shortest(&v));
+ check(v.par_iter().intersperse(&None));
+ check(v.par_iter().chunks(3));
+ check(v.par_iter().map(|x| x));
+ check(v.par_iter().map_with(0, |_, x| x));
+ check(v.par_iter().map_init(|| 0, |_, x| x));
+ check(v.par_iter().panic_fuse());
+ check(v.par_iter().positions(|_| true));
+ check(v.par_iter().rev());
+ check(v.par_iter().skip(1));
+ check(v.par_iter().take(1));
+ check(v.par_iter().cloned().while_some());
+ check(v.par_iter().with_max_len(1));
+ check(v.par_iter().with_min_len(1));
+ check(v.par_iter().zip(&v));
+ check(v.par_iter().zip_eq(&v));
+ check(v.par_iter().step_by(2));
+}
+
+#[test]
+fn clone_empty() {
+ check(rayon::iter::empty::<i32>());
+}
+
+#[test]
+fn clone_once() {
+ check(rayon::iter::once(10));
+}
+
+#[test]
+fn clone_repeat() {
+ let x: Option<i32> = None;
+ check(rayon::iter::repeat(x).while_some());
+ check(rayon::iter::repeatn(x, 1000));
+}
+
+#[test]
+fn clone_splitter() {
+ check(rayon::iter::split(0..1000, |x| (x, None)));
+}
+
+#[test]
+fn clone_multizip() {
+ let v: &Vec<_> = &(0..1000).collect();
+ check((v,).into_par_iter());
+ check((v, v).into_par_iter());
+ check((v, v, v).into_par_iter());
+ check((v, v, v, v).into_par_iter());
+ check((v, v, v, v, v).into_par_iter());
+ check((v, v, v, v, v, v).into_par_iter());
+ check((v, v, v, v, v, v, v).into_par_iter());
+ check((v, v, v, v, v, v, v, v).into_par_iter());
+ check((v, v, v, v, v, v, v, v, v).into_par_iter());
+ check((v, v, v, v, v, v, v, v, v, v).into_par_iter());
+ check((v, v, v, v, v, v, v, v, v, v, v).into_par_iter());
+ check((v, v, v, v, v, v, v, v, v, v, v, v).into_par_iter());
+}
diff --git a/tests/collect.rs b/tests/collect.rs
new file mode 100644
index 0000000..48b80f6
--- /dev/null
+++ b/tests/collect.rs
@@ -0,0 +1,111 @@
+use rayon::prelude::*;
+
+use std::panic;
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering;
+use std::sync::Mutex;
+
+#[test]
+fn collect_drop_on_unwind() {
+ struct Recorddrop<'a>(i64, &'a Mutex<Vec<i64>>);
+
+ impl<'a> Drop for Recorddrop<'a> {
+ fn drop(&mut self) {
+ self.1.lock().unwrap().push(self.0);
+ }
+ }
+
+ let test_collect_panic = |will_panic: bool| {
+ let test_vec_len = 1024;
+ let panic_point = 740;
+
+ let mut inserts = Mutex::new(Vec::new());
+ let mut drops = Mutex::new(Vec::new());
+
+ let mut a = (0..test_vec_len).collect::<Vec<_>>();
+ let b = (0..test_vec_len).collect::<Vec<_>>();
+
+ let _result = panic::catch_unwind(panic::AssertUnwindSafe(|| {
+ let mut result = Vec::new();
+ a.par_iter_mut()
+ .zip(&b)
+ .map(|(&mut a, &b)| {
+ if a > panic_point && will_panic {
+ panic!("unwinding for test");
+ }
+ let elt = a + b;
+ inserts.lock().unwrap().push(elt);
+ Recorddrop(elt, &drops)
+ })
+ .collect_into_vec(&mut result);
+
+ // If we reach this point, this must pass
+ assert_eq!(a.len(), result.len());
+ }));
+
+ let inserts = inserts.get_mut().unwrap();
+ let drops = drops.get_mut().unwrap();
+ println!("{:?}", inserts);
+ println!("{:?}", drops);
+
+ assert_eq!(inserts.len(), drops.len(), "Incorrect number of drops");
+ // sort to normalize order
+ inserts.sort();
+ drops.sort();
+ assert_eq!(inserts, drops, "Incorrect elements were dropped");
+ };
+
+ for &should_panic in &[true, false] {
+ test_collect_panic(should_panic);
+ }
+}
+
+#[test]
+fn collect_drop_on_unwind_zst() {
+ static INSERTS: AtomicUsize = AtomicUsize::new(0);
+ static DROPS: AtomicUsize = AtomicUsize::new(0);
+
+ struct RecorddropZst;
+
+ impl Drop for RecorddropZst {
+ fn drop(&mut self) {
+ DROPS.fetch_add(1, Ordering::SeqCst);
+ }
+ }
+
+ let test_collect_panic = |will_panic: bool| {
+ INSERTS.store(0, Ordering::SeqCst);
+ DROPS.store(0, Ordering::SeqCst);
+
+ let test_vec_len = 1024;
+ let panic_point = 740;
+
+ let a = (0..test_vec_len).collect::<Vec<_>>();
+
+ let _result = panic::catch_unwind(panic::AssertUnwindSafe(|| {
+ let mut result = Vec::new();
+ a.par_iter()
+ .map(|&a| {
+ if a > panic_point && will_panic {
+ panic!("unwinding for test");
+ }
+ INSERTS.fetch_add(1, Ordering::SeqCst);
+ RecorddropZst
+ })
+ .collect_into_vec(&mut result);
+
+ // If we reach this point, this must pass
+ assert_eq!(a.len(), result.len());
+ }));
+
+ let inserts = INSERTS.load(Ordering::SeqCst);
+ let drops = DROPS.load(Ordering::SeqCst);
+
+ assert_eq!(inserts, drops, "Incorrect number of drops");
+ assert!(will_panic || drops == test_vec_len)
+ };
+
+ for &should_panic in &[true, false] {
+ test_collect_panic(should_panic);
+ }
+}
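Both tests above stress `collect_into_vec` while unwinding from a panic; for reference, its ordinary behavior is an in-place parallel collect into an existing vector. A minimal sketch, separate from the patch and assuming only the indexed-iterator API already exercised above:

use rayon::prelude::*;

fn main() {
    // `collect_into_vec` needs an IndexedParallelIterator and writes the
    // results into the destination vector, reusing its allocation when possible.
    let mut out = Vec::new();
    (0..1000).into_par_iter().map(|x| x * 2).collect_into_vec(&mut out);
    assert_eq!(out.len(), 1000);
    assert_eq!(out[10], 20);
}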
diff --git a/tests/cross-pool.rs b/tests/cross-pool.rs
new file mode 100644
index 0000000..f0a2128
--- /dev/null
+++ b/tests/cross-pool.rs
@@ -0,0 +1,21 @@
+use rayon::prelude::*;
+use rayon::ThreadPoolBuilder;
+
+#[test]
+fn cross_pool_busy() {
+ let pool1 = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
+ let pool2 = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
+
+ let n: i32 = 100;
+ let sum: i32 = pool1.install(move || {
+ // Each item will block on pool2, but pool1 can continue processing other work from the
+ // parallel iterator in the meantime. There's a chance that pool1 will still be awake to
+ // see the latch set without being tickled, and then it will drop that stack job. The latch
+ // internals must not assume that the job will still be alive after it's set!
+ (1..=n)
+ .into_par_iter()
+ .map(|i| pool2.install(move || i))
+ .sum()
+ });
+ assert_eq!(sum, n * (n + 1) / 2);
+}
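The comment in `cross_pool_busy` describes jobs on one pool blocking on another. Stripped of the latch concern, the nested-install pattern it relies on looks like the following standalone sketch (not part of the patch, using only `ThreadPoolBuilder` and `install` as above):

use rayon::ThreadPoolBuilder;

fn main() {
    // Two independent pools; a job running on `outer` synchronously waits
    // for a job submitted to `inner`, which is the cross-pool blocking the
    // test above exercises at scale.
    let outer = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
    let inner = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
    let answer = outer.install(|| inner.install(|| 6 * 7));
    assert_eq!(answer, 42);
}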
diff --git a/tests/debug.rs b/tests/debug.rs
new file mode 100644
index 0000000..fb11110
--- /dev/null
+++ b/tests/debug.rs
@@ -0,0 +1,209 @@
+use rayon::prelude::*;
+use std::fmt::Debug;
+
+fn check<I>(iter: I)
+where
+ I: ParallelIterator + Debug,
+{
+ println!("{:?}", iter);
+}
+
+#[test]
+fn debug_binary_heap() {
+ use std::collections::BinaryHeap;
+ let mut heap: BinaryHeap<_> = (0..10).collect();
+ check(heap.par_iter());
+ check(heap.par_drain());
+ check(heap.into_par_iter());
+}
+
+#[test]
+fn debug_btree_map() {
+ use std::collections::BTreeMap;
+ let mut map: BTreeMap<_, _> = (0..10).enumerate().collect();
+ check(map.par_iter());
+ check(map.par_iter_mut());
+ check(map.into_par_iter());
+}
+
+#[test]
+fn debug_btree_set() {
+ use std::collections::BTreeSet;
+ let set: BTreeSet<_> = (0..10).collect();
+ check(set.par_iter());
+ check(set.into_par_iter());
+}
+
+#[test]
+fn debug_hash_map() {
+ use std::collections::HashMap;
+ let mut map: HashMap<_, _> = (0..10).enumerate().collect();
+ check(map.par_iter());
+ check(map.par_iter_mut());
+ check(map.par_drain());
+ check(map.into_par_iter());
+}
+
+#[test]
+fn debug_hash_set() {
+ use std::collections::HashSet;
+ let mut set: HashSet<_> = (0..10).collect();
+ check(set.par_iter());
+ check(set.par_drain());
+ check(set.into_par_iter());
+}
+
+#[test]
+fn debug_linked_list() {
+ use std::collections::LinkedList;
+ let mut list: LinkedList<_> = (0..10).collect();
+ check(list.par_iter());
+ check(list.par_iter_mut());
+ check(list.into_par_iter());
+}
+
+#[test]
+fn debug_vec_deque() {
+ use std::collections::VecDeque;
+ let mut deque: VecDeque<_> = (0..10).collect();
+ check(deque.par_iter());
+ check(deque.par_iter_mut());
+ check(deque.par_drain(..));
+ check(deque.into_par_iter());
+}
+
+#[test]
+fn debug_option() {
+ let mut option = Some(0);
+ check(option.par_iter());
+ check(option.par_iter_mut());
+ check(option.into_par_iter());
+}
+
+#[test]
+fn debug_result() {
+ let mut result = Ok::<_, ()>(0);
+ check(result.par_iter());
+ check(result.par_iter_mut());
+ check(result.into_par_iter());
+}
+
+#[test]
+fn debug_range() {
+ check((0..10).into_par_iter());
+}
+
+#[test]
+fn debug_range_inclusive() {
+ check((0..=10).into_par_iter());
+}
+
+#[test]
+fn debug_str() {
+ let s = "a b c d\ne f g";
+ check(s.par_chars());
+ check(s.par_lines());
+ check(s.par_split('\n'));
+ check(s.par_split_terminator('\n'));
+ check(s.par_split_whitespace());
+}
+
+#[test]
+fn debug_string() {
+ let mut s = "a b c d\ne f g".to_string();
+ s.par_drain(..);
+}
+
+#[test]
+fn debug_vec() {
+ let mut v: Vec<_> = (0..10).collect();
+ check(v.par_iter());
+ check(v.par_iter_mut());
+ check(v.par_chunks(42));
+ check(v.par_chunks_exact(42));
+ check(v.par_chunks_mut(42));
+ check(v.par_chunks_exact_mut(42));
+ check(v.par_windows(42));
+ check(v.par_split(|x| x % 3 == 0));
+ check(v.par_split_mut(|x| x % 3 == 0));
+ check(v.par_drain(..));
+ check(v.into_par_iter());
+}
+
+#[test]
+fn debug_adaptors() {
+ let v: Vec<_> = (0..10).collect();
+ check(v.par_iter().chain(&v));
+ check(v.par_iter().cloned());
+ check(v.par_iter().copied());
+ check(v.par_iter().enumerate());
+ check(v.par_iter().filter(|_| true));
+ check(v.par_iter().filter_map(Some));
+ check(v.par_iter().flat_map(Some));
+ check(v.par_iter().flat_map_iter(Some));
+ check(v.par_iter().map(Some).flatten());
+ check(v.par_iter().map(Some).flatten_iter());
+ check(v.par_iter().fold(|| 0, |x, _| x));
+ check(v.par_iter().fold_with(0, |x, _| x));
+ check(v.par_iter().try_fold(|| 0, |x, _| Some(x)));
+ check(v.par_iter().try_fold_with(0, |x, _| Some(x)));
+ check(v.par_iter().inspect(|_| ()));
+ check(v.par_iter().update(|_| ()));
+ check(v.par_iter().interleave(&v));
+ check(v.par_iter().interleave_shortest(&v));
+ check(v.par_iter().intersperse(&-1));
+ check(v.par_iter().chunks(3));
+ check(v.par_iter().map(|x| x));
+ check(v.par_iter().map_with(0, |_, x| x));
+ check(v.par_iter().map_init(|| 0, |_, x| x));
+ check(v.par_iter().panic_fuse());
+ check(v.par_iter().positions(|_| true));
+ check(v.par_iter().rev());
+ check(v.par_iter().skip(1));
+ check(v.par_iter().take(1));
+ check(v.par_iter().map(Some).while_some());
+ check(v.par_iter().with_max_len(1));
+ check(v.par_iter().with_min_len(1));
+ check(v.par_iter().zip(&v));
+ check(v.par_iter().zip_eq(&v));
+ check(v.par_iter().step_by(2));
+}
+
+#[test]
+fn debug_empty() {
+ check(rayon::iter::empty::<i32>());
+}
+
+#[test]
+fn debug_once() {
+ check(rayon::iter::once(10));
+}
+
+#[test]
+fn debug_repeat() {
+ let x: Option<i32> = None;
+ check(rayon::iter::repeat(x));
+ check(rayon::iter::repeatn(x, 10));
+}
+
+#[test]
+fn debug_splitter() {
+ check(rayon::iter::split(0..10, |x| (x, None)));
+}
+
+#[test]
+fn debug_multizip() {
+ let v: &Vec<_> = &(0..10).collect();
+ check((v,).into_par_iter());
+ check((v, v).into_par_iter());
+ check((v, v, v).into_par_iter());
+ check((v, v, v, v).into_par_iter());
+ check((v, v, v, v, v).into_par_iter());
+ check((v, v, v, v, v, v).into_par_iter());
+ check((v, v, v, v, v, v, v).into_par_iter());
+ check((v, v, v, v, v, v, v, v).into_par_iter());
+ check((v, v, v, v, v, v, v, v, v).into_par_iter());
+ check((v, v, v, v, v, v, v, v, v, v).into_par_iter());
+ check((v, v, v, v, v, v, v, v, v, v, v).into_par_iter());
+ check((v, v, v, v, v, v, v, v, v, v, v, v).into_par_iter());
+}
diff --git a/tests/intersperse.rs b/tests/intersperse.rs
new file mode 100644
index 0000000..aaa5b65
--- /dev/null
+++ b/tests/intersperse.rs
@@ -0,0 +1,60 @@
+use rayon::prelude::*;
+
+#[test]
+fn check_intersperse() {
+ let v: Vec<_> = (0..1000).into_par_iter().intersperse(-1).collect();
+ assert_eq!(v.len(), 1999);
+ for (i, x) in v.into_iter().enumerate() {
+ assert_eq!(x, if i % 2 == 0 { i as i32 / 2 } else { -1 });
+ }
+}
+
+#[test]
+fn check_intersperse_again() {
+ let v: Vec<_> = (0..1000)
+ .into_par_iter()
+ .intersperse(-1)
+ .intersperse(-2)
+ .collect();
+ assert_eq!(v.len(), 3997);
+ for (i, x) in v.into_iter().enumerate() {
+ let y = match i % 4 {
+ 0 => i as i32 / 4,
+ 2 => -1,
+ _ => -2,
+ };
+ assert_eq!(x, y);
+ }
+}
+
+#[test]
+fn check_intersperse_unindexed() {
+ let v: Vec<_> = (0..1000).map(|i| i.to_string()).collect();
+ let s = v.join(",");
+ let s2 = v.join(";");
+ let par: String = s.par_split(',').intersperse(";").collect();
+ assert_eq!(par, s2);
+}
+
+#[test]
+fn check_intersperse_producer() {
+ (0..1000)
+ .into_par_iter()
+ .intersperse(-1)
+ .zip_eq(0..1999)
+ .for_each(|(x, i)| {
+ assert_eq!(x, if i % 2 == 0 { i / 2 } else { -1 });
+ });
+}
+
+#[test]
+fn check_intersperse_rev() {
+ (0..1000)
+ .into_par_iter()
+ .intersperse(-1)
+ .zip_eq(0..1999)
+ .rev()
+ .for_each(|(x, i)| {
+ assert_eq!(x, if i % 2 == 0 { i / 2 } else { -1 });
+ });
+}
diff --git a/tests/issue671-unzip.rs b/tests/issue671-unzip.rs
new file mode 100644
index 0000000..c9af7e6
--- /dev/null
+++ b/tests/issue671-unzip.rs
@@ -0,0 +1,17 @@
+#![type_length_limit = "10000"]
+
+use rayon::prelude::*;
+
+#[test]
+fn type_length_limit() {
+ let input = vec![1, 2, 3, 4, 5];
+ let (indexes, (squares, cubes)): (Vec<_>, (Vec<_>, Vec<_>)) = input
+ .par_iter()
+ .map(|x| (x * x, x * x * x))
+ .enumerate()
+ .unzip();
+
+ drop(indexes);
+ drop(squares);
+ drop(cubes);
+}
diff --git a/tests/issue671.rs b/tests/issue671.rs
new file mode 100644
index 0000000..9d0953f
--- /dev/null
+++ b/tests/issue671.rs
@@ -0,0 +1,16 @@
+#![type_length_limit = "500000"]
+
+use rayon::prelude::*;
+
+#[test]
+fn type_length_limit() {
+ let _ = Vec::<Result<(), ()>>::new()
+ .into_par_iter()
+ .map(|x| x)
+ .map(|x| x)
+ .map(|x| x)
+ .map(|x| x)
+ .map(|x| x)
+ .map(|x| x)
+ .collect::<Result<(), ()>>();
+}
diff --git a/tests/iter_panic.rs b/tests/iter_panic.rs
new file mode 100644
index 0000000..4885a28
--- /dev/null
+++ b/tests/iter_panic.rs
@@ -0,0 +1,52 @@
+use rayon::prelude::*;
+use rayon::ThreadPoolBuilder;
+use std::ops::Range;
+use std::panic::{self, UnwindSafe};
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+const ITER: Range<i32> = 0..0x1_0000;
+const PANIC: i32 = 0xC000;
+
+fn check(&i: &i32) {
+ if i == PANIC {
+ panic!("boom")
+ }
+}
+
+#[test]
+#[should_panic(expected = "boom")]
+fn iter_panic() {
+ ITER.into_par_iter().for_each(|i| check(&i));
+}
+
+#[test]
+fn iter_panic_fuse() {
+ // We only use a single thread in order to make the behavior
+ // of 'panic_fuse' deterministic
+ let pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
+
+ pool.install(|| {
+ fn count(iter: impl ParallelIterator + UnwindSafe) -> usize {
+ let count = AtomicUsize::new(0);
+ let result = panic::catch_unwind(|| {
+ iter.for_each(|_| {
+ count.fetch_add(1, Ordering::Relaxed);
+ });
+ });
+ assert!(result.is_err());
+ count.into_inner()
+ }
+
+ // Without `panic_fuse()`, we'll reach every item except the panicking one.
+ let expected = ITER.len() - 1;
+ let iter = ITER.into_par_iter().with_max_len(1);
+ assert_eq!(count(iter.clone().inspect(check)), expected);
+
+ // With `panic_fuse()` anywhere in the chain, we'll reach fewer items.
+ assert!(count(iter.clone().inspect(check).panic_fuse()) < expected);
+ assert!(count(iter.clone().panic_fuse().inspect(check)) < expected);
+
+ // Try in reverse to be sure we hit the producer case.
+ assert!(count(iter.clone().panic_fuse().inspect(check).rev()) < expected);
+ });
+}
diff --git a/tests/named-threads.rs b/tests/named-threads.rs
new file mode 100644
index 0000000..fd1b0be
--- /dev/null
+++ b/tests/named-threads.rs
@@ -0,0 +1,24 @@
+use std::collections::HashSet;
+
+use rayon::prelude::*;
+use rayon::*;
+
+#[test]
+fn named_threads() {
+ ThreadPoolBuilder::new()
+ .thread_name(|i| format!("hello-name-test-{}", i))
+ .build_global()
+ .unwrap();
+
+ const N: usize = 10000;
+
+ let thread_names = (0..N)
+ .into_par_iter()
+ .flat_map(|_| ::std::thread::current().name().map(str::to_owned))
+ .collect::<HashSet<String>>();
+
+ let all_contains_name = thread_names
+ .iter()
+ .all(|name| name.starts_with("hello-name-test-"));
+ assert!(all_contains_name);
+}
diff --git a/tests/octillion.rs b/tests/octillion.rs
new file mode 100644
index 0000000..cff2b11
--- /dev/null
+++ b/tests/octillion.rs
@@ -0,0 +1,130 @@
+use rayon::prelude::*;
+
+const OCTILLION: u128 = 1_000_000_000_000_000_000_000_000_000;
+
+/// Produce a parallel iterator for 0u128..10²⁷
+fn octillion() -> rayon::range::Iter<u128> {
+ (0..OCTILLION).into_par_iter()
+}
+
+/// Produce a parallel iterator for 0u128..=10²⁷
+fn octillion_inclusive() -> rayon::range_inclusive::Iter<u128> {
+ (0..=OCTILLION).into_par_iter()
+}
+
+/// Produce a parallel iterator for 0u128..10²⁷ using `flat_map`
+fn octillion_flat() -> impl ParallelIterator<Item = u128> {
+ (0u32..1_000_000_000)
+ .into_par_iter()
+ .with_max_len(1_000)
+ .map(|i| u64::from(i) * 1_000_000_000)
+ .flat_map(|i| {
+ (0u32..1_000_000_000)
+ .into_par_iter()
+ .with_max_len(1_000)
+ .map(move |j| i + u64::from(j))
+ })
+ .map(|i| u128::from(i) * 1_000_000_000)
+ .flat_map(|i| {
+ (0u32..1_000_000_000)
+ .into_par_iter()
+ .with_max_len(1_000)
+ .map(move |j| i + u128::from(j))
+ })
+}
+
+// NOTE: `find_first` and `find_last` currently take too long on 32-bit targets,
+// because the `AtomicUsize` match position has much too limited resolution.
+
+#[test]
+#[cfg_attr(not(target_pointer_width = "64"), ignore)]
+fn find_first_octillion() {
+ let x = octillion().find_first(|_| true);
+ assert_eq!(x, Some(0));
+}
+
+#[test]
+#[cfg_attr(not(target_pointer_width = "64"), ignore)]
+fn find_first_octillion_inclusive() {
+ let x = octillion_inclusive().find_first(|_| true);
+ assert_eq!(x, Some(0));
+}
+
+#[test]
+#[cfg_attr(not(target_pointer_width = "64"), ignore)]
+fn find_first_octillion_flat() {
+ let x = octillion_flat().find_first(|_| true);
+ assert_eq!(x, Some(0));
+}
+
+fn two_threads<F: Send + FnOnce() -> R, R: Send>(f: F) -> R {
+ // FIXME: If we don't use at least two threads, then we end up walking
+ // through the entire iterator sequentially, without the benefit of any
+ // short-circuiting. We probably don't want testing to wait that long. ;)
+ let builder = rayon::ThreadPoolBuilder::new().num_threads(2);
+ let pool = builder.build().unwrap();
+
+ pool.install(f)
+}
+
+#[test]
+#[cfg_attr(not(target_pointer_width = "64"), ignore)]
+fn find_last_octillion() {
+ // It would be nice if `find_last` could prioritize the later splits,
+ // basically flipping the `join` args, without needing indexed `rev`.
+ // (or could we have an unindexed `rev`?)
+ let x = two_threads(|| octillion().find_last(|_| true));
+ assert_eq!(x, Some(OCTILLION - 1));
+}
+
+#[test]
+#[cfg_attr(not(target_pointer_width = "64"), ignore)]
+fn find_last_octillion_inclusive() {
+ let x = two_threads(|| octillion_inclusive().find_last(|_| true));
+ assert_eq!(x, Some(OCTILLION));
+}
+
+#[test]
+#[cfg_attr(not(target_pointer_width = "64"), ignore)]
+fn find_last_octillion_flat() {
+ let x = two_threads(|| octillion_flat().find_last(|_| true));
+ assert_eq!(x, Some(OCTILLION - 1));
+}
+
+#[test]
+fn find_any_octillion() {
+ let x = two_threads(|| octillion().find_any(|x| *x > OCTILLION / 2));
+ assert!(x.is_some());
+}
+
+#[test]
+fn find_any_octillion_flat() {
+ let x = two_threads(|| octillion_flat().find_any(|x| *x > OCTILLION / 2));
+ assert!(x.is_some());
+}
+
+#[test]
+fn filter_find_any_octillion() {
+ let x = two_threads(|| {
+ octillion()
+ .filter(|x| *x > OCTILLION / 2)
+ .find_any(|_| true)
+ });
+ assert!(x.is_some());
+}
+
+#[test]
+fn filter_find_any_octillion_flat() {
+ let x = two_threads(|| {
+ octillion_flat()
+ .filter(|x| *x > OCTILLION / 2)
+ .find_any(|_| true)
+ });
+ assert!(x.is_some());
+}
+
+#[test]
+fn fold_find_any_octillion_flat() {
+ let x = two_threads(|| octillion_flat().fold(|| (), |_, _| ()).find_any(|_| true));
+ assert!(x.is_some());
+}
diff --git a/tests/producer_split_at.rs b/tests/producer_split_at.rs
new file mode 100644
index 0000000..752bc3e
--- /dev/null
+++ b/tests/producer_split_at.rs
@@ -0,0 +1,344 @@
+use rayon::iter::plumbing::*;
+use rayon::prelude::*;
+
+/// Stress-test indexes for `Producer::split_at`.
+fn check<F, I>(expected: &[I::Item], mut f: F)
+where
+ F: FnMut() -> I,
+ I: IntoParallelIterator,
+ I::Iter: IndexedParallelIterator,
+ I::Item: PartialEq + std::fmt::Debug,
+{
+ map_triples(expected.len() + 1, |i, j, k| {
+ Split::forward(f(), i, j, k, expected);
+ Split::reverse(f(), i, j, k, expected);
+ });
+}
+
+fn map_triples<F>(end: usize, mut f: F)
+where
+ F: FnMut(usize, usize, usize),
+{
+ for i in 0..end {
+ for j in i..end {
+ for k in j..end {
+ f(i, j, k);
+ }
+ }
+ }
+}
+
+#[derive(Debug)]
+struct Split {
+ i: usize,
+ j: usize,
+ k: usize,
+ reverse: bool,
+}
+
+impl Split {
+ fn forward<I>(iter: I, i: usize, j: usize, k: usize, expected: &[I::Item])
+ where
+ I: IntoParallelIterator,
+ I::Iter: IndexedParallelIterator,
+ I::Item: PartialEq + std::fmt::Debug,
+ {
+ let result = iter.into_par_iter().with_producer(Split {
+ i,
+ j,
+ k,
+ reverse: false,
+ });
+ assert_eq!(result, expected);
+ }
+
+ fn reverse<I>(iter: I, i: usize, j: usize, k: usize, expected: &[I::Item])
+ where
+ I: IntoParallelIterator,
+ I::Iter: IndexedParallelIterator,
+ I::Item: PartialEq + std::fmt::Debug,
+ {
+ let result = iter.into_par_iter().with_producer(Split {
+ i,
+ j,
+ k,
+ reverse: true,
+ });
+ assert!(result.iter().eq(expected.iter().rev()));
+ }
+}
+
+impl<T> ProducerCallback<T> for Split {
+ type Output = Vec<T>;
+
+ fn callback<P>(self, producer: P) -> Self::Output
+ where
+ P: Producer<Item = T>,
+ {
+ println!("{:?}", self);
+
+ // Splitting the outer indexes first gets us an arbitrary mid section,
+ // which we then split further to get full test coverage.
+ let (left, d) = producer.split_at(self.k);
+ let (a, mid) = left.split_at(self.i);
+ let (b, c) = mid.split_at(self.j - self.i);
+
+ let a = a.into_iter();
+ let b = b.into_iter();
+ let c = c.into_iter();
+ let d = d.into_iter();
+
+ check_len(&a, self.i);
+ check_len(&b, self.j - self.i);
+ check_len(&c, self.k - self.j);
+
+ let chain = a.chain(b).chain(c).chain(d);
+ if self.reverse {
+ chain.rev().collect()
+ } else {
+ chain.collect()
+ }
+ }
+}
+
+fn check_len<I: ExactSizeIterator>(iter: &I, len: usize) {
+ assert_eq!(iter.size_hint(), (len, Some(len)));
+ assert_eq!(iter.len(), len);
+}
+
+// **** Base Producers ****
+
+#[test]
+fn empty() {
+ let v = vec![42];
+ check(&v[..0], rayon::iter::empty);
+}
+
+#[test]
+fn once() {
+ let v = vec![42];
+ check(&v, || rayon::iter::once(42));
+}
+
+#[test]
+fn option() {
+ let v = vec![42];
+ check(&v, || Some(42));
+}
+
+#[test]
+fn range() {
+ let v: Vec<_> = (0..10).collect();
+ check(&v, || 0..10);
+}
+
+#[test]
+fn range_inclusive() {
+ let v: Vec<_> = (0u16..=10).collect();
+ check(&v, || 0u16..=10);
+}
+
+#[test]
+fn repeatn() {
+ let v: Vec<_> = std::iter::repeat(1).take(5).collect();
+ check(&v, || rayon::iter::repeatn(1, 5));
+}
+
+#[test]
+fn slice_iter() {
+ let s: Vec<_> = (0..10).collect();
+ let v: Vec<_> = s.iter().collect();
+ check(&v, || &s);
+}
+
+#[test]
+fn slice_iter_mut() {
+ let mut s: Vec<_> = (0..10).collect();
+ let mut v: Vec<_> = s.clone();
+ let expected: Vec<_> = v.iter_mut().collect();
+
+ map_triples(expected.len() + 1, |i, j, k| {
+ Split::forward(s.par_iter_mut(), i, j, k, &expected);
+ Split::reverse(s.par_iter_mut(), i, j, k, &expected);
+ });
+}
+
+#[test]
+fn slice_chunks() {
+ let s: Vec<_> = (0..10).collect();
+ for len in 1..s.len() + 2 {
+ let v: Vec<_> = s.chunks(len).collect();
+ check(&v, || s.par_chunks(len));
+ }
+}
+
+#[test]
+fn slice_chunks_exact() {
+ let s: Vec<_> = (0..10).collect();
+ for len in 1..s.len() + 2 {
+ let v: Vec<_> = s.chunks_exact(len).collect();
+ check(&v, || s.par_chunks_exact(len));
+ }
+}
+
+#[test]
+fn slice_chunks_mut() {
+ let mut s: Vec<_> = (0..10).collect();
+ let mut v: Vec<_> = s.clone();
+ for len in 1..s.len() + 2 {
+ let expected: Vec<_> = v.chunks_mut(len).collect();
+ map_triples(expected.len() + 1, |i, j, k| {
+ Split::forward(s.par_chunks_mut(len), i, j, k, &expected);
+ Split::reverse(s.par_chunks_mut(len), i, j, k, &expected);
+ });
+ }
+}
+
+#[test]
+fn slice_chunks_exact_mut() {
+ let mut s: Vec<_> = (0..10).collect();
+ let mut v: Vec<_> = s.clone();
+ for len in 1..s.len() + 2 {
+ let expected: Vec<_> = v.chunks_exact_mut(len).collect();
+ map_triples(expected.len() + 1, |i, j, k| {
+ Split::forward(s.par_chunks_exact_mut(len), i, j, k, &expected);
+ Split::reverse(s.par_chunks_exact_mut(len), i, j, k, &expected);
+ });
+ }
+}
+
+#[test]
+fn slice_windows() {
+ let s: Vec<_> = (0..10).collect();
+ let v: Vec<_> = s.windows(2).collect();
+ check(&v, || s.par_windows(2));
+}
+
+#[test]
+fn vec() {
+ let v: Vec<_> = (0..10).collect();
+ check(&v, || v.clone());
+}
+
+// **** Adaptors ****
+
+#[test]
+fn chain() {
+ let v: Vec<_> = (0..10).collect();
+ check(&v, || (0..5).into_par_iter().chain(5..10));
+}
+
+#[test]
+fn cloned() {
+ let v: Vec<_> = (0..10).collect();
+ check(&v, || v.par_iter().cloned());
+}
+
+#[test]
+fn copied() {
+ let v: Vec<_> = (0..10).collect();
+ check(&v, || v.par_iter().copied());
+}
+
+#[test]
+fn enumerate() {
+ let v: Vec<_> = (0..10).enumerate().collect();
+ check(&v, || (0..10).into_par_iter().enumerate());
+}
+
+#[test]
+fn step_by() {
+ let v: Vec<_> = (0..10).step_by(2).collect();
+ check(&v, || (0..10).into_par_iter().step_by(2))
+}
+
+#[test]
+fn step_by_unaligned() {
+ let v: Vec<_> = (0..10).step_by(3).collect();
+ check(&v, || (0..10).into_par_iter().step_by(3))
+}
+
+#[test]
+fn inspect() {
+ let v: Vec<_> = (0..10).collect();
+ check(&v, || (0..10).into_par_iter().inspect(|_| ()));
+}
+
+#[test]
+fn update() {
+ let v: Vec<_> = (0..10).collect();
+ check(&v, || (0..10).into_par_iter().update(|_| ()));
+}
+
+#[test]
+fn interleave() {
+ let v = [0, 10, 1, 11, 2, 12, 3, 4];
+ check(&v, || (0..5).into_par_iter().interleave(10..13));
+ check(&v[..6], || (0..3).into_par_iter().interleave(10..13));
+
+ let v = [0, 10, 1, 11, 2, 12, 13, 14];
+ check(&v, || (0..3).into_par_iter().interleave(10..15));
+}
+
+#[test]
+fn intersperse() {
+ let v = [0, -1, 1, -1, 2, -1, 3, -1, 4];
+ check(&v, || (0..5).into_par_iter().intersperse(-1));
+}
+
+#[test]
+fn chunks() {
+ let s: Vec<_> = (0..10).collect();
+ let v: Vec<_> = s.chunks(2).map(|c| c.to_vec()).collect();
+ check(&v, || s.par_iter().cloned().chunks(2));
+}
+
+#[test]
+fn map() {
+ let v: Vec<_> = (0..10).collect();
+ check(&v, || v.par_iter().map(Clone::clone));
+}
+
+#[test]
+fn map_with() {
+ let v: Vec<_> = (0..10).collect();
+ check(&v, || v.par_iter().map_with(vec![0], |_, &x| x));
+}
+
+#[test]
+fn map_init() {
+ let v: Vec<_> = (0..10).collect();
+ check(&v, || v.par_iter().map_init(|| vec![0], |_, &x| x));
+}
+
+#[test]
+fn panic_fuse() {
+ let v: Vec<_> = (0..10).collect();
+ check(&v, || (0..10).into_par_iter().panic_fuse());
+}
+
+#[test]
+fn rev() {
+ let v: Vec<_> = (0..10).rev().collect();
+ check(&v, || (0..10).into_par_iter().rev());
+}
+
+#[test]
+fn with_max_len() {
+ let v: Vec<_> = (0..10).collect();
+ check(&v, || (0..10).into_par_iter().with_max_len(1));
+}
+
+#[test]
+fn with_min_len() {
+ let v: Vec<_> = (0..10).collect();
+ check(&v, || (0..10).into_par_iter().with_min_len(1));
+}
+
+#[test]
+fn zip() {
+ let v: Vec<_> = (0..10).zip(10..20).collect();
+ check(&v, || (0..10).into_par_iter().zip(10..20));
+ check(&v[..5], || (0..5).into_par_iter().zip(10..20));
+ check(&v[..5], || (0..10).into_par_iter().zip(10..15));
+}
diff --git a/tests/sort-panic-safe.rs b/tests/sort-panic-safe.rs
new file mode 100644
index 0000000..7c50505
--- /dev/null
+++ b/tests/sort-panic-safe.rs
@@ -0,0 +1,162 @@
+use rand::distributions::Uniform;
+use rand::{thread_rng, Rng};
+use rayon::prelude::*;
+use std::cell::Cell;
+use std::cmp::{self, Ordering};
+use std::panic;
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering::Relaxed;
+use std::thread;
+
+static VERSIONS: AtomicUsize = AtomicUsize::new(0);
+
+lazy_static::lazy_static! {
+ static ref DROP_COUNTS: Vec<AtomicUsize> = (0..20_000).map(|_| AtomicUsize::new(0)).collect();
+}
+
+#[derive(Clone, Eq)]
+struct DropCounter {
+ x: u32,
+ id: usize,
+ version: Cell<usize>,
+}
+
+impl PartialEq for DropCounter {
+ fn eq(&self, other: &Self) -> bool {
+ self.partial_cmp(other) == Some(Ordering::Equal)
+ }
+}
+
+impl PartialOrd for DropCounter {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ self.version.set(self.version.get() + 1);
+ other.version.set(other.version.get() + 1);
+ VERSIONS.fetch_add(2, Relaxed);
+ self.x.partial_cmp(&other.x)
+ }
+}
+
+impl Ord for DropCounter {
+ fn cmp(&self, other: &Self) -> Ordering {
+ self.partial_cmp(other).unwrap()
+ }
+}
+
+impl Drop for DropCounter {
+ fn drop(&mut self) {
+ DROP_COUNTS[self.id].fetch_add(1, Relaxed);
+ VERSIONS.fetch_sub(self.version.get(), Relaxed);
+ }
+}
+
+macro_rules! test {
+ ($input:ident, $func:ident) => {
+ let len = $input.len();
+
+ // Work out the total number of comparisons required to sort
+ // this array...
+ let count = AtomicUsize::new(0);
+ $input.to_owned().$func(|a, b| {
+ count.fetch_add(1, Relaxed);
+ a.cmp(b)
+ });
+
+ let mut panic_countdown = count.load(Relaxed);
+ let step = if len <= 100 {
+ 1
+ } else {
+ cmp::max(1, panic_countdown / 10)
+ };
+
+ // ... and then panic after each `step` comparisons.
+ loop {
+ // Refresh the counters.
+ VERSIONS.store(0, Relaxed);
+ for i in 0..len {
+ DROP_COUNTS[i].store(0, Relaxed);
+ }
+
+ let v = $input.to_owned();
+ let _ = thread::spawn(move || {
+ let mut v = v;
+ let panic_countdown = AtomicUsize::new(panic_countdown);
+ v.$func(|a, b| {
+ if panic_countdown.fetch_sub(1, Relaxed) == 1 {
+ SILENCE_PANIC.with(|s| s.set(true));
+ panic!();
+ }
+ a.cmp(b)
+ })
+ })
+ .join();
+
+ // Check that the number of things dropped is exactly
+ // what we expect (i.e. the contents of `v`).
+ for (i, c) in DROP_COUNTS.iter().enumerate().take(len) {
+ let count = c.load(Relaxed);
+ assert!(
+ count == 1,
+ "found drop count == {} for i == {}, len == {}",
+ count,
+ i,
+ len
+ );
+ }
+
+ // Check that the most recent versions of values were dropped.
+ assert_eq!(VERSIONS.load(Relaxed), 0);
+
+ if panic_countdown < step {
+ break;
+ }
+ panic_countdown -= step;
+ }
+ };
+}
+
+thread_local!(static SILENCE_PANIC: Cell<bool> = Cell::new(false));
+
+#[test]
+fn sort_panic_safe() {
+ let prev = panic::take_hook();
+ panic::set_hook(Box::new(move |info| {
+ if !SILENCE_PANIC.with(Cell::get) {
+ prev(info);
+ }
+ }));
+
+ for &len in &[1, 2, 3, 4, 5, 10, 20, 100, 500, 5_000, 20_000] {
+ let len_dist = Uniform::new(0, len);
+ for &modulus in &[5, 30, 1_000, 20_000] {
+ for &has_runs in &[false, true] {
+ let mut rng = thread_rng();
+ let mut input = (0..len)
+ .map(|id| DropCounter {
+ x: rng.gen_range(0, modulus),
+ id,
+ version: Cell::new(0),
+ })
+ .collect::<Vec<_>>();
+
+ if has_runs {
+ for c in &mut input {
+ c.x = c.id as u32;
+ }
+
+ for _ in 0..5 {
+ let a = rng.sample(&len_dist);
+ let b = rng.sample(&len_dist);
+ if a < b {
+ input[a..b].reverse();
+ } else {
+ input.swap(a, b);
+ }
+ }
+ }
+
+ test!(input, par_sort_by);
+ test!(input, par_sort_unstable_by);
+ }
+ }
+ }
+}
diff --git a/tests/str.rs b/tests/str.rs
new file mode 100644
index 0000000..0e1e35e
--- /dev/null
+++ b/tests/str.rs
@@ -0,0 +1,116 @@
+use rand::distributions::Standard;
+use rand::{Rng, SeedableRng};
+use rand_xorshift::XorShiftRng;
+use rayon::prelude::*;
+
+fn seeded_rng() -> XorShiftRng {
+ let mut seed = <XorShiftRng as SeedableRng>::Seed::default();
+ (0..).zip(seed.as_mut()).for_each(|(i, x)| *x = i);
+ XorShiftRng::from_seed(seed)
+}
+
+#[test]
+pub fn execute_strings() {
+ let rng = seeded_rng();
+ let s: String = rng.sample_iter::<char, _>(&Standard).take(1024).collect();
+
+ let par_chars: String = s.par_chars().collect();
+ assert_eq!(s, par_chars);
+
+ let par_even: String = s.par_chars().filter(|&c| (c as u32) & 1 == 0).collect();
+ let ser_even: String = s.chars().filter(|&c| (c as u32) & 1 == 0).collect();
+ assert_eq!(par_even, ser_even);
+
+ // test `FromParallelIterator<&char> for String`
+ let vchars: Vec<char> = s.par_chars().collect();
+ let par_chars: String = vchars.par_iter().collect();
+ assert_eq!(s, par_chars);
+
+ let par_bytes: Vec<u8> = s.par_bytes().collect();
+ assert_eq!(s.as_bytes(), &*par_bytes);
+
+ let par_utf16: Vec<u16> = s.par_encode_utf16().collect();
+ let ser_utf16: Vec<u16> = s.encode_utf16().collect();
+ assert_eq!(par_utf16, ser_utf16);
+
+ let par_charind: Vec<_> = s.par_char_indices().collect();
+ let ser_charind: Vec<_> = s.char_indices().collect();
+ assert_eq!(par_charind, ser_charind);
+}
+
+#[test]
+pub fn execute_strings_split() {
+ // char testcases from examples in `str::split` etc.,
+ // plus a large self-test for good measure.
+ let tests = vec![
+ ("Mary had a little lamb", ' '),
+ ("", 'X'),
+ ("lionXXtigerXleopard", 'X'),
+ ("||||a||b|c", '|'),
+ ("(///)", '/'),
+ ("010", '0'),
+ (" a b c", ' '),
+ ("A.B.", '.'),
+ ("A..B..", '.'),
+ ("foo\r\nbar\n\nbaz\n", '\n'),
+ ("foo\nbar\n\r\nbaz", '\n'),
+ ("A few words", ' '),
+ (" Mary had\ta\u{2009}little \n\t lamb", ' '),
+ (include_str!("str.rs"), ' '),
+ ];
+
+ for &(string, separator) in &tests {
+ let serial: Vec<_> = string.split(separator).collect();
+ let parallel: Vec<_> = string.par_split(separator).collect();
+ assert_eq!(serial, parallel);
+
+ let serial_fn: Vec<_> = string.split(|c| c == separator).collect();
+ let parallel_fn: Vec<_> = string.par_split(|c| c == separator).collect();
+ assert_eq!(serial_fn, parallel_fn);
+ }
+
+ for &(string, separator) in &tests {
+ let serial: Vec<_> = string.split_terminator(separator).collect();
+ let parallel: Vec<_> = string.par_split_terminator(separator).collect();
+ assert_eq!(serial, parallel);
+ }
+
+ for &(string, separator) in &tests {
+ let serial: Vec<_> = string.split_terminator(|c| c == separator).collect();
+ let parallel: Vec<_> = string.par_split_terminator(|c| c == separator).collect();
+ assert_eq!(serial, parallel);
+ }
+
+ for &(string, _) in &tests {
+ let serial: Vec<_> = string.lines().collect();
+ let parallel: Vec<_> = string.par_lines().collect();
+ assert_eq!(serial, parallel);
+ }
+
+ for &(string, _) in &tests {
+ let serial: Vec<_> = string.split_whitespace().collect();
+ let parallel: Vec<_> = string.par_split_whitespace().collect();
+ assert_eq!(serial, parallel);
+ }
+
+ // try matching separators too!
+ for &(string, separator) in &tests {
+ let serial: Vec<_> = string.matches(separator).collect();
+ let parallel: Vec<_> = string.par_matches(separator).collect();
+ assert_eq!(serial, parallel);
+
+ let serial_fn: Vec<_> = string.matches(|c| c == separator).collect();
+ let parallel_fn: Vec<_> = string.par_matches(|c| c == separator).collect();
+ assert_eq!(serial_fn, parallel_fn);
+ }
+
+ for &(string, separator) in &tests {
+ let serial: Vec<_> = string.match_indices(separator).collect();
+ let parallel: Vec<_> = string.par_match_indices(separator).collect();
+ assert_eq!(serial, parallel);
+
+ let serial_fn: Vec<_> = string.match_indices(|c| c == separator).collect();
+ let parallel_fn: Vec<_> = string.par_match_indices(|c| c == separator).collect();
+ assert_eq!(serial_fn, parallel_fn);
+ }
+}