author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 02:49:50 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 02:49:50 +0000
commit     9835e2ae736235810b4ea1c162ca5e65c547e770 (patch)
tree       3fcebf40ed70e581d776a8a4c65923e8ec20e026 /vendor/rayon/src/iter
parent     Releasing progress-linux version 1.70.0+dfsg2-1~progress7.99u1. (diff)
Merging upstream version 1.71.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/rayon/src/iter')
-rw-r--r--  vendor/rayon/src/iter/collect/test.rs      2
-rw-r--r--  vendor/rayon/src/iter/mod.rs             146
-rw-r--r--  vendor/rayon/src/iter/plumbing/README.md   4
-rw-r--r--  vendor/rayon/src/iter/skip_any.rs        144
-rw-r--r--  vendor/rayon/src/iter/skip_any_while.rs  166
-rw-r--r--  vendor/rayon/src/iter/take_any.rs        144
-rw-r--r--  vendor/rayon/src/iter/take_any_while.rs  166
-rw-r--r--  vendor/rayon/src/iter/test.rs              3
8 files changed, 773 insertions, 2 deletions
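
For orientation before the individual diffs: this merge adds four unordered adaptors to `ParallelIterator` (`take_any`, `skip_any`, `take_any_while`, `skip_any_while`), plus test gating for non-unwind and wasm targets. A minimal usage sketch of the new adaptors, assuming a rayon build that already contains this patch; the assertions mirror the doc examples introduced below:

use rayon::prelude::*;

fn main() {
    // Take 5 even numbers from anywhere in the (unordered) parallel stream.
    let taken: Vec<i32> = (0..100)
        .into_par_iter()
        .filter(|&x| x % 2 == 0)
        .take_any(5)
        .collect();
    assert_eq!(taken.len(), 5);

    // Skip 5 even numbers from anywhere; the other 45 survive.
    let skipped: Vec<i32> = (0..100)
        .into_par_iter()
        .filter(|&x| x % 2 == 0)
        .skip_any(5)
        .collect();
    assert_eq!(skipped.len(), 45);

    // Unordered analogues of take_while/skip_while: the predicate may race
    // across threads, so only bounds are guaranteed, not exact contents.
    let low: Vec<i32> = (0..100).into_par_iter().take_any_while(|&x| x < 50).collect();
    assert!(low.len() <= 50);

    let high: Vec<i32> = (0..100).into_par_iter().skip_any_while(|&x| x < 50).collect();
    assert!(high.len() >= 50);
}
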
diff --git a/vendor/rayon/src/iter/collect/test.rs b/vendor/rayon/src/iter/collect/test.rs
index b5f676f5d..97bec3f4e 100644
--- a/vendor/rayon/src/iter/collect/test.rs
+++ b/vendor/rayon/src/iter/collect/test.rs
@@ -76,6 +76,7 @@ fn right_produces_items_with_no_complete() {
// Complete is not called by the consumer. Hence, the collection vector is not fully initialized.
#[test]
+#[cfg_attr(not(panic = "unwind"), ignore)]
fn produces_items_with_no_complete() {
let counter = DropCounter::default();
let mut v = vec![];
@@ -273,6 +274,7 @@ fn right_panics() {
// The left consumer produces fewer items while the right
// consumer produces correct number; check that created elements are dropped
#[test]
+#[cfg_attr(not(panic = "unwind"), ignore)]
fn left_produces_fewer_items_drops() {
let counter = DropCounter::default();
let mut v = vec![];
diff --git a/vendor/rayon/src/iter/mod.rs b/vendor/rayon/src/iter/mod.rs
index 98c93267f..e60ea1633 100644
--- a/vendor/rayon/src/iter/mod.rs
+++ b/vendor/rayon/src/iter/mod.rs
@@ -141,10 +141,14 @@ mod reduce;
mod repeat;
mod rev;
mod skip;
+mod skip_any;
+mod skip_any_while;
mod splitter;
mod step_by;
mod sum;
mod take;
+mod take_any;
+mod take_any_while;
mod try_fold;
mod try_reduce;
mod try_reduce_with;
@@ -185,9 +189,13 @@ pub use self::{
repeat::{repeat, repeatn, Repeat, RepeatN},
rev::Rev,
skip::Skip,
+ skip_any::SkipAny,
+ skip_any_while::SkipAnyWhile,
splitter::{split, Split},
step_by::StepBy,
take::Take,
+ take_any::TakeAny,
+ take_any_while::TakeAnyWhile,
try_fold::{TryFold, TryFoldWith},
update::Update,
while_some::WhileSome,
@@ -2194,6 +2202,143 @@ pub trait ParallelIterator: Sized + Send {
Intersperse::new(self, element)
}
+ /// Creates an iterator that yields `n` elements from *anywhere* in the original iterator.
+ ///
+ /// This is similar to [`IndexedParallelIterator::take`] without being
+ /// constrained to the "first" `n` of the original iterator order. The
+ /// taken items will still maintain their relative order where that is
+ /// visible in `collect`, `reduce`, and similar outputs.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ ///
+ /// let result: Vec<_> = (0..100)
+ /// .into_par_iter()
+ /// .filter(|&x| x % 2 == 0)
+ /// .take_any(5)
+ /// .collect();
+ ///
+ /// assert_eq!(result.len(), 5);
+ /// assert!(result.windows(2).all(|w| w[0] < w[1]));
+ /// ```
+ fn take_any(self, n: usize) -> TakeAny<Self> {
+ TakeAny::new(self, n)
+ }
+
+ /// Creates an iterator that skips `n` elements from *anywhere* in the original iterator.
+ ///
+ /// This is similar to [`IndexedParallelIterator::skip`] without being
+ /// constrained to the "first" `n` of the original iterator order. The
+ /// remaining items will still maintain their relative order where that is
+ /// visible in `collect`, `reduce`, and similar outputs.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ ///
+ /// let result: Vec<_> = (0..100)
+ /// .into_par_iter()
+ /// .filter(|&x| x % 2 == 0)
+ /// .skip_any(5)
+ /// .collect();
+ ///
+ /// assert_eq!(result.len(), 45);
+ /// assert!(result.windows(2).all(|w| w[0] < w[1]));
+ /// ```
+ fn skip_any(self, n: usize) -> SkipAny<Self> {
+ SkipAny::new(self, n)
+ }
+
+ /// Creates an iterator that takes elements from *anywhere* in the original iterator
+ /// until the given `predicate` returns `false`.
+ ///
+ /// The `predicate` may be anything -- e.g. it could be checking a fact about the item, a
+ /// global condition unrelated to the item itself, or some combination thereof.
+ ///
+ /// If parallel calls to the `predicate` race and give different results, then the
+ /// `true` results will still take those particular items, while respecting the `false`
+ /// result from elsewhere to skip any further items.
+ ///
+ /// This is similar to [`Iterator::take_while`] without being constrained to the original
+ /// iterator order. The taken items will still maintain their relative order where that is
+ /// visible in `collect`, `reduce`, and similar outputs.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ ///
+ /// let result: Vec<_> = (0..100)
+ /// .into_par_iter()
+ /// .take_any_while(|x| *x < 50)
+ /// .collect();
+ ///
+ /// assert!(result.len() <= 50);
+ /// assert!(result.windows(2).all(|w| w[0] < w[1]));
+ /// ```
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ /// use std::sync::atomic::AtomicUsize;
+ /// use std::sync::atomic::Ordering::Relaxed;
+ ///
+ /// // Collect any group of items that sum <= 1000
+ /// let quota = AtomicUsize::new(1000);
+ /// let result: Vec<_> = (0_usize..100)
+ /// .into_par_iter()
+ /// .take_any_while(|&x| {
+ /// quota.fetch_update(Relaxed, Relaxed, |q| q.checked_sub(x))
+ /// .is_ok()
+ /// })
+ /// .collect();
+ ///
+ /// let sum = result.iter().sum::<usize>();
+ /// assert!(matches!(sum, 902..=1000));
+ /// ```
+ fn take_any_while<P>(self, predicate: P) -> TakeAnyWhile<Self, P>
+ where
+ P: Fn(&Self::Item) -> bool + Sync + Send,
+ {
+ TakeAnyWhile::new(self, predicate)
+ }
+
+ /// Creates an iterator that skips elements from *anywhere* in the original iterator
+ /// until the given `predicate` returns `false`.
+ ///
+ /// The `predicate` may be anything -- e.g. it could be checking a fact about the item, a
+ /// global condition unrelated to the item itself, or some combination thereof.
+ ///
+ /// If parallel calls to the `predicate` race and give different results, then the
+ /// `true` results will still skip those particular items, while respecting the `false`
+ /// result from elsewhere to take any further items.
+ ///
+ /// This is similar to [`Iterator::skip_while`] without being constrained to the original
+ /// iterator order. The remaining items will still maintain their relative order where that is
+ /// visible in `collect`, `reduce`, and similar outputs.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ ///
+ /// let result: Vec<_> = (0..100)
+ /// .into_par_iter()
+ /// .skip_any_while(|x| *x < 50)
+ /// .collect();
+ ///
+ /// assert!(result.len() >= 50);
+ /// assert!(result.windows(2).all(|w| w[0] < w[1]));
+ /// ```
+ fn skip_any_while<P>(self, predicate: P) -> SkipAnyWhile<Self, P>
+ where
+ P: Fn(&Self::Item) -> bool + Sync + Send,
+ {
+ SkipAnyWhile::new(self, predicate)
+ }
+
/// Internal method used to define the behavior of this parallel
/// iterator. You should not need to call this directly.
///
@@ -2419,6 +2564,7 @@ pub trait IndexedParallelIterator: ParallelIterator {
/// let r: Vec<Vec<i32>> = a.into_par_iter().chunks(3).collect();
/// assert_eq!(r, vec![vec![1,2,3], vec![4,5,6], vec![7,8,9], vec![10]]);
/// ```
+ #[track_caller]
fn chunks(self, chunk_size: usize) -> Chunks<Self> {
assert!(chunk_size != 0, "chunk_size must not be zero");
Chunks::new(self, chunk_size)
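
Aside on the `#[track_caller]` hunk above: with it, the `chunk_size` assertion reports the caller's location rather than a line inside rayon. A tiny sketch of the failure mode (deliberately panicking, for illustration only):

use rayon::prelude::*;

fn main() {
    // Panics with "chunk_size must not be zero"; thanks to #[track_caller],
    // the reported panic location is this call site, not rayon's internals.
    let _chunks: Vec<Vec<i32>> = (1..10).into_par_iter().chunks(0).collect();
}
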
diff --git a/vendor/rayon/src/iter/plumbing/README.md b/vendor/rayon/src/iter/plumbing/README.md
index dbee36ba0..42d22effe 100644
--- a/vendor/rayon/src/iter/plumbing/README.md
+++ b/vendor/rayon/src/iter/plumbing/README.md
@@ -35,8 +35,8 @@ modes (which is why there are two):
more like a `for_each` call: each time a new item is produced, the
`consume` method is called with that item. (The traits themselves are
a bit more complex, as they support state that can be threaded
- through and ultimately reduced.) Unlike producers, there are two
- variants of consumers. The difference is how the split is performed:
+ through and ultimately reduced.) Like producers, there are two
+ variants of consumers which differ in how the split is performed:
- in the `Consumer` trait, splitting is done with `split_at`, which
accepts an index where the split should be performed. All
iterators can work in this mode. The resulting halves thus have an
diff --git a/vendor/rayon/src/iter/skip_any.rs b/vendor/rayon/src/iter/skip_any.rs
new file mode 100644
index 000000000..0660a564a
--- /dev/null
+++ b/vendor/rayon/src/iter/skip_any.rs
@@ -0,0 +1,144 @@
+use super::plumbing::*;
+use super::*;
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+/// `SkipAny` is an iterator that skips over `n` elements from anywhere in `I`.
+/// This struct is created by the [`skip_any()`] method on [`ParallelIterator`]
+///
+/// [`skip_any()`]: trait.ParallelIterator.html#method.skip_any
+/// [`ParallelIterator`]: trait.ParallelIterator.html
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Clone, Debug)]
+pub struct SkipAny<I: ParallelIterator> {
+ base: I,
+ count: usize,
+}
+
+impl<I> SkipAny<I>
+where
+ I: ParallelIterator,
+{
+ /// Creates a new `SkipAny` iterator.
+ pub(super) fn new(base: I, count: usize) -> Self {
+ SkipAny { base, count }
+ }
+}
+
+impl<I> ParallelIterator for SkipAny<I>
+where
+ I: ParallelIterator,
+{
+ type Item = I::Item;
+
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ let consumer1 = SkipAnyConsumer {
+ base: consumer,
+ count: &AtomicUsize::new(self.count),
+ };
+ self.base.drive_unindexed(consumer1)
+ }
+}
+
+/// ////////////////////////////////////////////////////////////////////////
+/// Consumer implementation
+
+struct SkipAnyConsumer<'f, C> {
+ base: C,
+ count: &'f AtomicUsize,
+}
+
+impl<'f, T, C> Consumer<T> for SkipAnyConsumer<'f, C>
+where
+ C: Consumer<T>,
+ T: Send,
+{
+ type Folder = SkipAnyFolder<'f, C::Folder>;
+ type Reducer = C::Reducer;
+ type Result = C::Result;
+
+ fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
+ let (left, right, reducer) = self.base.split_at(index);
+ (
+ SkipAnyConsumer { base: left, ..self },
+ SkipAnyConsumer {
+ base: right,
+ ..self
+ },
+ reducer,
+ )
+ }
+
+ fn into_folder(self) -> Self::Folder {
+ SkipAnyFolder {
+ base: self.base.into_folder(),
+ count: self.count,
+ }
+ }
+
+ fn full(&self) -> bool {
+ self.base.full()
+ }
+}
+
+impl<'f, T, C> UnindexedConsumer<T> for SkipAnyConsumer<'f, C>
+where
+ C: UnindexedConsumer<T>,
+ T: Send,
+{
+ fn split_off_left(&self) -> Self {
+ SkipAnyConsumer {
+ base: self.base.split_off_left(),
+ ..*self
+ }
+ }
+
+ fn to_reducer(&self) -> Self::Reducer {
+ self.base.to_reducer()
+ }
+}
+
+struct SkipAnyFolder<'f, C> {
+ base: C,
+ count: &'f AtomicUsize,
+}
+
+fn checked_decrement(u: &AtomicUsize) -> bool {
+ u.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |u| u.checked_sub(1))
+ .is_ok()
+}
+
+impl<'f, T, C> Folder<T> for SkipAnyFolder<'f, C>
+where
+ C: Folder<T>,
+{
+ type Result = C::Result;
+
+ fn consume(mut self, item: T) -> Self {
+ if !checked_decrement(self.count) {
+ self.base = self.base.consume(item);
+ }
+ self
+ }
+
+ fn consume_iter<I>(mut self, iter: I) -> Self
+ where
+ I: IntoIterator<Item = T>,
+ {
+ self.base = self.base.consume_iter(
+ iter.into_iter()
+ .skip_while(move |_| checked_decrement(self.count)),
+ );
+ self
+ }
+
+ fn complete(self) -> C::Result {
+ self.base.complete()
+ }
+
+ fn full(&self) -> bool {
+ self.base.full()
+ }
+}
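
Both `skip_any` and `take_any` coordinate through a single shared `AtomicUsize`, decremented with `fetch_update`/`checked_sub` so that it stops cleanly at zero. A standalone sketch of that counter pattern, using plain `std` threads as a stand-in for rayon's splitting (hypothetical demo code, not part of the patch):

use std::sync::atomic::{AtomicUsize, Ordering};

// Same helper as checked_decrement() above: returns true while the shared
// budget is nonzero, and false forever after it reaches zero.
fn checked_decrement(u: &AtomicUsize) -> bool {
    u.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |u| u.checked_sub(1))
        .is_ok()
}

fn main() {
    let budget = AtomicUsize::new(5);
    let taken: Vec<usize> = std::thread::scope(|s| {
        let handles: Vec<_> = (0usize..4)
            .map(|t| {
                let budget = &budget;
                s.spawn(move || {
                    // Each worker keeps items only while the shared budget allows,
                    // mirroring TakeAnyFolder; SkipAnyFolder inverts the test.
                    (t * 25..t * 25 + 25)
                        .filter(|_| checked_decrement(budget))
                        .collect::<Vec<usize>>()
                })
            })
            .collect();
        handles.into_iter().flat_map(|h| h.join().unwrap()).collect()
    });
    // Exactly 5 items survive in total, taken from "anywhere" in 0..100.
    assert_eq!(taken.len(), 5);
    println!("{taken:?}");
}
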
diff --git a/vendor/rayon/src/iter/skip_any_while.rs b/vendor/rayon/src/iter/skip_any_while.rs
new file mode 100644
index 000000000..28b9e5973
--- /dev/null
+++ b/vendor/rayon/src/iter/skip_any_while.rs
@@ -0,0 +1,166 @@
+use super::plumbing::*;
+use super::*;
+use std::fmt;
+use std::sync::atomic::{AtomicBool, Ordering};
+
+/// `SkipAnyWhile` is an iterator that skips over elements from anywhere in `I`
+/// until the callback returns `false`.
+/// This struct is created by the [`skip_any_while()`] method on [`ParallelIterator`]
+///
+/// [`skip_any_while()`]: trait.ParallelIterator.html#method.skip_any_while
+/// [`ParallelIterator`]: trait.ParallelIterator.html
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Clone)]
+pub struct SkipAnyWhile<I: ParallelIterator, P> {
+ base: I,
+ predicate: P,
+}
+
+impl<I: ParallelIterator + fmt::Debug, P> fmt::Debug for SkipAnyWhile<I, P> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SkipAnyWhile")
+ .field("base", &self.base)
+ .finish()
+ }
+}
+
+impl<I, P> SkipAnyWhile<I, P>
+where
+ I: ParallelIterator,
+{
+ /// Creates a new `SkipAnyWhile` iterator.
+ pub(super) fn new(base: I, predicate: P) -> Self {
+ SkipAnyWhile { base, predicate }
+ }
+}
+
+impl<I, P> ParallelIterator for SkipAnyWhile<I, P>
+where
+ I: ParallelIterator,
+ P: Fn(&I::Item) -> bool + Sync + Send,
+{
+ type Item = I::Item;
+
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ let consumer1 = SkipAnyWhileConsumer {
+ base: consumer,
+ predicate: &self.predicate,
+ skipping: &AtomicBool::new(true),
+ };
+ self.base.drive_unindexed(consumer1)
+ }
+}
+
+/// ////////////////////////////////////////////////////////////////////////
+/// Consumer implementation
+
+struct SkipAnyWhileConsumer<'p, C, P> {
+ base: C,
+ predicate: &'p P,
+ skipping: &'p AtomicBool,
+}
+
+impl<'p, T, C, P> Consumer<T> for SkipAnyWhileConsumer<'p, C, P>
+where
+ C: Consumer<T>,
+ P: Fn(&T) -> bool + Sync,
+{
+ type Folder = SkipAnyWhileFolder<'p, C::Folder, P>;
+ type Reducer = C::Reducer;
+ type Result = C::Result;
+
+ fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
+ let (left, right, reducer) = self.base.split_at(index);
+ (
+ SkipAnyWhileConsumer { base: left, ..self },
+ SkipAnyWhileConsumer {
+ base: right,
+ ..self
+ },
+ reducer,
+ )
+ }
+
+ fn into_folder(self) -> Self::Folder {
+ SkipAnyWhileFolder {
+ base: self.base.into_folder(),
+ predicate: self.predicate,
+ skipping: self.skipping,
+ }
+ }
+
+ fn full(&self) -> bool {
+ self.base.full()
+ }
+}
+
+impl<'p, T, C, P> UnindexedConsumer<T> for SkipAnyWhileConsumer<'p, C, P>
+where
+ C: UnindexedConsumer<T>,
+ P: Fn(&T) -> bool + Sync,
+{
+ fn split_off_left(&self) -> Self {
+ SkipAnyWhileConsumer {
+ base: self.base.split_off_left(),
+ ..*self
+ }
+ }
+
+ fn to_reducer(&self) -> Self::Reducer {
+ self.base.to_reducer()
+ }
+}
+
+struct SkipAnyWhileFolder<'p, C, P> {
+ base: C,
+ predicate: &'p P,
+ skipping: &'p AtomicBool,
+}
+
+fn skip<T>(item: &T, skipping: &AtomicBool, predicate: &impl Fn(&T) -> bool) -> bool {
+ if !skipping.load(Ordering::Relaxed) {
+ return false;
+ }
+ if predicate(item) {
+ return true;
+ }
+ skipping.store(false, Ordering::Relaxed);
+ false
+}
+
+impl<'p, T, C, P> Folder<T> for SkipAnyWhileFolder<'p, C, P>
+where
+ C: Folder<T>,
+ P: Fn(&T) -> bool + 'p,
+{
+ type Result = C::Result;
+
+ fn consume(mut self, item: T) -> Self {
+ if !skip(&item, self.skipping, self.predicate) {
+ self.base = self.base.consume(item);
+ }
+ self
+ }
+
+ fn consume_iter<I>(mut self, iter: I) -> Self
+ where
+ I: IntoIterator<Item = T>,
+ {
+ self.base = self.base.consume_iter(
+ iter.into_iter()
+ .skip_while(move |x| skip(x, self.skipping, self.predicate)),
+ );
+ self
+ }
+
+ fn complete(self) -> C::Result {
+ self.base.complete()
+ }
+
+ fn full(&self) -> bool {
+ self.base.full()
+ }
+}
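
The `skip()` helper above (and its `take()` twin later in this diff) implements a one-way latch: the `AtomicBool` starts `true` and can only flip to `false`, so a single `false` predicate result permanently ends the skipping (or taking) phase. A small sequential sketch of that latch behavior (demo code, not part of the patch):

use std::sync::atomic::{AtomicBool, Ordering};

// Same helper as skip() above, reproduced for a standalone demonstration.
fn skip<T>(item: &T, skipping: &AtomicBool, predicate: &impl Fn(&T) -> bool) -> bool {
    if !skipping.load(Ordering::Relaxed) {
        return false;
    }
    if predicate(item) {
        return true;
    }
    skipping.store(false, Ordering::Relaxed);
    false
}

fn main() {
    let skipping = AtomicBool::new(true);
    let pred = |x: &i32| *x < 3;

    // Items are skipped while the predicate holds...
    assert!(skip(&0, &skipping, &pred));
    assert!(skip(&2, &skipping, &pred));
    // ...the first false result flips the latch...
    assert!(!skip(&7, &skipping, &pred));
    // ...and afterwards nothing is skipped, even items the predicate accepts.
    assert!(!skip(&1, &skipping, &pred));
}
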
diff --git a/vendor/rayon/src/iter/take_any.rs b/vendor/rayon/src/iter/take_any.rs
new file mode 100644
index 000000000..e3992b383
--- /dev/null
+++ b/vendor/rayon/src/iter/take_any.rs
@@ -0,0 +1,144 @@
+use super::plumbing::*;
+use super::*;
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+/// `TakeAny` is an iterator that iterates over `n` elements from anywhere in `I`.
+/// This struct is created by the [`take_any()`] method on [`ParallelIterator`]
+///
+/// [`take_any()`]: trait.ParallelIterator.html#method.take_any
+/// [`ParallelIterator`]: trait.ParallelIterator.html
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Clone, Debug)]
+pub struct TakeAny<I: ParallelIterator> {
+ base: I,
+ count: usize,
+}
+
+impl<I> TakeAny<I>
+where
+ I: ParallelIterator,
+{
+ /// Creates a new `TakeAny` iterator.
+ pub(super) fn new(base: I, count: usize) -> Self {
+ TakeAny { base, count }
+ }
+}
+
+impl<I> ParallelIterator for TakeAny<I>
+where
+ I: ParallelIterator,
+{
+ type Item = I::Item;
+
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ let consumer1 = TakeAnyConsumer {
+ base: consumer,
+ count: &AtomicUsize::new(self.count),
+ };
+ self.base.drive_unindexed(consumer1)
+ }
+}
+
+/// ////////////////////////////////////////////////////////////////////////
+/// Consumer implementation
+
+struct TakeAnyConsumer<'f, C> {
+ base: C,
+ count: &'f AtomicUsize,
+}
+
+impl<'f, T, C> Consumer<T> for TakeAnyConsumer<'f, C>
+where
+ C: Consumer<T>,
+ T: Send,
+{
+ type Folder = TakeAnyFolder<'f, C::Folder>;
+ type Reducer = C::Reducer;
+ type Result = C::Result;
+
+ fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
+ let (left, right, reducer) = self.base.split_at(index);
+ (
+ TakeAnyConsumer { base: left, ..self },
+ TakeAnyConsumer {
+ base: right,
+ ..self
+ },
+ reducer,
+ )
+ }
+
+ fn into_folder(self) -> Self::Folder {
+ TakeAnyFolder {
+ base: self.base.into_folder(),
+ count: self.count,
+ }
+ }
+
+ fn full(&self) -> bool {
+ self.count.load(Ordering::Relaxed) == 0 || self.base.full()
+ }
+}
+
+impl<'f, T, C> UnindexedConsumer<T> for TakeAnyConsumer<'f, C>
+where
+ C: UnindexedConsumer<T>,
+ T: Send,
+{
+ fn split_off_left(&self) -> Self {
+ TakeAnyConsumer {
+ base: self.base.split_off_left(),
+ ..*self
+ }
+ }
+
+ fn to_reducer(&self) -> Self::Reducer {
+ self.base.to_reducer()
+ }
+}
+
+struct TakeAnyFolder<'f, C> {
+ base: C,
+ count: &'f AtomicUsize,
+}
+
+fn checked_decrement(u: &AtomicUsize) -> bool {
+ u.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |u| u.checked_sub(1))
+ .is_ok()
+}
+
+impl<'f, T, C> Folder<T> for TakeAnyFolder<'f, C>
+where
+ C: Folder<T>,
+{
+ type Result = C::Result;
+
+ fn consume(mut self, item: T) -> Self {
+ if checked_decrement(self.count) {
+ self.base = self.base.consume(item);
+ }
+ self
+ }
+
+ fn consume_iter<I>(mut self, iter: I) -> Self
+ where
+ I: IntoIterator<Item = T>,
+ {
+ self.base = self.base.consume_iter(
+ iter.into_iter()
+ .take_while(move |_| checked_decrement(self.count)),
+ );
+ self
+ }
+
+ fn complete(self) -> C::Result {
+ self.base.complete()
+ }
+
+ fn full(&self) -> bool {
+ self.count.load(Ordering::Relaxed) == 0 || self.base.full()
+ }
+}
diff --git a/vendor/rayon/src/iter/take_any_while.rs b/vendor/rayon/src/iter/take_any_while.rs
new file mode 100644
index 000000000..e6a91afab
--- /dev/null
+++ b/vendor/rayon/src/iter/take_any_while.rs
@@ -0,0 +1,166 @@
+use super::plumbing::*;
+use super::*;
+use std::fmt;
+use std::sync::atomic::{AtomicBool, Ordering};
+
+/// `TakeAnyWhile` is an iterator that iterates over elements from anywhere in `I`
+/// until the callback returns `false`.
+/// This struct is created by the [`take_any_while()`] method on [`ParallelIterator`]
+///
+/// [`take_any_while()`]: trait.ParallelIterator.html#method.take_any_while
+/// [`ParallelIterator`]: trait.ParallelIterator.html
+#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
+#[derive(Clone)]
+pub struct TakeAnyWhile<I: ParallelIterator, P> {
+ base: I,
+ predicate: P,
+}
+
+impl<I: ParallelIterator + fmt::Debug, P> fmt::Debug for TakeAnyWhile<I, P> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("TakeAnyWhile")
+ .field("base", &self.base)
+ .finish()
+ }
+}
+
+impl<I, P> TakeAnyWhile<I, P>
+where
+ I: ParallelIterator,
+{
+ /// Creates a new `TakeAnyWhile` iterator.
+ pub(super) fn new(base: I, predicate: P) -> Self {
+ TakeAnyWhile { base, predicate }
+ }
+}
+
+impl<I, P> ParallelIterator for TakeAnyWhile<I, P>
+where
+ I: ParallelIterator,
+ P: Fn(&I::Item) -> bool + Sync + Send,
+{
+ type Item = I::Item;
+
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ let consumer1 = TakeAnyWhileConsumer {
+ base: consumer,
+ predicate: &self.predicate,
+ taking: &AtomicBool::new(true),
+ };
+ self.base.drive_unindexed(consumer1)
+ }
+}
+
+/// ////////////////////////////////////////////////////////////////////////
+/// Consumer implementation
+
+struct TakeAnyWhileConsumer<'p, C, P> {
+ base: C,
+ predicate: &'p P,
+ taking: &'p AtomicBool,
+}
+
+impl<'p, T, C, P> Consumer<T> for TakeAnyWhileConsumer<'p, C, P>
+where
+ C: Consumer<T>,
+ P: Fn(&T) -> bool + Sync,
+{
+ type Folder = TakeAnyWhileFolder<'p, C::Folder, P>;
+ type Reducer = C::Reducer;
+ type Result = C::Result;
+
+ fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) {
+ let (left, right, reducer) = self.base.split_at(index);
+ (
+ TakeAnyWhileConsumer { base: left, ..self },
+ TakeAnyWhileConsumer {
+ base: right,
+ ..self
+ },
+ reducer,
+ )
+ }
+
+ fn into_folder(self) -> Self::Folder {
+ TakeAnyWhileFolder {
+ base: self.base.into_folder(),
+ predicate: self.predicate,
+ taking: self.taking,
+ }
+ }
+
+ fn full(&self) -> bool {
+ !self.taking.load(Ordering::Relaxed) || self.base.full()
+ }
+}
+
+impl<'p, T, C, P> UnindexedConsumer<T> for TakeAnyWhileConsumer<'p, C, P>
+where
+ C: UnindexedConsumer<T>,
+ P: Fn(&T) -> bool + Sync,
+{
+ fn split_off_left(&self) -> Self {
+ TakeAnyWhileConsumer {
+ base: self.base.split_off_left(),
+ ..*self
+ }
+ }
+
+ fn to_reducer(&self) -> Self::Reducer {
+ self.base.to_reducer()
+ }
+}
+
+struct TakeAnyWhileFolder<'p, C, P> {
+ base: C,
+ predicate: &'p P,
+ taking: &'p AtomicBool,
+}
+
+fn take<T>(item: &T, taking: &AtomicBool, predicate: &impl Fn(&T) -> bool) -> bool {
+ if !taking.load(Ordering::Relaxed) {
+ return false;
+ }
+ if predicate(item) {
+ return true;
+ }
+ taking.store(false, Ordering::Relaxed);
+ false
+}
+
+impl<'p, T, C, P> Folder<T> for TakeAnyWhileFolder<'p, C, P>
+where
+ C: Folder<T>,
+ P: Fn(&T) -> bool + 'p,
+{
+ type Result = C::Result;
+
+ fn consume(mut self, item: T) -> Self {
+ if take(&item, self.taking, self.predicate) {
+ self.base = self.base.consume(item);
+ }
+ self
+ }
+
+ fn consume_iter<I>(mut self, iter: I) -> Self
+ where
+ I: IntoIterator<Item = T>,
+ {
+ self.base = self.base.consume_iter(
+ iter.into_iter()
+ .take_while(move |x| take(x, self.taking, self.predicate)),
+ );
+ self
+ }
+
+ fn complete(self) -> C::Result {
+ self.base.complete()
+ }
+
+ fn full(&self) -> bool {
+ !self.taking.load(Ordering::Relaxed) || self.base.full()
+ }
+}
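
One consequence of the racy-predicate semantics documented in `mod.rs` above: with a pure predicate, every item `take_any_while` yields did pass the predicate, but a racing `false` on another thread may end the run early, so only an upper bound on the count is guaranteed. A short sketch, assuming a rayon build containing this patch:

use rayon::prelude::*;

fn main() {
    let result: Vec<i32> = (0..100)
        .into_par_iter()
        .take_any_while(|&x| x < 50)
        .collect();

    // Every yielded item satisfied the predicate when it was checked...
    assert!(result.iter().all(|&x| x < 50));
    // ...but another thread hitting x >= 50 may stop the run early,
    // so the count is only bounded, not exact.
    assert!(result.len() <= 50);
}
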
diff --git a/vendor/rayon/src/iter/test.rs b/vendor/rayon/src/iter/test.rs
index 94323d79d..c72068df7 100644
--- a/vendor/rayon/src/iter/test.rs
+++ b/vendor/rayon/src/iter/test.rs
@@ -468,6 +468,7 @@ fn check_cmp_gt_to_seq() {
}
#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn check_cmp_short_circuit() {
// We only use a single thread in order to make the short-circuit behavior deterministic.
let pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
@@ -497,6 +498,7 @@ fn check_cmp_short_circuit() {
}
#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn check_partial_cmp_short_circuit() {
// We only use a single thread to make the short-circuit behavior deterministic.
let pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
@@ -526,6 +528,7 @@ fn check_partial_cmp_short_circuit() {
}
#[test]
+#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn check_partial_cmp_nan_short_circuit() {
// We only use a single thread to make the short-circuit behavior deterministic.
let pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap();