use super::TaskPool;
use alloc::vec::Vec;

/// Provides functions for mapping read-only slices across a provided [`TaskPool`].
pub trait ParallelSlice<T: Sync>: AsRef<[T]> {
    /// Splits the slice into chunks of size `chunk_size` or less and maps the chunks
    /// in parallel across the provided `task_pool`. One task is spawned in the task pool
    /// for every chunk.
    ///
    /// The iteration function takes the index of the chunk in the original slice as the
    /// first argument, and the chunk as the second argument.
    ///
    /// Returns a `Vec` of the mapped results in the same order as the input.
    ///
    /// # Example
    ///
    /// ```
    /// # use bevy_tasks::prelude::*;
    /// # use bevy_tasks::TaskPool;
    /// let task_pool = TaskPool::new();
    /// let counts = (0..10000).collect::<Vec<u32>>();
    /// let incremented = counts.par_chunk_map(&task_pool, 100, |_index, chunk| {
    ///     let mut results = Vec::new();
    ///     for count in chunk {
    ///         results.push(*count + 2);
    ///     }
    ///     results
    /// });
    /// # let flattened: Vec<_> = incremented.into_iter().flatten().collect();
    /// # assert_eq!(flattened, (2..10002).collect::<Vec<u32>>());
    /// ```
    ///
    /// # See Also
    ///
    /// - [`ParallelSliceMut::par_chunk_map_mut`] for mapping mutable slices.
    /// - [`ParallelSlice::par_splat_map`] for mapping when a specific chunk size is unknown.
    fn par_chunk_map<F, R>(&self, task_pool: &TaskPool, chunk_size: usize, f: F) -> Vec<R>
    where
        F: Fn(usize, &[T]) -> R + Send + Sync,
        R: Send + 'static,
    {
        let slice = self.as_ref();
        // Borrow `f` so each spawned task captures a shared reference to the closure
        // instead of trying to move the closure itself into every task.
        let f = &f;
        task_pool.scope(|scope| {
            for (index, chunk) in slice.chunks(chunk_size).enumerate() {
                scope.spawn(async move { f(index, chunk) });
            }
        })
    }

    /// Splits the slice into a maximum of `max_tasks` chunks, and maps the chunks in parallel
    /// across the provided `task_pool`. One task is spawned in the task pool for every chunk.
    ///
    /// If `max_tasks` is `None`, this function will attempt to use one chunk per thread in
    /// `task_pool`.
    ///
    /// The iteration function takes the index of the chunk in the original slice as the
    /// first argument, and the chunk as the second argument.
    ///
    /// Returns a `Vec` of the mapped results in the same order as the input.
    ///
    /// # Example
    ///
    /// ```
    /// # use bevy_tasks::prelude::*;
    /// # use bevy_tasks::TaskPool;
    /// let task_pool = TaskPool::new();
    /// let counts = (0..10000).collect::<Vec<u32>>();
    /// let incremented = counts.par_splat_map(&task_pool, None, |_index, chunk| {
    ///     let mut results = Vec::new();
    ///     for count in chunk {
    ///         results.push(*count + 2);
    ///     }
    ///     results
    /// });
    /// # let flattened: Vec<_> = incremented.into_iter().flatten().collect();
    /// # assert_eq!(flattened, (2..10002).collect::<Vec<u32>>());
    /// ```
    ///
    /// # See Also
    ///
    /// - [`ParallelSliceMut::par_splat_map_mut`] for mapping mutable slices.
    /// - [`ParallelSlice::par_chunk_map`] for mapping when a specific chunk size is desirable.
    fn par_splat_map<F, R>(&self, task_pool: &TaskPool, max_tasks: Option<usize>, f: F) -> Vec<R>
    where
        F: Fn(usize, &[T]) -> R + Send + Sync,
        R: Send + 'static,
    {
        let slice = self.as_ref();
        // Choose a chunk size that yields roughly one chunk per pool thread, or
        // fewer, larger chunks when `max_tasks` caps the task count; clamp to at
        // least 1 so short (or empty) slices still produce a valid chunk size.
        let chunk_size = core::cmp::max(
            1,
            core::cmp::max(
                slice.len() / task_pool.thread_num(),
                slice.len() / max_tasks.unwrap_or(usize::MAX),
            ),
        );

        slice.par_chunk_map(task_pool, chunk_size, f)
    }
}

impl<S, T: Sync> ParallelSlice<T> for S where S: AsRef<[T]> {}
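
// A small extra check (a sketch added here, not part of the original test suite;
// the module name is illustrative): with 1000 elements and `max_tasks = Some(2)`,
// the chunk size computed by `par_splat_map` works out to at least 500, so at most
// two tasks are spawned and together they must see every element exactly once.
#[cfg(test)]
mod par_splat_map_sizing_test {
    use crate::*;
    use alloc::vec;

    #[test]
    fn respects_max_tasks() {
        let v = vec![1u32; 1000];
        let task_pool = TaskPool::new();
        let chunk_lengths = v.par_splat_map(&task_pool, Some(2), |_, chunk| chunk.len());
        // No more chunks than `max_tasks` for this length...
        assert!(chunk_lengths.len() <= 2);
        // ...and the chunks cover the whole slice.
        assert_eq!(chunk_lengths.iter().sum::<usize>(), 1000);
    }
}
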
/// Provides functions for mapping mutable slices across a provided [`TaskPool`].
pub trait ParallelSliceMut<T: Send>: AsMut<[T]> {
    /// Splits the slice into chunks of size `chunk_size` or less and maps the chunks
    /// in parallel across the provided `task_pool`. One task is spawned in the task pool
    /// for every chunk.
    ///
    /// The iteration function takes the index of the chunk in the original slice as the
    /// first argument, and the chunk as the second argument.
    ///
    /// Returns a `Vec` of the mapped results in the same order as the input.
    ///
    /// # Example
    ///
    /// ```
    /// # use bevy_tasks::prelude::*;
    /// # use bevy_tasks::TaskPool;
    /// let task_pool = TaskPool::new();
    /// let mut counts = (0..10000).collect::<Vec<u32>>();
    /// let incremented = counts.par_chunk_map_mut(&task_pool, 100, |_index, chunk| {
    ///     let mut results = Vec::new();
    ///     for count in chunk {
    ///         *count += 5;
    ///         results.push(*count - 2);
    ///     }
    ///     results
    /// });
    ///
    /// assert_eq!(counts, (5..10005).collect::<Vec<u32>>());
    /// # let flattened: Vec<_> = incremented.into_iter().flatten().collect();
    /// # assert_eq!(flattened, (3..10003).collect::<Vec<u32>>());
    /// ```
    ///
    /// # See Also
    ///
    /// - [`ParallelSlice::par_chunk_map`] for mapping immutable slices.
    /// - [`ParallelSliceMut::par_splat_map_mut`] for mapping when a specific chunk size is unknown.
    fn par_chunk_map_mut<F, R>(&mut self, task_pool: &TaskPool, chunk_size: usize, f: F) -> Vec<R>
    where
        F: Fn(usize, &mut [T]) -> R + Send + Sync,
        R: Send + 'static,
    {
        let slice = self.as_mut();
        // Borrow `f` so each spawned task captures a shared reference to the closure
        // instead of trying to move the closure itself into every task.
        let f = &f;
        task_pool.scope(|scope| {
            for (index, chunk) in slice.chunks_mut(chunk_size).enumerate() {
                scope.spawn(async move { f(index, chunk) });
            }
        })
    }

    /// Splits the slice into a maximum of `max_tasks` chunks, and maps the chunks in parallel
    /// across the provided `task_pool`. One task is spawned in the task pool for every chunk.
    ///
    /// If `max_tasks` is `None`, this function will attempt to use one chunk per thread in
    /// `task_pool`.
    ///
    /// The iteration function takes the index of the chunk in the original slice as the
    /// first argument, and the chunk as the second argument.
    ///
    /// Returns a `Vec` of the mapped results in the same order as the input.
    ///
    /// # Example
    ///
    /// ```
    /// # use bevy_tasks::prelude::*;
    /// # use bevy_tasks::TaskPool;
    /// let task_pool = TaskPool::new();
    /// let mut counts = (0..10000).collect::<Vec<u32>>();
    /// let incremented = counts.par_splat_map_mut(&task_pool, None, |_index, chunk| {
    ///     let mut results = Vec::new();
    ///     for count in chunk {
    ///         *count += 5;
    ///         results.push(*count - 2);
    ///     }
    ///     results
    /// });
    ///
    /// assert_eq!(counts, (5..10005).collect::<Vec<u32>>());
    /// # let flattened: Vec<_> = incremented.into_iter().flatten().collect::<Vec<u32>>();
    /// # assert_eq!(flattened, (3..10003).collect::<Vec<u32>>());
    /// ```
    ///
    /// # See Also
    ///
    /// - [`ParallelSlice::par_splat_map`] for mapping immutable slices.
    /// - [`ParallelSliceMut::par_chunk_map_mut`] for mapping when a specific chunk size is desirable.
    fn par_splat_map_mut<F, R>(
        &mut self,
        task_pool: &TaskPool,
        max_tasks: Option<usize>,
        f: F,
    ) -> Vec<R>
    where
        F: Fn(usize, &mut [T]) -> R + Send + Sync,
        R: Send + 'static,
    {
        // The binding must be mutable so the call below can take `&mut slice`.
        let mut slice = self.as_mut();
        // Choose a chunk size that yields roughly one chunk per pool thread, or
        // fewer, larger chunks when `max_tasks` caps the task count; clamp to at
        // least 1 so short (or empty) slices still produce a valid chunk size.
        let chunk_size = core::cmp::max(
            1,
            core::cmp::max(
                slice.len() / task_pool.thread_num(),
                slice.len() / max_tasks.unwrap_or(usize::MAX),
            ),
        );

        slice.par_chunk_map_mut(task_pool, chunk_size, f)
    }
}

impl<S, T: Send> ParallelSliceMut<T> for S where S: AsMut<[T]> {}

#[cfg(test)]
mod tests {
    use crate::*;
    use alloc::vec;

    #[test]
    fn test_par_chunks_map() {
        let v = vec![42; 1000];
        let task_pool = TaskPool::new();
        let outputs = v.par_splat_map(&task_pool, None, |_, numbers| -> i32 {
            numbers.iter().sum()
        });

        let mut sum = 0;
        for output in outputs {
            sum += output;
        }

        assert_eq!(sum, 1000 * 42);
    }

    #[test]
    fn test_par_chunks_map_mut() {
        let mut v = vec![42; 1000];
        let task_pool = TaskPool::new();

        let outputs = v.par_splat_map_mut(&task_pool, None, |_, numbers| -> i32 {
            for number in numbers.iter_mut() {
                *number *= 2;
            }
            numbers.iter().sum()
        });

        let mut sum = 0;
        for output in outputs {
            sum += output;
        }

        assert_eq!(sum, 1000 * 42 * 2);
        assert_eq!(v[0], 84);
    }

    #[test]
    fn test_par_chunks_map_index() {
        let v = vec![1; 1000];
        let task_pool = TaskPool::new();
        let outputs = v.par_chunk_map(&task_pool, 100, |index, numbers| -> i32 {
            numbers.iter().sum::<i32>() * index as i32
        });

        // 10 chunks of 100 ones each, weighted by chunk index:
        // 100 * (0 + 1 + ... + 9) = 100 * (9 * 10) / 2.
        assert_eq!(outputs.iter().sum::<i32>(), 100 * (9 * 10) / 2);
    }
}
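
// A small extra check (a sketch added here, not part of the original test suite;
// the module name is illustrative): the docs above state that results come back
// in the same order as the input, so flattening per-chunk copies produced by
// `par_splat_map_mut` must reproduce the original sequence, even when `max_tasks`
// caps the number of chunks.
#[cfg(test)]
mod par_splat_map_mut_order_test {
    use crate::*;
    use alloc::vec::Vec;

    #[test]
    fn preserves_input_order() {
        let mut v: Vec<u32> = (0..1000).collect();
        let task_pool = TaskPool::new();
        // Each task copies out the chunk it was handed.
        let chunks = v.par_splat_map_mut(&task_pool, Some(3), |_, chunk| chunk.to_vec());
        let flattened: Vec<u32> = chunks.into_iter().flatten().collect();
        assert_eq!(flattened, (0..1000).collect::<Vec<u32>>());
    }
}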