path: root/third_party/rust/crossbeam-channel/benches/crossbeam.rs
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 09:22:09 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 09:22:09 +0000
commit     43a97878ce14b72f0981164f87f2e35e14151312 (patch)
tree       620249daf56c0258faa40cbdcf9cfba06de2a846 /third_party/rust/crossbeam-channel/benches/crossbeam.rs
parent     Initial commit. (diff)
download   firefox-upstream.tar.xz
           firefox-upstream.zip
Adding upstream version 110.0.1. (upstream/110.0.1, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/crossbeam-channel/benches/crossbeam.rs')
-rw-r--r--  third_party/rust/crossbeam-channel/benches/crossbeam.rs | 712
1 file changed, 712 insertions(+), 0 deletions(-)
diff --git a/third_party/rust/crossbeam-channel/benches/crossbeam.rs b/third_party/rust/crossbeam-channel/benches/crossbeam.rs
new file mode 100644
index 0000000000..1c05222947
--- /dev/null
+++ b/third_party/rust/crossbeam-channel/benches/crossbeam.rs
@@ -0,0 +1,712 @@
+#![feature(test)]
+
+extern crate test;
+
+use crossbeam_channel::{bounded, unbounded};
+use crossbeam_utils::thread::scope;
+use test::Bencher;
+
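+// Total number of messages transferred in one benchmark iteration; the
+// multi-threaded benchmarks divide this count evenly among their threads.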
+const TOTAL_STEPS: usize = 40_000;
+
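+// Benchmarks for the unbounded (infinite-capacity) channel.
+//
+// The multi-threaded benchmarks in this and the following modules share a
+// common harness: worker threads block on the zero-capacity channel `r1` until
+// the timed loop hands them a start token through `s1`, perform their share of
+// the work on the channel under test, and then report completion through `s2`.
+// Dropping `s1` at the end of each benchmark shuts the workers down.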
+mod unbounded {
+    use super::*;
+
+    #[bench]
+    fn create(b: &mut Bencher) {
+        b.iter(unbounded::<i32>);
+    }
+
+    #[bench]
+    fn oneshot(b: &mut Bencher) {
+        b.iter(|| {
+            let (s, r) = unbounded::<i32>();
+            s.send(0).unwrap();
+            r.recv().unwrap();
+        });
+    }
+
+    #[bench]
+    fn inout(b: &mut Bencher) {
+        let (s, r) = unbounded::<i32>();
+        b.iter(|| {
+            s.send(0).unwrap();
+            r.recv().unwrap();
+        });
+    }
+
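+    // Every thread acts as both a sender and a receiver: it sends one message
+    // and then receives one, `steps` times per start signal.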
+    #[bench]
+    fn par_inout(b: &mut Bencher) {
+        let threads = num_cpus::get();
+        let steps = TOTAL_STEPS / threads;
+        let (s, r) = unbounded::<i32>();
+
+        let (s1, r1) = bounded(0);
+        let (s2, r2) = bounded(0);
+        scope(|scope| {
+            for _ in 0..threads {
+                scope.spawn(|_| {
+                    while r1.recv().is_ok() {
+                        for i in 0..steps {
+                            s.send(i as i32).unwrap();
+                            r.recv().unwrap();
+                        }
+                        s2.send(()).unwrap();
+                    }
+                });
+            }
+
+            b.iter(|| {
+                for _ in 0..threads {
+                    s1.send(()).unwrap();
+                }
+                for _ in 0..threads {
+                    r2.recv().unwrap();
+                }
+            });
+            drop(s1);
+        })
+        .unwrap();
+    }
+
+    #[bench]
+    fn spsc(b: &mut Bencher) {
+        let steps = TOTAL_STEPS;
+        let (s, r) = unbounded::<i32>();
+
+        let (s1, r1) = bounded(0);
+        let (s2, r2) = bounded(0);
+        scope(|scope| {
+            scope.spawn(|_| {
+                while r1.recv().is_ok() {
+                    for i in 0..steps {
+                        s.send(i as i32).unwrap();
+                    }
+                    s2.send(()).unwrap();
+                }
+            });
+
+            b.iter(|| {
+                s1.send(()).unwrap();
+                for _ in 0..steps {
+                    r.recv().unwrap();
+                }
+                r2.recv().unwrap();
+            });
+            drop(s1);
+        })
+        .unwrap();
+    }
+
+    #[bench]
+    fn spmc(b: &mut Bencher) {
+        let threads = num_cpus::get() - 1;
+        let steps = TOTAL_STEPS / threads;
+        let (s, r) = unbounded::<i32>();
+
+        let (s1, r1) = bounded(0);
+        let (s2, r2) = bounded(0);
+        scope(|scope| {
+            for _ in 0..threads {
+                scope.spawn(|_| {
+                    while r1.recv().is_ok() {
+                        for _ in 0..steps {
+                            r.recv().unwrap();
+                        }
+                        s2.send(()).unwrap();
+                    }
+                });
+            }
+
+            b.iter(|| {
+                for _ in 0..threads {
+                    s1.send(()).unwrap();
+                }
+                for i in 0..steps * threads {
+                    s.send(i as i32).unwrap();
+                }
+                for _ in 0..threads {
+                    r2.recv().unwrap();
+                }
+            });
+            drop(s1);
+        })
+        .unwrap();
+    }
+
+    #[bench]
+    fn mpsc(b: &mut Bencher) {
+        let threads = num_cpus::get() - 1;
+        let steps = TOTAL_STEPS / threads;
+        let (s, r) = unbounded::<i32>();
+
+        let (s1, r1) = bounded(0);
+        let (s2, r2) = bounded(0);
+        scope(|scope| {
+            for _ in 0..threads {
+                scope.spawn(|_| {
+                    while r1.recv().is_ok() {
+                        for i in 0..steps {
+                            s.send(i as i32).unwrap();
+                        }
+                        s2.send(()).unwrap();
+                    }
+                });
+            }
+
+            b.iter(|| {
+                for _ in 0..threads {
+                    s1.send(()).unwrap();
+                }
+                for _ in 0..steps * threads {
+                    r.recv().unwrap();
+                }
+                for _ in 0..threads {
+                    r2.recv().unwrap();
+                }
+            });
+            drop(s1);
+        })
+        .unwrap();
+    }
+
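+    // Half of the threads send `steps` messages each while the other half
+    // receive `steps` messages each.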
+    #[bench]
+    fn mpmc(b: &mut Bencher) {
+        let threads = num_cpus::get();
+        let steps = TOTAL_STEPS / threads;
+        let (s, r) = unbounded::<i32>();
+
+        let (s1, r1) = bounded(0);
+        let (s2, r2) = bounded(0);
+        scope(|scope| {
+            for _ in 0..threads / 2 {
+                scope.spawn(|_| {
+                    while r1.recv().is_ok() {
+                        for i in 0..steps {
+                            s.send(i as i32).unwrap();
+                        }
+                        s2.send(()).unwrap();
+                    }
+                });
+            }
+            for _ in 0..threads / 2 {
+                scope.spawn(|_| {
+                    while r1.recv().is_ok() {
+                        for _ in 0..steps {
+                            r.recv().unwrap();
+                        }
+                        s2.send(()).unwrap();
+                    }
+                });
+            }
+
+            b.iter(|| {
+                for _ in 0..threads {
+                    s1.send(()).unwrap();
+                }
+                for _ in 0..threads {
+                    r2.recv().unwrap();
+                }
+            });
+            drop(s1);
+        })
+        .unwrap();
+    }
+}
+
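+// Benchmarks for bounded channels whose capacity covers every message that can
+// be in flight at once, so senders are not blocked by a full buffer.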
+mod bounded_n {
+    use super::*;
+
+    #[bench]
+    fn spsc(b: &mut Bencher) {
+        let steps = TOTAL_STEPS;
+        let (s, r) = bounded::<i32>(steps);
+
+        let (s1, r1) = bounded(0);
+        let (s2, r2) = bounded(0);
+        scope(|scope| {
+            scope.spawn(|_| {
+                while r1.recv().is_ok() {
+                    for i in 0..steps {
+                        s.send(i as i32).unwrap();
+                    }
+                    s2.send(()).unwrap();
+                }
+            });
+
+            b.iter(|| {
+                s1.send(()).unwrap();
+                for _ in 0..steps {
+                    r.recv().unwrap();
+                }
+                r2.recv().unwrap();
+            });
+            drop(s1);
+        })
+        .unwrap();
+    }
+
+    #[bench]
+    fn spmc(b: &mut Bencher) {
+        let threads = num_cpus::get() - 1;
+        let steps = TOTAL_STEPS / threads;
+        let (s, r) = bounded::<i32>(steps * threads);
+
+        let (s1, r1) = bounded(0);
+        let (s2, r2) = bounded(0);
+        scope(|scope| {
+            for _ in 0..threads {
+                scope.spawn(|_| {
+                    while r1.recv().is_ok() {
+                        for _ in 0..steps {
+                            r.recv().unwrap();
+                        }
+                        s2.send(()).unwrap();
+                    }
+                });
+            }
+
+            b.iter(|| {
+                for _ in 0..threads {
+                    s1.send(()).unwrap();
+                }
+                for i in 0..steps * threads {
+                    s.send(i as i32).unwrap();
+                }
+                for _ in 0..threads {
+                    r2.recv().unwrap();
+                }
+            });
+            drop(s1);
+        })
+        .unwrap();
+    }
+
+    #[bench]
+    fn mpsc(b: &mut Bencher) {
+        let threads = num_cpus::get() - 1;
+        let steps = TOTAL_STEPS / threads;
+        let (s, r) = bounded::<i32>(steps * threads);
+
+        let (s1, r1) = bounded(0);
+        let (s2, r2) = bounded(0);
+        scope(|scope| {
+            for _ in 0..threads {
+                scope.spawn(|_| {
+                    while r1.recv().is_ok() {
+                        for i in 0..steps {
+                            s.send(i as i32).unwrap();
+                        }
+                        s2.send(()).unwrap();
+                    }
+                });
+            }
+
+            b.iter(|| {
+                for _ in 0..threads {
+                    s1.send(()).unwrap();
+                }
+                for _ in 0..steps * threads {
+                    r.recv().unwrap();
+                }
+                for _ in 0..threads {
+                    r2.recv().unwrap();
+                }
+            });
+            drop(s1);
+        })
+        .unwrap();
+    }
+
+    #[bench]
+    fn par_inout(b: &mut Bencher) {
+        let threads = num_cpus::get();
+        let steps = TOTAL_STEPS / threads;
+        let (s, r) = bounded::<i32>(threads);
+
+        let (s1, r1) = bounded(0);
+        let (s2, r2) = bounded(0);
+        scope(|scope| {
+            for _ in 0..threads {
+                scope.spawn(|_| {
+                    while r1.recv().is_ok() {
+                        for i in 0..steps {
+                            s.send(i as i32).unwrap();
+                            r.recv().unwrap();
+                        }
+                        s2.send(()).unwrap();
+                    }
+                });
+            }
+
+            b.iter(|| {
+                for _ in 0..threads {
+                    s1.send(()).unwrap();
+                }
+                for _ in 0..threads {
+                    r2.recv().unwrap();
+                }
+            });
+            drop(s1);
+        })
+        .unwrap();
+    }
+
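+    // Splitting the threads evenly into senders and receivers requires an even
+    // number of CPUs.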
+    #[bench]
+    fn mpmc(b: &mut Bencher) {
+        let threads = num_cpus::get();
+        assert_eq!(threads % 2, 0);
+        let steps = TOTAL_STEPS / threads;
+        let (s, r) = bounded::<i32>(steps * threads);
+
+        let (s1, r1) = bounded(0);
+        let (s2, r2) = bounded(0);
+        scope(|scope| {
+            for _ in 0..threads / 2 {
+                scope.spawn(|_| {
+                    while r1.recv().is_ok() {
+                        for i in 0..steps {
+                            s.send(i as i32).unwrap();
+                        }
+                        s2.send(()).unwrap();
+                    }
+                });
+            }
+            for _ in 0..threads / 2 {
+                scope.spawn(|_| {
+                    while r1.recv().is_ok() {
+                        for _ in 0..steps {
+                            r.recv().unwrap();
+                        }
+                        s2.send(()).unwrap();
+                    }
+                });
+            }
+
+            b.iter(|| {
+                for _ in 0..threads {
+                    s1.send(()).unwrap();
+                }
+                for _ in 0..threads {
+                    r2.recv().unwrap();
+                }
+            });
+            drop(s1);
+        })
+        .unwrap();
+    }
+}
+
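+// Benchmarks for bounded channels with a capacity of a single message.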
+mod bounded_1 {
+    use super::*;
+
+    #[bench]
+    fn create(b: &mut Bencher) {
+        b.iter(|| bounded::<i32>(1));
+    }
+
+    #[bench]
+    fn oneshot(b: &mut Bencher) {
+        b.iter(|| {
+            let (s, r) = bounded::<i32>(1);
+            s.send(0).unwrap();
+            r.recv().unwrap();
+        });
+    }
+
+    #[bench]
+    fn spsc(b: &mut Bencher) {
+        let steps = TOTAL_STEPS;
+        let (s, r) = bounded::<i32>(1);
+
+        let (s1, r1) = bounded(0);
+        let (s2, r2) = bounded(0);
+        scope(|scope| {
+            scope.spawn(|_| {
+                while r1.recv().is_ok() {
+                    for i in 0..steps {
+                        s.send(i as i32).unwrap();
+                    }
+                    s2.send(()).unwrap();
+                }
+            });
+
+            b.iter(|| {
+                s1.send(()).unwrap();
+                for _ in 0..steps {
+                    r.recv().unwrap();
+                }
+                r2.recv().unwrap();
+            });
+            drop(s1);
+        })
+        .unwrap();
+    }
+
+    #[bench]
+    fn spmc(b: &mut Bencher) {
+        let threads = num_cpus::get() - 1;
+        let steps = TOTAL_STEPS / threads;
+        let (s, r) = bounded::<i32>(1);
+
+        let (s1, r1) = bounded(0);
+        let (s2, r2) = bounded(0);
+        scope(|scope| {
+            for _ in 0..threads {
+                scope.spawn(|_| {
+                    while r1.recv().is_ok() {
+                        for _ in 0..steps {
+                            r.recv().unwrap();
+                        }
+                        s2.send(()).unwrap();
+                    }
+                });
+            }
+
+            b.iter(|| {
+                for _ in 0..threads {
+                    s1.send(()).unwrap();
+                }
+                for i in 0..steps * threads {
+                    s.send(i as i32).unwrap();
+                }
+                for _ in 0..threads {
+                    r2.recv().unwrap();
+                }
+            });
+            drop(s1);
+        })
+        .unwrap();
+    }
+
+    #[bench]
+    fn mpsc(b: &mut Bencher) {
+        let threads = num_cpus::get() - 1;
+        let steps = TOTAL_STEPS / threads;
+        let (s, r) = bounded::<i32>(1);
+
+        let (s1, r1) = bounded(0);
+        let (s2, r2) = bounded(0);
+        scope(|scope| {
+            for _ in 0..threads {
+                scope.spawn(|_| {
+                    while r1.recv().is_ok() {
+                        for i in 0..steps {
+                            s.send(i as i32).unwrap();
+                        }
+                        s2.send(()).unwrap();
+                    }
+                });
+            }
+
+            b.iter(|| {
+                for _ in 0..threads {
+                    s1.send(()).unwrap();
+                }
+                for _ in 0..steps * threads {
+                    r.recv().unwrap();
+                }
+                for _ in 0..threads {
+                    r2.recv().unwrap();
+                }
+            });
+            drop(s1);
+        })
+        .unwrap();
+    }
+
+    #[bench]
+    fn mpmc(b: &mut Bencher) {
+        let threads = num_cpus::get();
+        let steps = TOTAL_STEPS / threads;
+        let (s, r) = bounded::<i32>(1);
+
+        let (s1, r1) = bounded(0);
+        let (s2, r2) = bounded(0);
+        scope(|scope| {
+            for _ in 0..threads / 2 {
+                scope.spawn(|_| {
+                    while r1.recv().is_ok() {
+                        for i in 0..steps {
+                            s.send(i as i32).unwrap();
+                        }
+                        s2.send(()).unwrap();
+                    }
+                });
+            }
+            for _ in 0..threads / 2 {
+                scope.spawn(|_| {
+                    while r1.recv().is_ok() {
+                        for _ in 0..steps {
+                            r.recv().unwrap();
+                        }
+                        s2.send(()).unwrap();
+                    }
+                });
+            }
+
+            b.iter(|| {
+                for _ in 0..threads {
+                    s1.send(()).unwrap();
+                }
+                for _ in 0..threads {
+                    r2.recv().unwrap();
+                }
+            });
+            drop(s1);
+        })
+        .unwrap();
+    }
+}
+
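+// Benchmarks for zero-capacity channels, where each send must rendezvous with
+// a matching receive.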
+mod bounded_0 {
+    use super::*;
+
+    #[bench]
+    fn create(b: &mut Bencher) {
+        b.iter(|| bounded::<i32>(0));
+    }
+
+    #[bench]
+    fn spsc(b: &mut Bencher) {
+        let steps = TOTAL_STEPS;
+        let (s, r) = bounded::<i32>(0);
+
+        let (s1, r1) = bounded(0);
+        let (s2, r2) = bounded(0);
+        scope(|scope| {
+            scope.spawn(|_| {
+                while r1.recv().is_ok() {
+                    for i in 0..steps {
+                        s.send(i as i32).unwrap();
+                    }
+                    s2.send(()).unwrap();
+                }
+            });
+
+            b.iter(|| {
+                s1.send(()).unwrap();
+                for _ in 0..steps {
+                    r.recv().unwrap();
+                }
+                r2.recv().unwrap();
+            });
+            drop(s1);
+        })
+        .unwrap();
+    }
+
+    #[bench]
+    fn spmc(b: &mut Bencher) {
+        let threads = num_cpus::get() - 1;
+        let steps = TOTAL_STEPS / threads;
+        let (s, r) = bounded::<i32>(0);
+
+        let (s1, r1) = bounded(0);
+        let (s2, r2) = bounded(0);
+        scope(|scope| {
+            for _ in 0..threads {
+                scope.spawn(|_| {
+                    while r1.recv().is_ok() {
+                        for _ in 0..steps {
+                            r.recv().unwrap();
+                        }
+                        s2.send(()).unwrap();
+                    }
+                });
+            }
+
+            b.iter(|| {
+                for _ in 0..threads {
+                    s1.send(()).unwrap();
+                }
+                for i in 0..steps * threads {
+                    s.send(i as i32).unwrap();
+                }
+                for _ in 0..threads {
+                    r2.recv().unwrap();
+                }
+            });
+            drop(s1);
+        })
+        .unwrap();
+    }
+
+    #[bench]
+    fn mpsc(b: &mut Bencher) {
+        let threads = num_cpus::get() - 1;
+        let steps = TOTAL_STEPS / threads;
+        let (s, r) = bounded::<i32>(0);
+
+        let (s1, r1) = bounded(0);
+        let (s2, r2) = bounded(0);
+        scope(|scope| {
+            for _ in 0..threads {
+                scope.spawn(|_| {
+                    while r1.recv().is_ok() {
+                        for i in 0..steps {
+                            s.send(i as i32).unwrap();
+                        }
+                        s2.send(()).unwrap();
+                    }
+                });
+            }
+
+            b.iter(|| {
+                for _ in 0..threads {
+                    s1.send(()).unwrap();
+                }
+                for _ in 0..steps * threads {
+                    r.recv().unwrap();
+                }
+                for _ in 0..threads {
+                    r2.recv().unwrap();
+                }
+            });
+            drop(s1);
+        })
+        .unwrap();
+    }
+
+    #[bench]
+    fn mpmc(b: &mut Bencher) {
+        let threads = num_cpus::get();
+        let steps = TOTAL_STEPS / threads;
+        let (s, r) = bounded::<i32>(0);
+
+        let (s1, r1) = bounded(0);
+        let (s2, r2) = bounded(0);
+        scope(|scope| {
+            for _ in 0..threads / 2 {
+                scope.spawn(|_| {
+                    while r1.recv().is_ok() {
+                        for i in 0..steps {
+                            s.send(i as i32).unwrap();
+                        }
+                        s2.send(()).unwrap();
+                    }
+                });
+            }
+            for _ in 0..threads / 2 {
+                scope.spawn(|_| {
+                    while r1.recv().is_ok() {
+                        for _ in 0..steps {
+                            r.recv().unwrap();
+                        }
+                        s2.send(()).unwrap();
+                    }
+                });
+            }
+
+            b.iter(|| {
+                for _ in 0..threads {
+                    s1.send(()).unwrap();
+                }
+                for _ in 0..threads {
+                    r2.recv().unwrap();
+                }
+            });
+            drop(s1);
+        })
+        .unwrap();
+    }
+}