author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:45:59 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:45:59 +0000
commit     19fcec84d8d7d21e796c7624e521b60d28ee21ed (patch)
tree       42d26aa27d1e3f7c0b8bd3fd14e7d7082f5008dc /src/boost/libs/mpi/example
parent     Initial commit. (diff)
Adding upstream version 16.2.11+ds. (upstream/16.2.11+ds, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/boost/libs/mpi/example')
-rw-r--r--  src/boost/libs/mpi/example/cartesian_communicator.cpp    |  42
-rw-r--r--  src/boost/libs/mpi/example/generate_collect.cpp          | 129
-rw-r--r--  src/boost/libs/mpi/example/generate_collect_optional.cpp | 113
-rw-r--r--  src/boost/libs/mpi/example/global_min.cpp                |  31
-rw-r--r--  src/boost/libs/mpi/example/hello_world.cpp               |  33
-rw-r--r--  src/boost/libs/mpi/example/hello_world_broadcast.cpp     |  28
-rw-r--r--  src/boost/libs/mpi/example/hello_world_groups.cpp        |  46
-rw-r--r--  src/boost/libs/mpi/example/hello_world_nonblocking.cpp   |  36
-rw-r--r--  src/boost/libs/mpi/example/in_place_global_min.cpp       |  29
-rw-r--r--  src/boost/libs/mpi/example/parallel_example.cpp          | 196
-rw-r--r--  src/boost/libs/mpi/example/python/hello_world.py         |  16
-rw-r--r--  src/boost/libs/mpi/example/random_content.cpp            |  98
-rw-r--r--  src/boost/libs/mpi/example/random_gather.cpp             |  31
-rw-r--r--  src/boost/libs/mpi/example/random_min.cpp                |  30
-rw-r--r--  src/boost/libs/mpi/example/random_scatter.cpp            |  37
-rw-r--r--  src/boost/libs/mpi/example/reduce_performance_test.cpp   | 138
-rw-r--r--  src/boost/libs/mpi/example/string_cat.cpp                |  46
17 files changed, 1079 insertions(+), 0 deletions(-)
diff --git a/src/boost/libs/mpi/example/cartesian_communicator.cpp b/src/boost/libs/mpi/example/cartesian_communicator.cpp
new file mode 100644
index 000000000..7b011041d
--- /dev/null
+++ b/src/boost/libs/mpi/example/cartesian_communicator.cpp
@@ -0,0 +1,42 @@
+// Copyright Alain Miniussi 2014.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// Authors: Alain Miniussi
+
+#include <vector>
+#include <iostream>
+
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/collectives.hpp>
+#include <boost/mpi/environment.hpp>
+#include <boost/mpi/cartesian_communicator.hpp>
+
+namespace mpi = boost::mpi;
+// Curly brace init makes this useless, but
+// - Need to support obsolete compilers like g++ 4.3.x for some reason
+// - Can't conditionally compile with bjam (unless you find
+//   the doc, and read it, which would only make sense if you
+//   actually want to use bjam, which does not (make sense))
+typedef mpi::cartesian_dimension cd;
+
+int main(int argc, char* argv[])
+{
+ mpi::environment env;
+ mpi::communicator world;
+
+ if (world.size() != 24) return -1;
+ mpi::cartesian_dimension dims[] = {cd(2, true), cd(3,true), cd(4,true)};
+ mpi::cartesian_communicator cart(world, mpi::cartesian_topology(dims));
+ for (int r = 0; r < cart.size(); ++r) {
+ cart.barrier();
+ if (r == cart.rank()) {
+ std::vector<int> c = cart.coordinates(r);
+ std::cout << "rk :" << r << " coords: "
+ << c[0] << ' ' << c[1] << ' ' << c[2] << '\n';
+ }
+ }
+ return 0;
+}
+
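For reference, a minimal sketch of the same cartesian_communicator API on an arbitrary process count: a one-dimensional periodic ring instead of the fixed 2x3x4 grid required above. It only recombines calls that already appear in cartesian_communicator.cpp and is an illustration, not one of the upstream example files.

#include <iostream>
#include <vector>

#include <boost/mpi/environment.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/cartesian_communicator.hpp>

namespace mpi = boost::mpi;

int main(int argc, char* argv[])
{
  mpi::environment env(argc, argv);
  mpi::communicator world;

  // one periodic dimension spanning all ranks, so any process count works
  mpi::cartesian_dimension dims[] = { mpi::cartesian_dimension(world.size(), true) };
  mpi::cartesian_communicator ring(world, mpi::cartesian_topology(dims));

  std::vector<int> coords = ring.coordinates(ring.rank());
  std::cout << "rank " << ring.rank() << " has coordinate " << coords[0] << '\n';
  return 0;
}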
diff --git a/src/boost/libs/mpi/example/generate_collect.cpp b/src/boost/libs/mpi/example/generate_collect.cpp
new file mode 100644
index 000000000..5579d50d2
--- /dev/null
+++ b/src/boost/libs/mpi/example/generate_collect.cpp
@@ -0,0 +1,129 @@
+// Copyright (C) 2006 Douglas Gregor <doug.gregor@gmail.com>
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// An example using Boost.MPI's split() operation on communicators to
+// create separate data-generating processes and data-collecting
+// processes.
+#include <boost/mpi.hpp>
+#include <iostream>
+#include <cstdlib>
+#include <boost/serialization/vector.hpp>
+namespace mpi = boost::mpi;
+
+enum message_tags { msg_data_packet, msg_broadcast_data, msg_finished };
+
+void generate_data(mpi::communicator local, mpi::communicator world)
+{
+ using std::srand;
+ using std::rand;
+
+ // The rank of the collector within the world communicator
+ int master_collector = local.size();
+
+ srand(time(0) + world.rank());
+
+ // Send out several blocks of random data to the collectors.
+ int num_data_blocks = rand() % 3 + 1;
+ for (int block = 0; block < num_data_blocks; ++block) {
+ // Generate some random data
+ int num_samples = rand() % 1000;
+ std::vector<int> data;
+ for (int i = 0; i < num_samples; ++i) {
+ data.push_back(rand());
+ }
+
+ // Send our data to the master collector process.
+ std::cout << "Generator #" << local.rank() << " sends some data..."
+ << std::endl;
+ world.send(master_collector, msg_data_packet, data);
+ }
+
+ // Wait for all of the generators to complete
+ (local.barrier)();
+
+ // The first generator will send the message to the master collector
+ // indicating that we're done.
+ if (local.rank() == 0)
+ world.send(master_collector, msg_finished);
+}
+
+void collect_data(mpi::communicator local, mpi::communicator world)
+{
+ // The rank of the collector within the world communicator
+ int master_collector = world.size() - local.size();
+
+ if (world.rank() == master_collector) {
+ while (true) {
+ // Wait for a message
+ mpi::status msg = world.probe();
+ if (msg.tag() == msg_data_packet) {
+ // Receive the packet of data
+ std::vector<int> data;
+ world.recv(msg.source(), msg.tag(), data);
+
+ // Tell each of the collectors that we'll be broadcasting some data
+ for (int dest = 1; dest < local.size(); ++dest)
+ local.send(dest, msg_broadcast_data, msg.source());
+
+ // Broadcast the actual data.
+ broadcast(local, data, 0);
+ } else if (msg.tag() == msg_finished) {
+ // Receive the message
+ world.recv(msg.source(), msg.tag());
+
+ // Tell each of the collectors that we're finished
+ for (int dest = 1; dest < local.size(); ++dest)
+ local.send(dest, msg_finished);
+
+ break;
+ }
+ }
+ } else {
+ while (true) {
+ // Wait for a message from the master collector
+ mpi::status msg = local.probe();
+ if (msg.tag() == msg_broadcast_data) {
+ // Receive the broadcast message
+ int originator;
+ local.recv(msg.source(), msg.tag(), originator);
+
+ // Receive the data broadcasted from the master collector
+ std::vector<int> data;
+ broadcast(local, data, 0);
+
+ std::cout << "Collector #" << local.rank()
+ << " is processing data from generator #" << originator
+ << "." << std::endl;
+ } else if (msg.tag() == msg_finished) {
+ // Receive the message
+ local.recv(msg.source(), msg.tag());
+
+ break;
+ }
+ }
+ }
+}
+
+int main(int argc, char* argv[])
+{
+ mpi::environment env(argc, argv);
+ mpi::communicator world;
+
+ if (world.size() < 3) {
+ if (world.rank() == 0) {
+ std::cerr << "Error: this example requires at least 3 processes."
+ << std::endl;
+ }
+ env.abort(-1);
+ }
+
+ bool is_generator = world.rank() < 2 * world.size() / 3;
+ mpi::communicator local = world.split(is_generator? 0 : 1);
+ if (is_generator) generate_data(local, world);
+ else collect_data(local, world);
+
+ return 0;
+}
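The core of generate_collect.cpp is the probe()/tag-dispatch loop: inspect an incoming message's tag before deciding how to receive it. Below is a stripped-down sketch of just that loop, with two processes and no sub-communicators, using only calls that appear in the file above; an illustration, not an upstream example.

#include <boost/mpi.hpp>
#include <boost/serialization/vector.hpp>
#include <iostream>
#include <vector>

namespace mpi = boost::mpi;

enum message_tags { msg_data_packet, msg_finished };

int main(int argc, char* argv[])
{
  mpi::environment env(argc, argv);
  mpi::communicator world;
  if (world.size() < 2) return 0;            // needs one sender and one receiver

  if (world.rank() == 1) {
    std::vector<int> data(3, 42);
    world.send(0, msg_data_packet, data);    // one data packet...
    world.send(0, msg_finished);             // ...then the termination message
  } else if (world.rank() == 0) {
    while (true) {
      mpi::status msg = world.probe();       // wait for any message and inspect its tag
      if (msg.tag() == msg_data_packet) {
        std::vector<int> data;
        world.recv(msg.source(), msg.tag(), data);
        std::cout << "received " << data.size() << " values\n";
      } else if (msg.tag() == msg_finished) {
        world.recv(msg.source(), msg.tag());
        break;
      }
    }
  }
  return 0;
}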
diff --git a/src/boost/libs/mpi/example/generate_collect_optional.cpp b/src/boost/libs/mpi/example/generate_collect_optional.cpp
new file mode 100644
index 000000000..3aa3888c4
--- /dev/null
+++ b/src/boost/libs/mpi/example/generate_collect_optional.cpp
@@ -0,0 +1,113 @@
+// Copyright (C) 2006 Douglas Gregor <doug.gregor@gmail.com>
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// An example using Boost.MPI's split() operation on communicators to
+// create separate data-generating processes and data-collecting
+// processes using boost::optional for broadcasting.
+#include <boost/mpi.hpp>
+#include <iostream>
+#include <cstdlib>
+#include <boost/serialization/vector.hpp>
+#include <boost/serialization/optional.hpp>
+namespace mpi = boost::mpi;
+
+enum message_tags { msg_data_packet, msg_finished };
+
+void generate_data(mpi::communicator local, mpi::communicator world)
+{
+ using std::srand;
+ using std::rand;
+
+ // The rank of the collector within the world communicator
+ int master_collector = local.size();
+
+ srand(time(0) + world.rank());
+
+ // Send out several blocks of random data to the collectors.
+ int num_data_blocks = rand() % 3 + 1;
+ for (int block = 0; block < num_data_blocks; ++block) {
+    // Generate some random data
+ int num_samples = rand() % 1000;
+ std::vector<int> data;
+ for (int i = 0; i < num_samples; ++i) {
+ data.push_back(rand());
+ }
+
+ // Send our data to the master collector process.
+ std::cout << "Generator #" << local.rank() << " sends some data..."
+ << std::endl;
+ world.send(master_collector, msg_data_packet, data);
+ }
+
+ // Wait for all of the generators to complete
+ (local.barrier)();
+
+ // The first generator will send the message to the master collector
+ // indicating that we're done.
+ if (local.rank() == 0)
+ world.send(master_collector, msg_finished);
+}
+
+void collect_data(mpi::communicator local, mpi::communicator world)
+{
+ // The rank of the collector within the world communicator
+ int master_collector = world.size() - local.size();
+
+ if (world.rank() == master_collector) {
+ while (true) {
+ // Wait for a message
+ mpi::status msg = world.probe();
+ if (msg.tag() == msg_data_packet) {
+ // Receive the packet of data into a boost::optional
+ boost::optional<std::vector<int> > data;
+ data = std::vector<int>();
+        world.recv(msg.source(), msg.tag(), *data);
+
+ // Broadcast the actual data.
+ broadcast(local, data, 0);
+ } else if (msg.tag() == msg_finished) {
+ // Receive the message
+ world.recv(msg.source(), msg.tag());
+
+ // Broadcast to each collector to tell them we've finished.
+ boost::optional<std::vector<int> > data;
+ broadcast(local, data, 0);
+ break;
+ }
+ }
+ } else {
+ boost::optional<std::vector<int> > data;
+ do {
+ // Wait for a broadcast from the master collector
+ broadcast(local, data, 0);
+ if (data) {
+ std::cout << "Collector #" << local.rank()
+ << " is processing data." << std::endl;
+ }
+ } while (data);
+ }
+}
+
+int main(int argc, char* argv[])
+{
+ mpi::environment env(argc, argv);
+ mpi::communicator world;
+
+ if (world.size() < 4) {
+ if (world.rank() == 0) {
+ std::cerr << "Error: this example requires at least 4 processes."
+ << std::endl;
+ }
+ env.abort(-1);
+ }
+
+ bool is_generator = world.rank() < 2 * world.size() / 3;
+ mpi::communicator local = world.split(is_generator? 0 : 1);
+ if (is_generator) generate_data(local, world);
+ else collect_data(local, world);
+
+ return 0;
+}
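generate_collect_optional.cpp replaces the explicit msg_finished broadcast of the previous example with an empty boost::optional as a termination sentinel. A minimal sketch of that idiom on its own, assuming only the broadcast() call and serialization headers already used above; an illustration, not an upstream example.

#include <boost/mpi.hpp>
#include <boost/optional.hpp>
#include <boost/serialization/optional.hpp>
#include <iostream>

namespace mpi = boost::mpi;

int main(int argc, char* argv[])
{
  mpi::environment env(argc, argv);
  mpi::communicator world;

  for (int round = 0; round < 3; ++round) {
    boost::optional<int> work;
    if (world.rank() == 0 && round < 2)
      work = round;                          // rounds 0 and 1 carry a value; round 2 is empty
    broadcast(world, work, 0);
    if (work)
      std::cout << "rank " << world.rank() << " got work item " << *work << '\n';
    else if (world.rank() == 0)
      std::cout << "no more work, everyone stops\n";
  }
  return 0;
}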
diff --git a/src/boost/libs/mpi/example/global_min.cpp b/src/boost/libs/mpi/example/global_min.cpp
new file mode 100644
index 000000000..68de57dff
--- /dev/null
+++ b/src/boost/libs/mpi/example/global_min.cpp
@@ -0,0 +1,31 @@
+// Copyright (C) 2013 Alain Miniussi <alain.miniussi@oca.eu>
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// An example using Boost.MPI's all_reduce() that computes the minimum
+// of each process's value and broadcasts the result to all the processes.
+
+#include <boost/mpi.hpp>
+#include <iostream>
+#include <cstdlib>
+namespace mpi = boost::mpi;
+
+int main(int argc, char* argv[])
+{
+ mpi::environment env(argc, argv);
+ mpi::communicator world;
+
+ std::srand(world.rank());
+ int my_number = std::rand();
+ int minimum;
+
+ all_reduce(world, my_number, minimum, mpi::minimum<int>());
+
+ if (world.rank() == 0) {
+ std::cout << "The minimum value is " << minimum << std::endl;
+ }
+
+ return 0;
+}
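The same all_reduce() call with a different operation yields a different global result. Below is a small sketch extending global_min.cpp to compute both the minimum and the sum (std::plus is already used with reduce() in reduce_performance_test.cpp further down); an illustration, not an upstream example.

#include <boost/mpi.hpp>
#include <functional>
#include <iostream>

namespace mpi = boost::mpi;

int main(int argc, char* argv[])
{
  mpi::environment env(argc, argv);
  mpi::communicator world;

  int my_number = world.rank() + 1;
  int minimum, sum;
  all_reduce(world, my_number, minimum, mpi::minimum<int>());
  all_reduce(world, my_number, sum, std::plus<int>());

  if (world.rank() == 0)
    std::cout << "min = " << minimum << ", sum = " << sum << std::endl;
  return 0;
}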
diff --git a/src/boost/libs/mpi/example/hello_world.cpp b/src/boost/libs/mpi/example/hello_world.cpp
new file mode 100644
index 000000000..7095c1724
--- /dev/null
+++ b/src/boost/libs/mpi/example/hello_world.cpp
@@ -0,0 +1,33 @@
+// Copyright (C) 2006 Douglas Gregor <doug.gregor@gmail.com>
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// A simple Hello, world! example using Boost.MPI message passing.
+
+#include <boost/mpi.hpp>
+#include <iostream>
+#include <boost/serialization/string.hpp> // Needed to send/receive strings!
+namespace mpi = boost::mpi;
+
+int main(int argc, char* argv[])
+{
+ mpi::environment env(argc, argv);
+ mpi::communicator world;
+
+ if (world.rank() == 0) {
+ world.send(1, 0, std::string("Hello"));
+ std::string msg;
+ world.recv(1, 1, msg);
+ std::cout << msg << "!" << std::endl;
+ } else {
+ std::string msg;
+ world.recv(0, 0, msg);
+ std::cout << msg << ", ";
+ std::cout.flush();
+ world.send(0, 1, std::string("world"));
+ }
+
+ return 0;
+}
diff --git a/src/boost/libs/mpi/example/hello_world_broadcast.cpp b/src/boost/libs/mpi/example/hello_world_broadcast.cpp
new file mode 100644
index 000000000..4ffe239ea
--- /dev/null
+++ b/src/boost/libs/mpi/example/hello_world_broadcast.cpp
@@ -0,0 +1,28 @@
+// Copyright (C) 2006 Douglas Gregor <doug.gregor@gmail.com>
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// A simple Hello, world! example using Boost.MPI broadcast()
+
+#include <boost/mpi.hpp>
+#include <iostream>
+#include <boost/serialization/string.hpp> // Needed to send/receive strings!
+namespace mpi = boost::mpi;
+
+int main(int argc, char* argv[])
+{
+ mpi::environment env(argc, argv);
+ mpi::communicator world;
+
+ std::string value;
+ if (world.rank() == 0) {
+ value = "Hello, World!";
+ }
+
+ broadcast(world, value, 0);
+
+ std::cout << "Process #" << world.rank() << " says " << value << std::endl;
+ return 0;
+}
diff --git a/src/boost/libs/mpi/example/hello_world_groups.cpp b/src/boost/libs/mpi/example/hello_world_groups.cpp
new file mode 100644
index 000000000..7493aa4a3
--- /dev/null
+++ b/src/boost/libs/mpi/example/hello_world_groups.cpp
@@ -0,0 +1,46 @@
+// Copyright (C) 2013 Andreas Hehn <hehn@phys.ethz.ch>, ETH Zurich
+// based on
+// hello_world_broadcast.cpp (C) 2006 Douglas Gregor <doug.gregor@gmail.com>
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// A simple Hello world! example
+// using boost::mpi::group and boost::mpi::broadcast()
+
+#include <stdexcept>
+#include <boost/mpi/environment.hpp>
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/group.hpp>
+#include <boost/mpi/collectives.hpp>
+
+#include <boost/serialization/string.hpp>
+
+namespace mpi = boost::mpi;
+
+int main(int argc, char* argv[])
+{
+ mpi::environment env(argc, argv);
+ mpi::communicator world;
+ if(world.size() < 2)
+ throw std::runtime_error("Please run with at least 2 MPI processes!");
+
+ int group_a_ranks[2] = {0,1};
+
+ mpi::group world_group = world.group();
+ mpi::group group_a = world_group.include(group_a_ranks,group_a_ranks+2);
+
+ mpi::communicator comm_a(world,group_a);
+
+ std::string value("Hello world!");
+ if(comm_a)
+ {
+ if(comm_a.rank() == 0) {
+ value = "Hello group a!";
+ }
+ mpi::broadcast(comm_a, value, 0);
+ }
+ std::cout << "Process #" << world.rank() << " says " << value << std::endl;
+ return 0;
+}
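The two-rank sub-communicator built above from a group can also be obtained with communicator::split(), the approach taken in the generate_collect examples; with split() every rank receives a valid communicator for its own colour rather than a possibly-null one. A short sketch for comparison; an illustration, not an upstream example.

#include <boost/mpi/environment.hpp>
#include <boost/mpi/communicator.hpp>
#include <iostream>

namespace mpi = boost::mpi;

int main(int argc, char* argv[])
{
  mpi::environment env(argc, argv);
  mpi::communicator world;

  // colour 0 for ranks 0 and 1 (the "group a" of the example), colour 1 for the rest
  int colour = world.rank() < 2 ? 0 : 1;
  mpi::communicator sub = world.split(colour);

  std::cout << "world rank " << world.rank()
            << " is rank " << sub.rank() << " of " << sub.size()
            << " in its sub-communicator" << std::endl;
  return 0;
}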
diff --git a/src/boost/libs/mpi/example/hello_world_nonblocking.cpp b/src/boost/libs/mpi/example/hello_world_nonblocking.cpp
new file mode 100644
index 000000000..c65247b99
--- /dev/null
+++ b/src/boost/libs/mpi/example/hello_world_nonblocking.cpp
@@ -0,0 +1,36 @@
+// Copyright (C) 2006 Douglas Gregor <doug.gregor@gmail.com>
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// A simple Hello, world! example using Boost.MPI message passing.
+
+#include <boost/mpi.hpp>
+#include <iostream>
+#include <boost/serialization/string.hpp> // Needed to send/receive strings!
+namespace mpi = boost::mpi;
+
+int main(int argc, char* argv[])
+{
+ mpi::environment env(argc, argv);
+ mpi::communicator world;
+
+ if (world.rank() == 0) {
+ mpi::request reqs[2];
+ std::string msg, out_msg = "Hello";
+ reqs[0] = world.isend(1, 0, out_msg);
+ reqs[1] = world.irecv(1, 1, msg);
+ mpi::wait_all(reqs, reqs + 2);
+ std::cout << msg << "!" << std::endl;
+ } else {
+ mpi::request reqs[2];
+ std::string msg, out_msg = "world";
+ reqs[0] = world.isend(0, 1, out_msg);
+ reqs[1] = world.irecv(0, 0, msg);
+ mpi::wait_all(reqs, reqs + 2);
+ std::cout << msg << ", ";
+ }
+
+ return 0;
+}
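The isend/irecv/wait_all pattern above generalises to any number of processes. Below is a sketch in which every rank passes an integer around a ring, again using only the nonblocking calls shown in the file; an illustration, not an upstream example.

#include <boost/mpi.hpp>
#include <iostream>

namespace mpi = boost::mpi;

int main(int argc, char* argv[])
{
  mpi::environment env(argc, argv);
  mpi::communicator world;

  int right = (world.rank() + 1) % world.size();
  int left  = (world.rank() + world.size() - 1) % world.size();

  int out_msg = world.rank();
  int in_msg  = -1;

  mpi::request reqs[2];
  reqs[0] = world.isend(right, 0, out_msg);  // post both operations first...
  reqs[1] = world.irecv(left, 0, in_msg);
  mpi::wait_all(reqs, reqs + 2);             // ...then wait for both to finish

  std::cout << "rank " << world.rank() << " received " << in_msg
            << " from rank " << left << std::endl;
  return 0;
}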
diff --git a/src/boost/libs/mpi/example/in_place_global_min.cpp b/src/boost/libs/mpi/example/in_place_global_min.cpp
new file mode 100644
index 000000000..7da2cf2bf
--- /dev/null
+++ b/src/boost/libs/mpi/example/in_place_global_min.cpp
@@ -0,0 +1,29 @@
+// Copyright (C) 2013 Alain Miniussi <alain.miniussi@oca.eu>
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// An example using Boost.MPI's all_reduce() that computes the minimum
+// of each process's value and broadcasts the result to all the processes.
+#include <boost/mpi.hpp>
+#include <iostream>
+#include <cstdlib>
+namespace mpi = boost::mpi;
+
+int main(int argc, char* argv[])
+{
+ mpi::environment env(argc, argv);
+ mpi::communicator world;
+
+ std::srand(world.rank());
+ int my_number = std::rand();
+
+ all_reduce(world, my_number, mpi::minimum<int>());
+
+ if (world.rank() == 0) {
+ std::cout << "The minimum value is " << my_number << std::endl;
+ }
+
+ return 0;
+}
diff --git a/src/boost/libs/mpi/example/parallel_example.cpp b/src/boost/libs/mpi/example/parallel_example.cpp
new file mode 100644
index 000000000..00347d51c
--- /dev/null
+++ b/src/boost/libs/mpi/example/parallel_example.cpp
@@ -0,0 +1,196 @@
+// Copyright (C) 2005-2006 Matthias Troyer
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// An example of a parallel Monte Carlo simulation using some nodes to produce
+// data and others to aggregate the data
+#include <iostream>
+
+#include <boost/mpi.hpp>
+#include <boost/random/parallel.hpp>
+#include <boost/random.hpp>
+#include <boost/foreach.hpp>
+#include <iostream>
+#include <cstdlib>
+
+namespace mpi = boost::mpi;
+
+enum {sample_tag, sample_skeleton_tag, sample_broadcast_tag, quit_tag};
+
+
+void calculate_samples(int sample_length)
+{
+ int num_samples = 100;
+ std::vector<double> sample(sample_length);
+
+ // setup communicator by splitting
+
+ mpi::communicator world;
+ mpi::communicator calculate_communicator = world.split(0);
+
+ unsigned int num_calculate_ranks = calculate_communicator.size();
+
+  // the master of the accumulation ranks is the first of them, hence
+  // its rank is just one after the last calculation rank
+ int master_accumulate_rank = num_calculate_ranks;
+
+ // the master of the calculation ranks sends the skeleton of the sample
+ // to the master of the accumulation ranks
+
+ if (world.rank()==0)
+ world.send(master_accumulate_rank,sample_skeleton_tag,mpi::skeleton(sample));
+
+ // next we extract the content of the sample vector, to be used in sending
+ // the content later on
+
+ mpi::content sample_content = mpi::get_content(sample);
+
+  // now initialize the parallel random number generator
+
+ boost::lcg64 engine(
+ boost::random::stream_number = calculate_communicator.rank(),
+ boost::random::total_streams = calculate_communicator.size()
+ );
+
+ boost::variate_generator<boost::lcg64&,boost::uniform_real<> >
+ rng(engine,boost::uniform_real<>());
+
+ for (unsigned int i=0; i<num_samples/num_calculate_ranks+1;++i) {
+
+ // calculate sample by filling the vector with random numbers
+ // note that std::generate will not work since it takes the generator
+ // by value, and boost::ref cannot be used as a generator.
+ // boost::ref should be fixed so that it can be used as generator
+
+ BOOST_FOREACH(double& x, sample)
+ x = rng();
+
+ // send sample to accumulation ranks
+ // Ideally we want to do this as a broadcast with an inter-communicator
+ // between the calculation and accumulation ranks. MPI2 should support
+ // this, but here we present an MPI1 compatible solution.
+
+ // send content of sample to first (master) accumulation process
+
+ world.send(master_accumulate_rank,sample_tag,sample_content);
+
+ // gather some results from all calculation ranks
+
+ double local_result = sample[0];
+ std::vector<double> gathered_results(calculate_communicator.size());
+ mpi::all_gather(calculate_communicator,local_result,gathered_results);
+ }
+
+ // we are done: the master tells the accumulation ranks to quit
+ if (world.rank()==0)
+ world.send(master_accumulate_rank,quit_tag);
+}
+
+
+
+void accumulate_samples()
+{
+ std::vector<double> sample;
+
+ // setup the communicator for all accumulation ranks by splitting
+
+ mpi::communicator world;
+ mpi::communicator accumulate_communicator = world.split(1);
+
+ bool is_master_accumulate_rank = accumulate_communicator.rank()==0;
+
+ // the master receives the sample skeleton
+
+ if (is_master_accumulate_rank)
+ world.recv(0,sample_skeleton_tag,mpi::skeleton(sample));
+
+ // and broadcasts it to all accumulation ranks
+ mpi::broadcast(accumulate_communicator,mpi::skeleton(sample),0);
+
+ // next we extract the content of the sample vector, to be used in receiving
+ // the content later on
+
+ mpi::content sample_content = mpi::get_content(sample);
+
+ // accumulate until quit is called
+ double sum=0.;
+ while (true) {
+
+
+ // the accumulation master checks whether we should quit
+ if (world.iprobe(0,quit_tag)) {
+ world.recv(0,quit_tag);
+ for (int i=1; i<accumulate_communicator.size();++i)
+ accumulate_communicator.send(i,quit_tag);
+ std::cout << sum << "\n";
+ break; // We're done
+ }
+
+    // the other accumulation ranks check whether we should quit
+ if (accumulate_communicator.iprobe(0,quit_tag)) {
+ accumulate_communicator.recv(0,quit_tag);
+ std::cout << sum << "\n";
+ break; // We're done
+ }
+
+ // check whether the master accumulation rank has received a sample
+ if (world.iprobe(mpi::any_source,sample_tag)) {
+ BOOST_ASSERT(is_master_accumulate_rank);
+
+ // receive the content
+ world.recv(mpi::any_source,sample_tag,sample_content);
+
+      // now we need to broadcast
+      // the problem is we do not have a non-blocking broadcast that we could
+ // abort if we receive a quit message from the master. We thus need to
+ // first tell all accumulation ranks to start a broadcast. If the sample
+ // is small, we could just send the sample in this message, but here we
+ // optimize the code for large samples, so that the overhead of these
+ // sends can be ignored, and we count on an optimized broadcast
+ // implementation with O(log N) complexity
+
+ for (int i=1; i<accumulate_communicator.size();++i)
+ accumulate_communicator.send(i,sample_broadcast_tag);
+
+ // now broadcast the contents of the sample to all accumulate ranks
+ mpi::broadcast(accumulate_communicator,sample_content,0);
+
+ // and handle the sample by summing the appropriate value
+ sum += sample[0];
+ }
+
+    // the other accumulation ranks wait for a message to start the broadcast
+ if (accumulate_communicator.iprobe(0,sample_broadcast_tag)) {
+ BOOST_ASSERT(!is_master_accumulate_rank);
+
+ accumulate_communicator.recv(0,sample_broadcast_tag);
+
+ // receive broadcast of the sample contents
+ mpi::broadcast(accumulate_communicator,sample_content,0);
+
+ // and handle the sample
+
+ // and handle the sample by summing the appropriate value
+ sum += sample[accumulate_communicator.rank()];
+ }
+ }
+}
+
+int main(int argc, char** argv)
+{
+ mpi::environment env(argc, argv);
+ mpi::communicator world;
+
+ // half of the processes generate, the others accumulate
+ // the sample size is just the number of accumulation ranks
+ if (world.rank() < world.size()/2)
+ calculate_samples(world.size()-world.size()/2);
+ else
+ accumulate_samples();
+
+ return 0;
+}
+
+
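parallel_example.cpp relies on the skeleton/content optimisation: the shape of the sample vector crosses the wire once, after which only raw contents are exchanged. Below is a two-process sketch of just that mechanism, using the same skeleton(), get_content() and send/recv calls as the file above; an illustration, not an upstream example.

#include <boost/mpi.hpp>
#include <boost/serialization/vector.hpp>
#include <algorithm>
#include <iostream>
#include <vector>

namespace mpi = boost::mpi;

int main(int argc, char* argv[])
{
  mpi::environment env(argc, argv);
  mpi::communicator world;
  if (world.size() < 2) return 0;

  const int rounds = 3;
  if (world.rank() == 0) {
    std::vector<double> sample(5);
    world.send(1, 0, mpi::skeleton(sample));       // the shape is sent only once
    mpi::content c = mpi::get_content(sample);
    for (int i = 0; i < rounds; ++i) {
      std::fill(sample.begin(), sample.end(), i + 0.5);
      world.send(1, 1, c);                         // only raw data on every round
    }
  } else if (world.rank() == 1) {
    std::vector<double> sample;
    world.recv(0, 0, mpi::skeleton(sample));       // resizes the vector to match
    mpi::content c = mpi::get_content(sample);
    for (int i = 0; i < rounds; ++i) {
      world.recv(0, 1, c);
      std::cout << "round " << i << ": sample[0] = " << sample[0] << '\n';
    }
  }
  return 0;
}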
diff --git a/src/boost/libs/mpi/example/python/hello_world.py b/src/boost/libs/mpi/example/python/hello_world.py
new file mode 100644
index 000000000..73312483b
--- /dev/null
+++ b/src/boost/libs/mpi/example/python/hello_world.py
@@ -0,0 +1,16 @@
+# Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>.
+
+# Use, modification and distribution is subject to the Boost Software
+# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+# http://www.boost.org/LICENSE_1_0.txt)
+
+import boost.parallel.mpi as mpi
+
+if mpi.world.rank == 0:
+ mpi.world.send(1, 0, 'Hello')
+ msg = mpi.world.recv(1, 1)
+ print msg,'!'
+else:
+ msg = mpi.world.recv(0, 0)
+ print msg,', ',
+ mpi.world.send(0, 1, 'world')
diff --git a/src/boost/libs/mpi/example/random_content.cpp b/src/boost/libs/mpi/example/random_content.cpp
new file mode 100644
index 000000000..22fd06d87
--- /dev/null
+++ b/src/boost/libs/mpi/example/random_content.cpp
@@ -0,0 +1,98 @@
+// Copyright (C) 2006 Douglas Gregor <doug.gregor@gmail.com>
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// An example using Boost.MPI's skeletons and content to optimize
+// communication.
+#include <boost/mpi.hpp>
+#include <boost/serialization/list.hpp>
+#include <algorithm>
+#include <functional>
+#include <numeric>
+#include <iostream>
+#include <stdlib.h>
+namespace mpi = boost::mpi;
+
+int main(int argc, char* argv[])
+{
+ mpi::environment env(argc, argv);
+ mpi::communicator world;
+
+ if (world.size() < 2 || world.size() > 4) {
+ if (world.rank() == 0)
+ std::cerr << "error: please execute this program with 2-4 processes.\n";
+ world.abort(-1);
+ }
+
+ if (world.rank() == 0) {
+ int list_len = 50;
+ int iterations = 10;
+
+ if (argc > 1) list_len = atoi(argv[1]);
+ if (argc > 2) iterations = atoi(argv[2]);
+
+ if (list_len <= 0) {
+ std::cerr << "error: please specific a list length greater than zero.\n";
+ world.abort(-1);
+ }
+
+ // Generate the list and broadcast its structure
+ std::list<int> l(list_len);
+ broadcast(world, mpi::skeleton(l), 0);
+
+ // Generate content several times and broadcast out that content
+ mpi::content c = mpi::get_content(l);
+ for (int i = 0; i < iterations; ++i) {
+ do {
+ std::generate(l.begin(), l.end(), &random);
+ } while (std::find_if(l.begin(), l.end(),
+ std::bind1st(std::not_equal_to<int>(), 0))
+ == l.end());
+
+
+ std::cout << "Iteration #" << i << ": sending content"
+ << " (min = " << *std::min_element(l.begin(), l.end())
+ << ", max = " << *std::max_element(l.begin(), l.end())
+ << ", avg = "
+ << std::accumulate(l.begin(), l.end(), 0)/l.size()
+ << ").\n";
+
+ broadcast(world, c, 0);
+ }
+
+ // Notify the slaves that we're done by sending all zeroes
+ std::fill(l.begin(), l.end(), 0);
+ broadcast(world, c, 0);
+
+ } else {
+ // Receive the content and build up our own list
+ std::list<int> l;
+ broadcast(world, mpi::skeleton(l), 0);
+
+ mpi::content c = mpi::get_content(l);
+ int i = 0;
+ do {
+ broadcast(world, c, 0);
+
+ if (std::find_if(l.begin(), l.end(),
+ std::bind1st(std::not_equal_to<int>(), 0)) == l.end())
+ break;
+
+ if (world.rank() == 1)
+ std::cout << "Iteration #" << i << ": max value = "
+ << *std::max_element(l.begin(), l.end()) << ".\n";
+ else if (world.rank() == 2)
+ std::cout << "Iteration #" << i << ": min value = "
+ << *std::min_element(l.begin(), l.end()) << ".\n";
+ else if (world.rank() == 3)
+ std::cout << "Iteration #" << i << ": avg value = "
+ << std::accumulate(l.begin(), l.end(), 0)/l.size()
+ << ".\n";
+ ++i;
+ } while (true);
+ }
+
+ return 0;
+}
diff --git a/src/boost/libs/mpi/example/random_gather.cpp b/src/boost/libs/mpi/example/random_gather.cpp
new file mode 100644
index 000000000..17825f157
--- /dev/null
+++ b/src/boost/libs/mpi/example/random_gather.cpp
@@ -0,0 +1,31 @@
+// Copyright (C) 2006 Douglas Gregor <doug.gregor@gmail.com>
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// An example using Boost.MPI's gather(): [main]
+
+#include <boost/mpi.hpp>
+#include <iostream>
+#include <cstdlib>
+namespace mpi = boost::mpi;
+
+int main(int argc, char* argv[])
+{
+ mpi::environment env(argc, argv);
+ mpi::communicator world;
+
+ std::srand(time(0) + world.rank());
+ int my_number = std::rand();
+ if (world.rank() == 0) {
+ std::vector<int> all_numbers;
+ gather(world, my_number, all_numbers, 0);
+ for (int proc = 0; proc < world.size(); ++proc)
+ std::cout << "Process #" << proc << " thought of " << all_numbers[proc]
+ << std::endl;
+ } else {
+ gather(world, my_number, 0);
+ }
+ return 0;
+}
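The closely related all_gather() collective (used in parallel_example.cpp above) delivers the gathered vector to every rank, not only to the root. A sketch of random_gather.cpp rewritten that way; an illustration, not an upstream example.

#include <boost/mpi.hpp>
#include <iostream>
#include <vector>
#include <cstdlib>
#include <ctime>

namespace mpi = boost::mpi;

int main(int argc, char* argv[])
{
  mpi::environment env(argc, argv);
  mpi::communicator world;

  std::srand(std::time(0) + world.rank());
  int my_number = std::rand();

  std::vector<int> all_numbers(world.size());
  mpi::all_gather(world, my_number, all_numbers);   // every process gets the whole list

  if (world.rank() == world.size() - 1)             // print from the last rank to show
    for (int proc = 0; proc < world.size(); ++proc) // that non-root ranks have the data too
      std::cout << "Process #" << proc << " thought of " << all_numbers[proc] << std::endl;
  return 0;
}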
diff --git a/src/boost/libs/mpi/example/random_min.cpp b/src/boost/libs/mpi/example/random_min.cpp
new file mode 100644
index 000000000..d0a67ee02
--- /dev/null
+++ b/src/boost/libs/mpi/example/random_min.cpp
@@ -0,0 +1,30 @@
+// Copyright (C) 2006 Douglas Gregor <doug.gregor@gmail.com>
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// An example using Boost.MPI's reduce() to compute a minimum value.
+#include <boost/mpi.hpp>
+#include <iostream>
+#include <cstdlib>
+namespace mpi = boost::mpi;
+
+int main(int argc, char* argv[])
+{
+ mpi::environment env(argc, argv);
+ mpi::communicator world;
+
+ std::srand(time(0) + world.rank());
+ int my_number = std::rand();
+
+ if (world.rank() == 0) {
+ int minimum;
+ reduce(world, my_number, minimum, mpi::minimum<int>(), 0);
+ std::cout << "The minimum value is " << minimum << std::endl;
+ } else {
+ reduce(world, my_number, mpi::minimum<int>(), 0);
+ }
+
+ return 0;
+}
diff --git a/src/boost/libs/mpi/example/random_scatter.cpp b/src/boost/libs/mpi/example/random_scatter.cpp
new file mode 100644
index 000000000..fc8879e4e
--- /dev/null
+++ b/src/boost/libs/mpi/example/random_scatter.cpp
@@ -0,0 +1,37 @@
+// Copyright (C) 2006 Douglas Gregor <doug.gregor@gmail.com>
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// An example using Boost.MPI's scatter(): [main]
+
+#include <boost/mpi.hpp>
+#include <boost/mpi/collectives.hpp>
+#include <iostream>
+#include <cstdlib>
+#include <vector>
+
+namespace mpi = boost::mpi;
+
+int main(int argc, char* argv[])
+{
+ mpi::environment env(argc, argv);
+ mpi::communicator world;
+
+ std::srand(time(0) + world.rank());
+ std::vector<int> all;
+ int mine = -1;
+ if (world.rank() == 0) {
+ all.resize(world.size());
+ std::generate(all.begin(), all.end(), std::rand);
+ }
+ mpi::scatter(world, all, mine, 0);
+ for (int r = 0; r < world.size(); ++r) {
+ world.barrier();
+ if (r == world.rank()) {
+ std::cout << "Rank " << r << " got " << mine << '\n';
+ }
+ }
+ return 0;
+}
diff --git a/src/boost/libs/mpi/example/reduce_performance_test.cpp b/src/boost/libs/mpi/example/reduce_performance_test.cpp
new file mode 100644
index 000000000..40411b14a
--- /dev/null
+++ b/src/boost/libs/mpi/example/reduce_performance_test.cpp
@@ -0,0 +1,138 @@
+// Copyright (C) 2006 Trustees of Indiana University
+//
+// Authors: Douglas Gregor
+// Andrew Lumsdaine
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// Performance test of the reduce() collective
+#include <boost/mpi.hpp>
+#include <boost/lexical_cast.hpp>
+
+namespace mpi = boost::mpi;
+
+struct add_int {
+ int operator()(int x, int y) const { return x + y; }
+};
+
+struct wrapped_int
+{
+ wrapped_int() : value(0) { }
+ wrapped_int(int value) : value(value) { }
+
+ template<typename Archiver>
+ void serialize(Archiver& ar, const unsigned int /*version*/) {
+ ar & value;
+ }
+
+ int value;
+};
+
+inline wrapped_int operator+(wrapped_int x, wrapped_int y)
+{
+ return wrapped_int(x.value + y.value);
+}
+
+namespace boost { namespace mpi {
+ template<> struct is_mpi_datatype<wrapped_int> : mpl::true_ { };
+} }
+
+struct serialized_int
+{
+ serialized_int() : value(0) { }
+ serialized_int(int value) : value(value) { }
+
+ template<typename Archiver>
+ void serialize(Archiver& ar, const unsigned int /*version*/) {
+ ar & value;
+ }
+
+ int value;
+};
+
+inline serialized_int operator+(serialized_int x, serialized_int y)
+{
+ return serialized_int(x.value + y.value);
+}
+
+int main(int argc, char* argv[])
+{
+ mpi::environment env(argc, argv);
+ mpi::communicator world;
+
+ int repeat_count = 100;
+ int outer_repeat_count = 2;
+
+ if (argc > 1) repeat_count = boost::lexical_cast<int>(argv[1]);
+ if (argc > 2) outer_repeat_count = boost::lexical_cast<int>(argv[2]);
+
+ if (world.rank() == 0)
+ std::cout << "# of processors: " << world.size() << std::endl
+ << "# of iterations: " << repeat_count << std::endl;
+
+ int value = world.rank();
+ int result;
+ wrapped_int wi_value = world.rank();
+ wrapped_int wi_result;
+ serialized_int si_value = world.rank();
+ serialized_int si_result;
+
+ // Spin for a while...
+ for (int i = 0; i < repeat_count/10; ++i) {
+ reduce(world, value, result, std::plus<int>(), 0);
+ reduce(world, value, result, add_int(), 0);
+ reduce(world, wi_value, wi_result, std::plus<wrapped_int>(), 0);
+ reduce(world, si_value, si_result, std::plus<serialized_int>(), 0);
+ }
+
+ for (int outer = 0; outer < outer_repeat_count; ++outer) {
+ // Raw MPI
+ mpi::timer time;
+ for (int i = 0; i < repeat_count; ++i) {
+ MPI_Reduce(&value, &result, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
+ }
+ double reduce_raw_mpi_total_time = time.elapsed();
+
+ // MPI_INT/MPI_SUM case
+ time.restart();
+ for (int i = 0; i < repeat_count; ++i) {
+ reduce(world, value, result, std::plus<int>(), 0);
+ }
+ double reduce_int_sum_total_time = time.elapsed();
+
+ // MPI_INT/MPI_Op case
+ time.restart();
+ for (int i = 0; i < repeat_count; ++i) {
+ reduce(world, value, result, add_int(), 0);
+ }
+ double reduce_int_op_total_time = time.elapsed();
+
+ // MPI_Datatype/MPI_Op case
+ time.restart();
+ for (int i = 0; i < repeat_count; ++i) {
+ reduce(world, wi_value, wi_result, std::plus<wrapped_int>(), 0);
+ }
+ double reduce_type_op_total_time = time.elapsed();
+
+ // Serialized/MPI_Op case
+ time.restart();
+ for (int i = 0; i < repeat_count; ++i) {
+ reduce(world, si_value, si_result, std::plus<serialized_int>(), 0);
+ }
+ double reduce_ser_op_total_time = time.elapsed();
+
+
+ if (world.rank() == 0)
+ std::cout << "\nInvocation\tElapsed Time (seconds)"
+ << "\nRaw MPI\t\t\t" << reduce_raw_mpi_total_time
+ << "\nMPI_INT/MPI_SUM\t\t" << reduce_int_sum_total_time
+ << "\nMPI_INT/MPI_Op\t\t" << reduce_int_op_total_time
+ << "\nMPI_Datatype/MPI_Op\t" << reduce_type_op_total_time
+ << "\nSerialized/MPI_Op\t" << reduce_ser_op_total_time
+ << std::endl;
+ }
+
+ return 0;
+}
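The timing harness above is just mpi::timer plus a repeat loop. A reduced sketch of that pattern for timing a single collective, with a barrier so all ranks start together; an illustration, not an upstream example.

#include <boost/mpi.hpp>
#include <functional>
#include <iostream>

namespace mpi = boost::mpi;

int main(int argc, char* argv[])
{
  mpi::environment env(argc, argv);
  mpi::communicator world;

  const int repeat_count = 100;
  int value = world.rank(), result = 0;

  world.barrier();                    // start everyone at (roughly) the same moment
  mpi::timer time;                    // wall-clock timer, as in the test above
  for (int i = 0; i < repeat_count; ++i)
    reduce(world, value, result, std::plus<int>(), 0);
  double elapsed = time.elapsed();

  if (world.rank() == 0)
    std::cout << repeat_count << " reduce() calls took " << elapsed << " seconds\n";
  return 0;
}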
diff --git a/src/boost/libs/mpi/example/string_cat.cpp b/src/boost/libs/mpi/example/string_cat.cpp
new file mode 100644
index 000000000..5602d5964
--- /dev/null
+++ b/src/boost/libs/mpi/example/string_cat.cpp
@@ -0,0 +1,46 @@
+// Copyright (C) 2006 Douglas Gregor <doug.gregor@gmail.com>
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// An example using Boost.MPI's reduce() to concatenate strings.
+#include <boost/mpi.hpp>
+#include <iostream>
+#include <string>
+#include <boost/serialization/string.hpp> // Important for sending strings!
+namespace mpi = boost::mpi;
+
+/* Defining STRING_CONCAT_COMMUTATIVE lies to Boost.MPI by forcing it
+ * to assume that string concatenation is commutative, which it is
+ * not. However, doing so illustrates how the results of a reduction
+ * can change when a non-commutative operator is assumed to be
+ * commutative.
+ */
+#ifdef STRING_CONCAT_COMMUTATIVE
+namespace boost { namespace mpi {
+
+template<>
+struct is_commutative<std::plus<std::string>, std::string> : mpl::true_ { };
+
+} } // end namespace boost::mpi
+#endif
+
+int main(int argc, char* argv[])
+{
+ mpi::environment env(argc, argv);
+ mpi::communicator world;
+
+ std::string names[10] = { "zero ", "one ", "two ", "three ", "four ",
+ "five ", "six ", "seven ", "eight ", "nine " };
+
+ std::string result;
+ reduce(world,
+ world.rank() < 10? names[world.rank()] : std::string("many "),
+ result, std::plus<std::string>(), 0);
+
+ if (world.rank() == 0)
+ std::cout << "The result is " << result << std::endl;
+
+ return 0;
+}
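Because string concatenation is not commutative, a rank-ordered result can also be produced without reduce() at all: gather the per-rank pieces at the root and concatenate them locally. A sketch of that alternative, using the gather() call from random_gather.cpp; an illustration, not an upstream example.

#include <boost/mpi.hpp>
#include <boost/serialization/string.hpp>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

namespace mpi = boost::mpi;

int main(int argc, char* argv[])
{
  mpi::environment env(argc, argv);
  mpi::communicator world;

  std::ostringstream os;
  os << "rank" << world.rank() << ' ';
  std::string piece = os.str();

  if (world.rank() == 0) {
    std::vector<std::string> pieces;
    gather(world, piece, pieces, 0);            // pieces arrive indexed by source rank
    std::string result;
    for (int i = 0; i < world.size(); ++i)
      result += pieces[i];                      // concatenate in rank order
    std::cout << "The result is " << result << std::endl;
  } else {
    gather(world, piece, 0);
  }
  return 0;
}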