author    | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-07 18:45:59 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-07 18:45:59 +0000
commit    | 19fcec84d8d7d21e796c7624e521b60d28ee21ed
tree      | 42d26aa27d1e3f7c0b8bd3fd14e7d7082f5008dc /src/boost/libs/mpi
parent    | Initial commit.
Adding upstream version 16.2.11+ds.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat
102 files changed, 9395 insertions, 0 deletions
diff --git a/src/boost/libs/mpi/build/Jamfile.v2 b/src/boost/libs/mpi/build/Jamfile.v2 new file mode 100644 index 000000000..084838458 --- /dev/null +++ b/src/boost/libs/mpi/build/Jamfile.v2 @@ -0,0 +1,139 @@ +# Copyright (C) 2005, 2006 The Trustees of Indiana University. +# Copyright (C) 2005 Douglas Gregor <doug.gregor -at- gmail.com> +# Copyright (c) 2018 Stefan Seefeld + +# Use, modification and distribution is subject to the Boost Software +# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +# http://www.boost.org/LICENSE_1_0.txt) + +# Authors: Douglas Gregor +# Andrew Lumsdaine + +import mpi ; +import indirect ; +import python ; +import option ; +import regex ; + +# +# The `version-suffix` rule really belongs into python.jam, and +# should be moved there. `split-version` is only duplicated here +# as a prerequisite. (See https://github.com/boostorg/build/pull/290) +# + + +mpi_python_libs = ; + +if [ mpi.configured ] +{ + +project boost/mpi + : source-location ../src + ; + +lib boost_mpi + : + broadcast.cpp + cartesian_communicator.cpp + communicator.cpp + computation_tree.cpp + content_oarchive.cpp + environment.cpp + error_string.cpp + exception.cpp + graph_communicator.cpp + group.cpp + intercommunicator.cpp + mpi_datatype_cache.cpp + mpi_datatype_oarchive.cpp + packed_iarchive.cpp + packed_oarchive.cpp + packed_skeleton_iarchive.cpp + packed_skeleton_oarchive.cpp + point_to_point.cpp + request.cpp + text_skeleton_oarchive.cpp + timer.cpp + offsets.cpp + : # Requirements + <library>../../serialization/build//boost_serialization + <library>/mpi//mpi [ mpi.extra-requirements ] + <define>BOOST_MPI_SOURCE=1 + <link>shared:<define>BOOST_MPI_DYN_LINK=1 + <local-visibility>global + : # Default build + <link>shared + : # Usage requirements + <library>../../serialization/build//boost_serialization + <library>/mpi//mpi [ mpi.extra-requirements ] + ; + + if [ python.configured ] + { + lib boost_mpi_python + : # Sources + python/serialize.cpp + : # Requirements + <library>boost_mpi + <library>/mpi//mpi [ mpi.extra-requirements ] + <library>/boost/python//boost_python + <link>shared:<define>BOOST_MPI_DYN_LINK=1 + <link>shared:<define>BOOST_MPI_PYTHON_DYN_LINK=1 + <link>shared:<define>BOOST_PYTHON_DYN_LINK=1 + <define>BOOST_MPI_PYTHON_SOURCE=1 + <python-debugging>on:<define>BOOST_DEBUG_PYTHON + -<tag>@$(BOOST_JAMROOT_MODULE)%$(BOOST_JAMROOT_MODULE).tag + <tag>@python-tag + <conditional>@python.require-py + <local-visibility>global + + : # Default build + <link>shared + : # Usage requirements + <library>/mpi//mpi [ mpi.extra-requirements ] + ; + + python-extension mpi + : # Sources + python/collectives.cpp + python/py_communicator.cpp + python/datatypes.cpp + python/documentation.cpp + python/py_environment.cpp + python/py_nonblocking.cpp + python/py_exception.cpp + python/module.cpp + python/py_request.cpp + python/skeleton_and_content.cpp + python/status.cpp + python/py_timer.cpp + : # Requirements + <library>/boost/python//boost_python + <library>boost_mpi_python + <library>boost_mpi + <library>/mpi//mpi [ mpi.extra-requirements ] + <link>shared:<define>BOOST_MPI_DYN_LINK=1 + <link>shared:<define>BOOST_MPI_PYTHON_DYN_LINK=1 + <link>shared:<define>BOOST_PYTHON_DYN_LINK=1 + <link>shared <runtime-link>shared + <python-debugging>on:<define>BOOST_DEBUG_PYTHON + ; + + mpi_python_libs = boost_mpi_python mpi ; + } +} +else if ! ( --without-mpi in [ modules.peek : ARGV ] ) +{ + message boost_mpi + : "warning: skipping optional Message Passing Interface (MPI) library." 
+ : "note: to enable MPI support, add \"using mpi ;\" to user-config.jam." + : "note: to suppress this message, pass \"--without-mpi\" to bjam." + : "note: otherwise, you can safely ignore this message." + ; +} +else +{ + alias boost_mpi ; +} + +boost-install boost_mpi $(mpi_python_libs) ; diff --git a/src/boost/libs/mpi/build/__init__.py b/src/boost/libs/mpi/build/__init__.py new file mode 100644 index 000000000..ffd3862ea --- /dev/null +++ b/src/boost/libs/mpi/build/__init__.py @@ -0,0 +1,10 @@ +import sys +if sys.platform == 'linux2': + import DLFCN as dl + flags = sys.getdlopenflags() + sys.setdlopenflags(dl.RTLD_NOW|dl.RTLD_GLOBAL) + import mpi + sys.setdlopenflags(flags) +else: + import mpi + diff --git a/src/boost/libs/mpi/example/cartesian_communicator.cpp b/src/boost/libs/mpi/example/cartesian_communicator.cpp new file mode 100644 index 000000000..7b011041d --- /dev/null +++ b/src/boost/libs/mpi/example/cartesian_communicator.cpp @@ -0,0 +1,42 @@ +// Copyright Alain Miniussi 2014. +// Distributed under the Boost Software License, Version 1.0. +// (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Authors: Alain Miniussi + +#include <vector> +#include <iostream> + +#include <boost/mpi/communicator.hpp> +#include <boost/mpi/collectives.hpp> +#include <boost/mpi/environment.hpp> +#include <boost/mpi/cartesian_communicator.hpp> + +namespace mpi = boost::mpi; +// Curly brace init make this useless, but +// - Need to support obsolete like g++ 4.3.x. for some reason +// - Can't conditionnaly compile with bjam (unless you find +// the doc, and read it, which would only make sense if you +// actually wan't to use bjam, which does not (make sense)) +typedef mpi::cartesian_dimension cd; + +int main(int argc, char* argv[]) +{ + mpi::environment env; + mpi::communicator world; + + if (world.size() != 24) return -1; + mpi::cartesian_dimension dims[] = {cd(2, true), cd(3,true), cd(4,true)}; + mpi::cartesian_communicator cart(world, mpi::cartesian_topology(dims)); + for (int r = 0; r < cart.size(); ++r) { + cart.barrier(); + if (r == cart.rank()) { + std::vector<int> c = cart.coordinates(r); + std::cout << "rk :" << r << " coords: " + << c[0] << ' ' << c[1] << ' ' << c[2] << '\n'; + } + } + return 0; +} + diff --git a/src/boost/libs/mpi/example/generate_collect.cpp b/src/boost/libs/mpi/example/generate_collect.cpp new file mode 100644 index 000000000..5579d50d2 --- /dev/null +++ b/src/boost/libs/mpi/example/generate_collect.cpp @@ -0,0 +1,129 @@ +// Copyright (C) 2006 Douglas Gregor <doug.gregor@gmail.com> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// An example using Boost.MPI's split() operation on communicators to +// create separate data-generating processes and data-collecting +// processes. +#include <boost/mpi.hpp> +#include <iostream> +#include <cstdlib> +#include <boost/serialization/vector.hpp> +namespace mpi = boost::mpi; + +enum message_tags { msg_data_packet, msg_broadcast_data, msg_finished }; + +void generate_data(mpi::communicator local, mpi::communicator world) +{ + using std::srand; + using std::rand; + + // The rank of the collector within the world communicator + int master_collector = local.size(); + + srand(time(0) + world.rank()); + + // Send out several blocks of random data to the collectors. 
+ int num_data_blocks = rand() % 3 + 1; + for (int block = 0; block < num_data_blocks; ++block) { + // Generate some random data + int num_samples = rand() % 1000; + std::vector<int> data; + for (int i = 0; i < num_samples; ++i) { + data.push_back(rand()); + } + + // Send our data to the master collector process. + std::cout << "Generator #" << local.rank() << " sends some data..." + << std::endl; + world.send(master_collector, msg_data_packet, data); + } + + // Wait for all of the generators to complete + (local.barrier)(); + + // The first generator will send the message to the master collector + // indicating that we're done. + if (local.rank() == 0) + world.send(master_collector, msg_finished); +} + +void collect_data(mpi::communicator local, mpi::communicator world) +{ + // The rank of the collector within the world communicator + int master_collector = world.size() - local.size(); + + if (world.rank() == master_collector) { + while (true) { + // Wait for a message + mpi::status msg = world.probe(); + if (msg.tag() == msg_data_packet) { + // Receive the packet of data + std::vector<int> data; + world.recv(msg.source(), msg.tag(), data); + + // Tell each of the collectors that we'll be broadcasting some data + for (int dest = 1; dest < local.size(); ++dest) + local.send(dest, msg_broadcast_data, msg.source()); + + // Broadcast the actual data. + broadcast(local, data, 0); + } else if (msg.tag() == msg_finished) { + // Receive the message + world.recv(msg.source(), msg.tag()); + + // Tell each of the collectors that we're finished + for (int dest = 1; dest < local.size(); ++dest) + local.send(dest, msg_finished); + + break; + } + } + } else { + while (true) { + // Wait for a message from the master collector + mpi::status msg = local.probe(); + if (msg.tag() == msg_broadcast_data) { + // Receive the broadcast message + int originator; + local.recv(msg.source(), msg.tag(), originator); + + // Receive the data broadcasted from the master collector + std::vector<int> data; + broadcast(local, data, 0); + + std::cout << "Collector #" << local.rank() + << " is processing data from generator #" << originator + << "." << std::endl; + } else if (msg.tag() == msg_finished) { + // Receive the message + local.recv(msg.source(), msg.tag()); + + break; + } + } + } +} + +int main(int argc, char* argv[]) +{ + mpi::environment env(argc, argv); + mpi::communicator world; + + if (world.size() < 3) { + if (world.rank() == 0) { + std::cerr << "Error: this example requires at least 3 processes." + << std::endl; + } + env.abort(-1); + } + + bool is_generator = world.rank() < 2 * world.size() / 3; + mpi::communicator local = world.split(is_generator? 0 : 1); + if (is_generator) generate_data(local, world); + else collect_data(local, world); + + return 0; +} diff --git a/src/boost/libs/mpi/example/generate_collect_optional.cpp b/src/boost/libs/mpi/example/generate_collect_optional.cpp new file mode 100644 index 000000000..3aa3888c4 --- /dev/null +++ b/src/boost/libs/mpi/example/generate_collect_optional.cpp @@ -0,0 +1,113 @@ +// Copyright (C) 2006 Douglas Gregor <doug.gregor@gmail.com> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// An example using Boost.MPI's split() operation on communicators to +// create separate data-generating processes and data-collecting +// processes using boost::optional for broadcasting. 
+#include <boost/mpi.hpp> +#include <iostream> +#include <cstdlib> +#include <boost/serialization/vector.hpp> +#include <boost/serialization/optional.hpp> +namespace mpi = boost::mpi; + +enum message_tags { msg_data_packet, msg_finished }; + +void generate_data(mpi::communicator local, mpi::communicator world) +{ + using std::srand; + using std::rand; + + // The rank of the collector within the world communicator + int master_collector = local.size(); + + srand(time(0) + world.rank()); + + // Send out several blocks of random data to the collectors. + int num_data_blocks = rand() % 3 + 1; + for (int block = 0; block < num_data_blocks; ++block) { + // Generate some random dataa + int num_samples = rand() % 1000; + std::vector<int> data; + for (int i = 0; i < num_samples; ++i) { + data.push_back(rand()); + } + + // Send our data to the master collector process. + std::cout << "Generator #" << local.rank() << " sends some data..." + << std::endl; + world.send(master_collector, msg_data_packet, data); + } + + // Wait for all of the generators to complete + (local.barrier)(); + + // The first generator will send the message to the master collector + // indicating that we're done. + if (local.rank() == 0) + world.send(master_collector, msg_finished); +} + +void collect_data(mpi::communicator local, mpi::communicator world) +{ + // The rank of the collector within the world communicator + int master_collector = world.size() - local.size(); + + if (world.rank() == master_collector) { + while (true) { + // Wait for a message + mpi::status msg = world.probe(); + if (msg.tag() == msg_data_packet) { + // Receive the packet of data into a boost::optional + boost::optional<std::vector<int> > data; + data = std::vector<int>(); + world.recv(msg.source(), msg.source(), *data); + + // Broadcast the actual data. + broadcast(local, data, 0); + } else if (msg.tag() == msg_finished) { + // Receive the message + world.recv(msg.source(), msg.tag()); + + // Broadcast to each collector to tell them we've finished. + boost::optional<std::vector<int> > data; + broadcast(local, data, 0); + break; + } + } + } else { + boost::optional<std::vector<int> > data; + do { + // Wait for a broadcast from the master collector + broadcast(local, data, 0); + if (data) { + std::cout << "Collector #" << local.rank() + << " is processing data." << std::endl; + } + } while (data); + } +} + +int main(int argc, char* argv[]) +{ + mpi::environment env(argc, argv); + mpi::communicator world; + + if (world.size() < 4) { + if (world.rank() == 0) { + std::cerr << "Error: this example requires at least 4 processes." + << std::endl; + } + env.abort(-1); + } + + bool is_generator = world.rank() < 2 * world.size() / 3; + mpi::communicator local = world.split(is_generator? 0 : 1); + if (is_generator) generate_data(local, world); + else collect_data(local, world); + + return 0; +} diff --git a/src/boost/libs/mpi/example/global_min.cpp b/src/boost/libs/mpi/example/global_min.cpp new file mode 100644 index 000000000..68de57dff --- /dev/null +++ b/src/boost/libs/mpi/example/global_min.cpp @@ -0,0 +1,31 @@ +// Copyright (C) 2013 Alain Miniussi <alain.miniussi@oca.eu> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// An example using Boost.MPI's all_reduce() that compute the minimum +// of each process's value and broadcast the result to all the processes. 
+ +#include <boost/mpi.hpp> +#include <iostream> +#include <cstdlib> +namespace mpi = boost::mpi; + +int main(int argc, char* argv[]) +{ + mpi::environment env(argc, argv); + mpi::communicator world; + + std::srand(world.rank()); + int my_number = std::rand(); + int minimum; + + all_reduce(world, my_number, minimum, mpi::minimum<int>()); + + if (world.rank() == 0) { + std::cout << "The minimum value is " << minimum << std::endl; + } + + return 0; +} diff --git a/src/boost/libs/mpi/example/hello_world.cpp b/src/boost/libs/mpi/example/hello_world.cpp new file mode 100644 index 000000000..7095c1724 --- /dev/null +++ b/src/boost/libs/mpi/example/hello_world.cpp @@ -0,0 +1,33 @@ +// Copyright (C) 2006 Douglas Gregor <doug.gregor@gmail.com> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// A simple Hello, world! example using Boost.MPI message passing. + +#include <boost/mpi.hpp> +#include <iostream> +#include <boost/serialization/string.hpp> // Needed to send/receive strings! +namespace mpi = boost::mpi; + +int main(int argc, char* argv[]) +{ + mpi::environment env(argc, argv); + mpi::communicator world; + + if (world.rank() == 0) { + world.send(1, 0, std::string("Hello")); + std::string msg; + world.recv(1, 1, msg); + std::cout << msg << "!" << std::endl; + } else { + std::string msg; + world.recv(0, 0, msg); + std::cout << msg << ", "; + std::cout.flush(); + world.send(0, 1, std::string("world")); + } + + return 0; +} diff --git a/src/boost/libs/mpi/example/hello_world_broadcast.cpp b/src/boost/libs/mpi/example/hello_world_broadcast.cpp new file mode 100644 index 000000000..4ffe239ea --- /dev/null +++ b/src/boost/libs/mpi/example/hello_world_broadcast.cpp @@ -0,0 +1,28 @@ +// Copyright (C) 2006 Douglas Gregor <doug.gregor@gmail.com> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// A simple Hello, world! example using Boost.MPI broadcast() + +#include <boost/mpi.hpp> +#include <iostream> +#include <boost/serialization/string.hpp> // Needed to send/receive strings! +namespace mpi = boost::mpi; + +int main(int argc, char* argv[]) +{ + mpi::environment env(argc, argv); + mpi::communicator world; + + std::string value; + if (world.rank() == 0) { + value = "Hello, World!"; + } + + broadcast(world, value, 0); + + std::cout << "Process #" << world.rank() << " says " << value << std::endl; + return 0; +} diff --git a/src/boost/libs/mpi/example/hello_world_groups.cpp b/src/boost/libs/mpi/example/hello_world_groups.cpp new file mode 100644 index 000000000..7493aa4a3 --- /dev/null +++ b/src/boost/libs/mpi/example/hello_world_groups.cpp @@ -0,0 +1,46 @@ +// Copyright (C) 2013 Andreas Hehn <hehn@phys.ethz.ch>, ETH Zurich +// based on +// hellp-world_broadcast.cpp (C) 2006 Douglas Gregor <doug.gregor@gmail.com> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// A simple Hello world! 
example +// using boost::mpi::group and boost::mpi::broadcast() + +#include <stdexcept> +#include <boost/mpi/environment.hpp> +#include <boost/mpi/communicator.hpp> +#include <boost/mpi/group.hpp> +#include <boost/mpi/collectives.hpp> + +#include <boost/serialization/string.hpp> + +namespace mpi = boost::mpi; + +int main(int argc, char* argv[]) +{ + mpi::environment env(argc, argv); + mpi::communicator world; + if(world.size() < 2) + throw std::runtime_error("Please run with at least 2 MPI processes!"); + + int group_a_ranks[2] = {0,1}; + + mpi::group world_group = world.group(); + mpi::group group_a = world_group.include(group_a_ranks,group_a_ranks+2); + + mpi::communicator comm_a(world,group_a); + + std::string value("Hello world!"); + if(comm_a) + { + if(comm_a.rank() == 0) { + value = "Hello group a!"; + } + mpi::broadcast(comm_a, value, 0); + } + std::cout << "Process #" << world.rank() << " says " << value << std::endl; + return 0; +} diff --git a/src/boost/libs/mpi/example/hello_world_nonblocking.cpp b/src/boost/libs/mpi/example/hello_world_nonblocking.cpp new file mode 100644 index 000000000..c65247b99 --- /dev/null +++ b/src/boost/libs/mpi/example/hello_world_nonblocking.cpp @@ -0,0 +1,36 @@ +// Copyright (C) 2006 Douglas Gregor <doug.gregor@gmail.com> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// A simple Hello, world! example using Boost.MPI message passing. + +#include <boost/mpi.hpp> +#include <iostream> +#include <boost/serialization/string.hpp> // Needed to send/receive strings! +namespace mpi = boost::mpi; + +int main(int argc, char* argv[]) +{ + mpi::environment env(argc, argv); + mpi::communicator world; + + if (world.rank() == 0) { + mpi::request reqs[2]; + std::string msg, out_msg = "Hello"; + reqs[0] = world.isend(1, 0, out_msg); + reqs[1] = world.irecv(1, 1, msg); + mpi::wait_all(reqs, reqs + 2); + std::cout << msg << "!" << std::endl; + } else { + mpi::request reqs[2]; + std::string msg, out_msg = "world"; + reqs[0] = world.isend(0, 1, out_msg); + reqs[1] = world.irecv(0, 0, msg); + mpi::wait_all(reqs, reqs + 2); + std::cout << msg << ", "; + } + + return 0; +} diff --git a/src/boost/libs/mpi/example/in_place_global_min.cpp b/src/boost/libs/mpi/example/in_place_global_min.cpp new file mode 100644 index 000000000..7da2cf2bf --- /dev/null +++ b/src/boost/libs/mpi/example/in_place_global_min.cpp @@ -0,0 +1,29 @@ +// Copyright (C) 2013 Alain Miniussi <alain.miniussi@oca.eu> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// An example using Boost.MPI's all_reduce() that compute the minimum +// of each process's value and broadcast the result to all the processes. 
+#include <boost/mpi.hpp> +#include <iostream> +#include <cstdlib> +namespace mpi = boost::mpi; + +int main(int argc, char* argv[]) +{ + mpi::environment env(argc, argv); + mpi::communicator world; + + std::srand(world.rank()); + int my_number = std::rand(); + + all_reduce(world, my_number, mpi::minimum<int>()); + + if (world.rank() == 0) { + std::cout << "The minimum value is " << my_number << std::endl; + } + + return 0; +} diff --git a/src/boost/libs/mpi/example/parallel_example.cpp b/src/boost/libs/mpi/example/parallel_example.cpp new file mode 100644 index 000000000..00347d51c --- /dev/null +++ b/src/boost/libs/mpi/example/parallel_example.cpp @@ -0,0 +1,196 @@ +// Copyright (C) 2005-2006 Matthias Troyer + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// An example of a parallel Monte Carlo simulation using some nodes to produce +// data and others to aggregate the data +#include <iostream> + +#include <boost/mpi.hpp> +#include <boost/random/parallel.hpp> +#include <boost/random.hpp> +#include <boost/foreach.hpp> +#include <iostream> +#include <cstdlib> + +namespace mpi = boost::mpi; + +enum {sample_tag, sample_skeleton_tag, sample_broadcast_tag, quit_tag}; + + +void calculate_samples(int sample_length) +{ + int num_samples = 100; + std::vector<double> sample(sample_length); + + // setup communicator by splitting + + mpi::communicator world; + mpi::communicator calculate_communicator = world.split(0); + + unsigned int num_calculate_ranks = calculate_communicator.size(); + + // the master of the accumulaion ranks is the first of them, hence + // with a rank just one after the last calculation rank + int master_accumulate_rank = num_calculate_ranks; + + // the master of the calculation ranks sends the skeleton of the sample + // to the master of the accumulation ranks + + if (world.rank()==0) + world.send(master_accumulate_rank,sample_skeleton_tag,mpi::skeleton(sample)); + + // next we extract the content of the sample vector, to be used in sending + // the content later on + + mpi::content sample_content = mpi::get_content(sample); + + // now intialize the parallel random number generator + + boost::lcg64 engine( + boost::random::stream_number = calculate_communicator.rank(), + boost::random::total_streams = calculate_communicator.size() + ); + + boost::variate_generator<boost::lcg64&,boost::uniform_real<> > + rng(engine,boost::uniform_real<>()); + + for (unsigned int i=0; i<num_samples/num_calculate_ranks+1;++i) { + + // calculate sample by filling the vector with random numbers + // note that std::generate will not work since it takes the generator + // by value, and boost::ref cannot be used as a generator. + // boost::ref should be fixed so that it can be used as generator + + BOOST_FOREACH(double& x, sample) + x = rng(); + + // send sample to accumulation ranks + // Ideally we want to do this as a broadcast with an inter-communicator + // between the calculation and accumulation ranks. MPI2 should support + // this, but here we present an MPI1 compatible solution. 
+ + // send content of sample to first (master) accumulation process + + world.send(master_accumulate_rank,sample_tag,sample_content); + + // gather some results from all calculation ranks + + double local_result = sample[0]; + std::vector<double> gathered_results(calculate_communicator.size()); + mpi::all_gather(calculate_communicator,local_result,gathered_results); + } + + // we are done: the master tells the accumulation ranks to quit + if (world.rank()==0) + world.send(master_accumulate_rank,quit_tag); +} + + + +void accumulate_samples() +{ + std::vector<double> sample; + + // setup the communicator for all accumulation ranks by splitting + + mpi::communicator world; + mpi::communicator accumulate_communicator = world.split(1); + + bool is_master_accumulate_rank = accumulate_communicator.rank()==0; + + // the master receives the sample skeleton + + if (is_master_accumulate_rank) + world.recv(0,sample_skeleton_tag,mpi::skeleton(sample)); + + // and broadcasts it to all accumulation ranks + mpi::broadcast(accumulate_communicator,mpi::skeleton(sample),0); + + // next we extract the content of the sample vector, to be used in receiving + // the content later on + + mpi::content sample_content = mpi::get_content(sample); + + // accumulate until quit is called + double sum=0.; + while (true) { + + + // the accumulation master checks whether we should quit + if (world.iprobe(0,quit_tag)) { + world.recv(0,quit_tag); + for (int i=1; i<accumulate_communicator.size();++i) + accumulate_communicator.send(i,quit_tag); + std::cout << sum << "\n"; + break; // We're done + } + + // the otehr accumulation ranks check whether we should quit + if (accumulate_communicator.iprobe(0,quit_tag)) { + accumulate_communicator.recv(0,quit_tag); + std::cout << sum << "\n"; + break; // We're done + } + + // check whether the master accumulation rank has received a sample + if (world.iprobe(mpi::any_source,sample_tag)) { + BOOST_ASSERT(is_master_accumulate_rank); + + // receive the content + world.recv(mpi::any_source,sample_tag,sample_content); + + // now we need to braodcast + // the problam is we do not have a non-blocking broadcast that we could + // abort if we receive a quit message from the master. We thus need to + // first tell all accumulation ranks to start a broadcast. 
If the sample + // is small, we could just send the sample in this message, but here we + // optimize the code for large samples, so that the overhead of these + // sends can be ignored, and we count on an optimized broadcast + // implementation with O(log N) complexity + + for (int i=1; i<accumulate_communicator.size();++i) + accumulate_communicator.send(i,sample_broadcast_tag); + + // now broadcast the contents of the sample to all accumulate ranks + mpi::broadcast(accumulate_communicator,sample_content,0); + + // and handle the sample by summing the appropriate value + sum += sample[0]; + } + + // the other accumulation ranks wait for a mesage to start the broadcast + if (accumulate_communicator.iprobe(0,sample_broadcast_tag)) { + BOOST_ASSERT(!is_master_accumulate_rank); + + accumulate_communicator.recv(0,sample_broadcast_tag); + + // receive broadcast of the sample contents + mpi::broadcast(accumulate_communicator,sample_content,0); + + // and handle the sample + + // and handle the sample by summing the appropriate value + sum += sample[accumulate_communicator.rank()]; + } + } +} + +int main(int argc, char** argv) +{ + mpi::environment env(argc, argv); + mpi::communicator world; + + // half of the processes generate, the others accumulate + // the sample size is just the number of accumulation ranks + if (world.rank() < world.size()/2) + calculate_samples(world.size()-world.size()/2); + else + accumulate_samples(); + + return 0; +} + + diff --git a/src/boost/libs/mpi/example/python/hello_world.py b/src/boost/libs/mpi/example/python/hello_world.py new file mode 100644 index 000000000..73312483b --- /dev/null +++ b/src/boost/libs/mpi/example/python/hello_world.py @@ -0,0 +1,16 @@ +# Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>. + +# Use, modification and distribution is subject to the Boost Software +# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +# http://www.boost.org/LICENSE_1_0.txt) + +import boost.parallel.mpi as mpi + +if mpi.world.rank == 0: + mpi.world.send(1, 0, 'Hello') + msg = mpi.world.recv(1, 1) + print msg,'!' +else: + msg = mpi.world.recv(0, 0) + print msg,', ', + mpi.world.send(0, 1, 'world') diff --git a/src/boost/libs/mpi/example/random_content.cpp b/src/boost/libs/mpi/example/random_content.cpp new file mode 100644 index 000000000..22fd06d87 --- /dev/null +++ b/src/boost/libs/mpi/example/random_content.cpp @@ -0,0 +1,98 @@ +// Copyright (C) 2006 Douglas Gregor <doug.gregor@gmail.com> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// An example using Boost.MPI's skeletons and content to optimize +// communication. 
+#include <boost/mpi.hpp> +#include <boost/serialization/list.hpp> +#include <algorithm> +#include <functional> +#include <numeric> +#include <iostream> +#include <stdlib.h> +namespace mpi = boost::mpi; + +int main(int argc, char* argv[]) +{ + mpi::environment env(argc, argv); + mpi::communicator world; + + if (world.size() < 2 || world.size() > 4) { + if (world.rank() == 0) + std::cerr << "error: please execute this program with 2-4 processes.\n"; + world.abort(-1); + } + + if (world.rank() == 0) { + int list_len = 50; + int iterations = 10; + + if (argc > 1) list_len = atoi(argv[1]); + if (argc > 2) iterations = atoi(argv[2]); + + if (list_len <= 0) { + std::cerr << "error: please specific a list length greater than zero.\n"; + world.abort(-1); + } + + // Generate the list and broadcast its structure + std::list<int> l(list_len); + broadcast(world, mpi::skeleton(l), 0); + + // Generate content several times and broadcast out that content + mpi::content c = mpi::get_content(l); + for (int i = 0; i < iterations; ++i) { + do { + std::generate(l.begin(), l.end(), &random); + } while (std::find_if(l.begin(), l.end(), + std::bind1st(std::not_equal_to<int>(), 0)) + == l.end()); + + + std::cout << "Iteration #" << i << ": sending content" + << " (min = " << *std::min_element(l.begin(), l.end()) + << ", max = " << *std::max_element(l.begin(), l.end()) + << ", avg = " + << std::accumulate(l.begin(), l.end(), 0)/l.size() + << ").\n"; + + broadcast(world, c, 0); + } + + // Notify the slaves that we're done by sending all zeroes + std::fill(l.begin(), l.end(), 0); + broadcast(world, c, 0); + + } else { + // Receive the content and build up our own list + std::list<int> l; + broadcast(world, mpi::skeleton(l), 0); + + mpi::content c = mpi::get_content(l); + int i = 0; + do { + broadcast(world, c, 0); + + if (std::find_if(l.begin(), l.end(), + std::bind1st(std::not_equal_to<int>(), 0)) == l.end()) + break; + + if (world.rank() == 1) + std::cout << "Iteration #" << i << ": max value = " + << *std::max_element(l.begin(), l.end()) << ".\n"; + else if (world.rank() == 2) + std::cout << "Iteration #" << i << ": min value = " + << *std::min_element(l.begin(), l.end()) << ".\n"; + else if (world.rank() == 3) + std::cout << "Iteration #" << i << ": avg value = " + << std::accumulate(l.begin(), l.end(), 0)/l.size() + << ".\n"; + ++i; + } while (true); + } + + return 0; +} diff --git a/src/boost/libs/mpi/example/random_gather.cpp b/src/boost/libs/mpi/example/random_gather.cpp new file mode 100644 index 000000000..17825f157 --- /dev/null +++ b/src/boost/libs/mpi/example/random_gather.cpp @@ -0,0 +1,31 @@ +// Copyright (C) 2006 Douglas Gregor <doug.gregor@gmail.com> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// An example using Boost.MPI's gather(): [main] + +#include <boost/mpi.hpp> +#include <iostream> +#include <cstdlib> +namespace mpi = boost::mpi; + +int main(int argc, char* argv[]) +{ + mpi::environment env(argc, argv); + mpi::communicator world; + + std::srand(time(0) + world.rank()); + int my_number = std::rand(); + if (world.rank() == 0) { + std::vector<int> all_numbers; + gather(world, my_number, all_numbers, 0); + for (int proc = 0; proc < world.size(); ++proc) + std::cout << "Process #" << proc << " thought of " << all_numbers[proc] + << std::endl; + } else { + gather(world, my_number, 0); + } + return 0; +} diff --git a/src/boost/libs/mpi/example/random_min.cpp b/src/boost/libs/mpi/example/random_min.cpp new file mode 100644 index 000000000..d0a67ee02 --- /dev/null +++ b/src/boost/libs/mpi/example/random_min.cpp @@ -0,0 +1,30 @@ +// Copyright (C) 2006 Douglas Gregor <doug.gregor@gmail.com> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// An example using Boost.MPI's reduce() to compute a minimum value. +#include <boost/mpi.hpp> +#include <iostream> +#include <cstdlib> +namespace mpi = boost::mpi; + +int main(int argc, char* argv[]) +{ + mpi::environment env(argc, argv); + mpi::communicator world; + + std::srand(time(0) + world.rank()); + int my_number = std::rand(); + + if (world.rank() == 0) { + int minimum; + reduce(world, my_number, minimum, mpi::minimum<int>(), 0); + std::cout << "The minimum value is " << minimum << std::endl; + } else { + reduce(world, my_number, mpi::minimum<int>(), 0); + } + + return 0; +} diff --git a/src/boost/libs/mpi/example/random_scatter.cpp b/src/boost/libs/mpi/example/random_scatter.cpp new file mode 100644 index 000000000..fc8879e4e --- /dev/null +++ b/src/boost/libs/mpi/example/random_scatter.cpp @@ -0,0 +1,37 @@ +// Copyright (C) 2006 Douglas Gregor <doug.gregor@gmail.com> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// An example using Boost.MPI's gather(): [main] + +#include <boost/mpi.hpp> +#include <boost/mpi/collectives.hpp> +#include <iostream> +#include <cstdlib> +#include <vector> + +namespace mpi = boost::mpi; + +int main(int argc, char* argv[]) +{ + mpi::environment env(argc, argv); + mpi::communicator world; + + std::srand(time(0) + world.rank()); + std::vector<int> all; + int mine = -1; + if (world.rank() == 0) { + all.resize(world.size()); + std::generate(all.begin(), all.end(), std::rand); + } + mpi::scatter(world, all, mine, 0); + for (int r = 0; r < world.size(); ++r) { + world.barrier(); + if (r == world.rank()) { + std::cout << "Rank " << r << " got " << mine << '\n'; + } + } + return 0; +} diff --git a/src/boost/libs/mpi/example/reduce_performance_test.cpp b/src/boost/libs/mpi/example/reduce_performance_test.cpp new file mode 100644 index 000000000..40411b14a --- /dev/null +++ b/src/boost/libs/mpi/example/reduce_performance_test.cpp @@ -0,0 +1,138 @@ +// Copyright (C) 2006 Trustees of Indiana University +// +// Authors: Douglas Gregor +// Andrew Lumsdaine + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Performance test of the reduce() collective +#include <boost/mpi.hpp> +#include <boost/lexical_cast.hpp> + +namespace mpi = boost::mpi; + +struct add_int { + int operator()(int x, int y) const { return x + y; } +}; + +struct wrapped_int +{ + wrapped_int() : value(0) { } + wrapped_int(int value) : value(value) { } + + template<typename Archiver> + void serialize(Archiver& ar, const unsigned int /*version*/) { + ar & value; + } + + int value; +}; + +inline wrapped_int operator+(wrapped_int x, wrapped_int y) +{ + return wrapped_int(x.value + y.value); +} + +namespace boost { namespace mpi { + template<> struct is_mpi_datatype<wrapped_int> : mpl::true_ { }; +} } + +struct serialized_int +{ + serialized_int() : value(0) { } + serialized_int(int value) : value(value) { } + + template<typename Archiver> + void serialize(Archiver& ar, const unsigned int /*version*/) { + ar & value; + } + + int value; +}; + +inline serialized_int operator+(serialized_int x, serialized_int y) +{ + return serialized_int(x.value + y.value); +} + +int main(int argc, char* argv[]) +{ + mpi::environment env(argc, argv); + mpi::communicator world; + + int repeat_count = 100; + int outer_repeat_count = 2; + + if (argc > 1) repeat_count = boost::lexical_cast<int>(argv[1]); + if (argc > 2) outer_repeat_count = boost::lexical_cast<int>(argv[2]); + + if (world.rank() == 0) + std::cout << "# of processors: " << world.size() << std::endl + << "# of iterations: " << repeat_count << std::endl; + + int value = world.rank(); + int result; + wrapped_int wi_value = world.rank(); + wrapped_int wi_result; + serialized_int si_value = world.rank(); + serialized_int si_result; + + // Spin for a while... 
+ for (int i = 0; i < repeat_count/10; ++i) { + reduce(world, value, result, std::plus<int>(), 0); + reduce(world, value, result, add_int(), 0); + reduce(world, wi_value, wi_result, std::plus<wrapped_int>(), 0); + reduce(world, si_value, si_result, std::plus<serialized_int>(), 0); + } + + for (int outer = 0; outer < outer_repeat_count; ++outer) { + // Raw MPI + mpi::timer time; + for (int i = 0; i < repeat_count; ++i) { + MPI_Reduce(&value, &result, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD); + } + double reduce_raw_mpi_total_time = time.elapsed(); + + // MPI_INT/MPI_SUM case + time.restart(); + for (int i = 0; i < repeat_count; ++i) { + reduce(world, value, result, std::plus<int>(), 0); + } + double reduce_int_sum_total_time = time.elapsed(); + + // MPI_INT/MPI_Op case + time.restart(); + for (int i = 0; i < repeat_count; ++i) { + reduce(world, value, result, add_int(), 0); + } + double reduce_int_op_total_time = time.elapsed(); + + // MPI_Datatype/MPI_Op case + time.restart(); + for (int i = 0; i < repeat_count; ++i) { + reduce(world, wi_value, wi_result, std::plus<wrapped_int>(), 0); + } + double reduce_type_op_total_time = time.elapsed(); + + // Serialized/MPI_Op case + time.restart(); + for (int i = 0; i < repeat_count; ++i) { + reduce(world, si_value, si_result, std::plus<serialized_int>(), 0); + } + double reduce_ser_op_total_time = time.elapsed(); + + + if (world.rank() == 0) + std::cout << "\nInvocation\tElapsed Time (seconds)" + << "\nRaw MPI\t\t\t" << reduce_raw_mpi_total_time + << "\nMPI_INT/MPI_SUM\t\t" << reduce_int_sum_total_time + << "\nMPI_INT/MPI_Op\t\t" << reduce_int_op_total_time + << "\nMPI_Datatype/MPI_Op\t" << reduce_type_op_total_time + << "\nSerialized/MPI_Op\t" << reduce_ser_op_total_time + << std::endl; + } + + return 0; +} diff --git a/src/boost/libs/mpi/example/string_cat.cpp b/src/boost/libs/mpi/example/string_cat.cpp new file mode 100644 index 000000000..5602d5964 --- /dev/null +++ b/src/boost/libs/mpi/example/string_cat.cpp @@ -0,0 +1,46 @@ +// Copyright (C) 2006 Douglas Gregor <doug.gregor@gmail.com> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// An example using Boost.MPI's reduce() to concatenate strings. +#include <boost/mpi.hpp> +#include <iostream> +#include <string> +#include <boost/serialization/string.hpp> // Important for sending strings! +namespace mpi = boost::mpi; + +/* Defining STRING_CONCAT_COMMUTATIVE lies to Boost.MPI by forcing it + * to assume that string concatenation is commutative, which it is + * not. However, doing so illustrates how the results of a reduction + * can change when a non-commutative operator is assumed to be + * commutative. + */ +#ifdef STRING_CONCAT_COMMUTATIVE +namespace boost { namespace mpi { + +template<> +struct is_commutative<std::plus<std::string>, std::string> : mpl::true_ { }; + +} } // end namespace boost::mpi +#endif + +int main(int argc, char* argv[]) +{ + mpi::environment env(argc, argv); + mpi::communicator world; + + std::string names[10] = { "zero ", "one ", "two ", "three ", "four ", + "five ", "six ", "seven ", "eight ", "nine " }; + + std::string result; + reduce(world, + world.rank() < 10? 
names[world.rank()] : std::string("many "), + result, std::plus<std::string>(), 0); + + if (world.rank() == 0) + std::cout << "The result is " << result << std::endl; + + return 0; +} diff --git a/src/boost/libs/mpi/index.html b/src/boost/libs/mpi/index.html new file mode 100644 index 000000000..5dad703a1 --- /dev/null +++ b/src/boost/libs/mpi/index.html @@ -0,0 +1,16 @@ + +<!-- +Copyright 2005-2007 Daniel James. +Distributed under the Boost Software License, Version 1.0. (See accompanying +file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +--> + +<html> +<head> + <meta http-equiv="refresh" content="0; URL=../../doc/html/mpi.html"> +</head> +<body> +Automatic redirection failed, please go to +<a href="../../doc/html/mpi.html">../../doc/html/mpi.html</a> +</body> +</html> diff --git a/src/boost/libs/mpi/meta/libraries.json b/src/boost/libs/mpi/meta/libraries.json new file mode 100644 index 000000000..ab1e98fb7 --- /dev/null +++ b/src/boost/libs/mpi/meta/libraries.json @@ -0,0 +1,16 @@ +{ + "key": "mpi", + "name": "MPI", + "authors": [ + "Douglas Gregor", + "Matthias Troyer" + ], + "description": "Message Passing Interface library, for use in distributed-memory parallel application programming.", + "category": [ + "Concurrent" + ], + "maintainers": [ + "K. Noel Belcourt <kbelco -at- sandia.gov>", + "Alain Miniussi <alain.miniussi -at- oca.eu>" + ] +} diff --git a/src/boost/libs/mpi/src/broadcast.cpp b/src/boost/libs/mpi/src/broadcast.cpp new file mode 100644 index 000000000..e171b4865 --- /dev/null +++ b/src/boost/libs/mpi/src/broadcast.cpp @@ -0,0 +1,142 @@ +// Copyright 2005 Douglas Gregor. + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Message Passing Interface 1.1 -- Section 4.4. Broadcast + +#include <boost/mpi/config.hpp> +#include <boost/mpi/collectives/broadcast.hpp> +#include <boost/mpi/skeleton_and_content.hpp> +#include <boost/mpi/detail/point_to_point.hpp> +#include <boost/mpi/environment.hpp> +#include <cassert> + +namespace boost { namespace mpi { + +template<> +void +broadcast<const packed_oarchive>(const communicator& comm, + const packed_oarchive& oa, + int root) +{ + // Only the root can broadcast the packed_oarchive + assert(comm.rank() == root); + + int size = comm.size(); + if (size < 2) return; + + // Determine maximum tag value + int tag = environment::collectives_tag(); + + // Broadcast data to all nodes + std::vector<request> requests(size-1); + std::vector<request>::iterator it = requests.begin(); + for (int dest = 0; dest < size; ++dest) { + if (dest != root) { + *it++ = detail::packed_archive_isend(comm, dest, tag, oa); + } + } + wait_all(requests.begin(), requests.end()); +} + +template<> +void +broadcast<packed_oarchive>(const communicator& comm, packed_oarchive& oa, + int root) +{ + broadcast(comm, const_cast<const packed_oarchive&>(oa), root); +} + +template<> +void +broadcast<packed_iarchive>(const communicator& comm, packed_iarchive& ia, + int root) +{ + int size = comm.size(); + if (size < 2) return; + + // Determine maximum tag value + int tag = environment::collectives_tag(); + + // Receive data from the root. 
+ if (comm.rank() != root) { + MPI_Status status; + detail::packed_archive_recv(comm, root, tag, ia, status); + } else { + // Broadcast data to all nodes + std::vector<request> requests(size-1); + std::vector<request>::iterator it = requests.begin(); + for (int dest = 0; dest < size; ++dest) { + if (dest != root) { + *it++ = detail::packed_archive_isend(comm, dest, tag, ia); + } + } + wait_all(requests.begin(), requests.end()); + } +} + +template<> +void +broadcast<const packed_skeleton_oarchive>(const communicator& comm, + const packed_skeleton_oarchive& oa, + int root) +{ + broadcast(comm, oa.get_skeleton(), root); +} + +template<> +void +broadcast<packed_skeleton_oarchive>(const communicator& comm, + packed_skeleton_oarchive& oa, int root) +{ + broadcast(comm, oa.get_skeleton(), root); +} + +template<> +void +broadcast<packed_skeleton_iarchive>(const communicator& comm, + packed_skeleton_iarchive& ia, int root) +{ + broadcast(comm, ia.get_skeleton(), root); +} + +template<> +void broadcast<content>(const communicator& comm, content& c, int root) +{ + broadcast(comm, const_cast<const content&>(c), root); +} + +template<> +void broadcast<const content>(const communicator& comm, const content& c, + int root) +{ +#if defined(BOOST_MPI_BCAST_BOTTOM_WORKS_FINE) + BOOST_MPI_CHECK_RESULT(MPI_Bcast, + (MPI_BOTTOM, 1, c.get_mpi_datatype(), + root, comm)); +#else + if (comm.size() < 2) + return; + + // Some versions of LAM/MPI behave badly when broadcasting using + // MPI_BOTTOM, so we'll instead use manual send/recv operations. + if (comm.rank() == root) { + for (int p = 0; p < comm.size(); ++p) { + if (p != root) { + BOOST_MPI_CHECK_RESULT(MPI_Send, + (MPI_BOTTOM, 1, c.get_mpi_datatype(), + p, environment::collectives_tag(), comm)); + } + } + } else { + BOOST_MPI_CHECK_RESULT(MPI_Recv, + (MPI_BOTTOM, 1, c.get_mpi_datatype(), + root, environment::collectives_tag(), + comm, MPI_STATUS_IGNORE)); + } +#endif +} + +} } // end namespace boost::mpi diff --git a/src/boost/libs/mpi/src/cartesian_communicator.cpp b/src/boost/libs/mpi/src/cartesian_communicator.cpp new file mode 100644 index 000000000..a46f0bc2e --- /dev/null +++ b/src/boost/libs/mpi/src/cartesian_communicator.cpp @@ -0,0 +1,179 @@ + +// Copyright Alain Miniussi 2014. +// Distributed under the Boost Software License, Version 1.0. 
+// (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Authors: Alain Miniussi + +#include <algorithm> +#include <cassert> + +#include <boost/mpi/cartesian_communicator.hpp> + +namespace boost { namespace mpi { + +namespace { + template <typename T, typename A> + T* c_data(std::vector<T,A>& v) { return &(v[0]); } +} + +std::ostream& +operator<<(std::ostream& out, cartesian_dimension const& d) { + out << '(' << d.size << ','; + if (d.periodic) { + out << "periodic"; + } else { + out << "bounded"; + } + out << ')'; + return out; +} + +std::ostream& +operator<<(std::ostream& out, cartesian_topology const& topo) { + out << '{'; + int const sz = topo.size(); + for (int i = 0; i < sz; ++i) { + out << topo[i]; + if ( i < (sz-1) ) { + out << ','; + } + } + out << '}'; + return out; +} + +cartesian_communicator::cartesian_communicator(const communicator& comm, + const cartesian_topology& topology, + bool reorder ) + : communicator(MPI_COMM_NULL, comm_attach) +{ + std::vector<int> dims(topology.size()); + std::vector<int> periodic(topology.size()); + int tsz = topology.size(); + for(int i = 0; i < tsz; ++i) { + dims[i] = topology[i].size; + periodic[i] = topology[i].periodic; + } + // Fill the gaps, if any + if (std::count(dims.begin(), dims.end(), 0) > 0) { + cartesian_dimensions(comm, dims); + } + MPI_Comm newcomm; + BOOST_MPI_CHECK_RESULT(MPI_Cart_create, + ((MPI_Comm)comm, dims.size(), + c_data(dims), c_data(periodic), + int(reorder), &newcomm)); + if(newcomm != MPI_COMM_NULL) { + comm_ptr.reset(new MPI_Comm(newcomm), comm_free()); + } +} + +cartesian_communicator::cartesian_communicator(const cartesian_communicator& comm, + const std::vector<int>& keep ) + : communicator(MPI_COMM_NULL, comm_attach) +{ + int const max_dims = comm.ndims(); + int const nbkept = keep.size(); + assert(nbkept <= max_dims); + std::vector<int> bitset(max_dims, int(false)); + for(int i = 0; i < nbkept; ++i) { + assert(keep[i] < max_dims); + bitset[keep[i]] = true; + } + + MPI_Comm newcomm; + BOOST_MPI_CHECK_RESULT(MPI_Cart_sub, + ((MPI_Comm)comm, c_data(bitset), &newcomm)); + if(newcomm != MPI_COMM_NULL) { + comm_ptr.reset(new MPI_Comm(newcomm), comm_free()); + } +} + +int +cartesian_communicator::ndims() const { + int n = -1; + BOOST_MPI_CHECK_RESULT(MPI_Cartdim_get, + (MPI_Comm(*this), &n)); + return n; +} + +int +cartesian_communicator::rank(const std::vector<int>& coords ) const { + int r = -1; + assert(int(coords.size()) == ndims()); + BOOST_MPI_CHECK_RESULT(MPI_Cart_rank, + (MPI_Comm(*this), c_data(const_cast<std::vector<int>&>(coords)), + &r)); + return r; +} + +std::pair<int, int> +cartesian_communicator::shifted_ranks(int dim, int disp) const { + std::pair<int, int> r(-1,-1); + assert(0 <= dim && dim < ndims()); + BOOST_MPI_CHECK_RESULT(MPI_Cart_shift, + (MPI_Comm(*this), dim, disp, &(r.first), &(r.second))); + return r; +} + +std::vector<int> +cartesian_communicator::coordinates(int rk) const { + std::vector<int> cbuf(ndims()); + BOOST_MPI_CHECK_RESULT(MPI_Cart_coords, + (MPI_Comm(*this), rk, cbuf.size(), c_data(cbuf) )); + return cbuf; +} + +void +cartesian_communicator::topology( cartesian_topology& topo, + std::vector<int>& coords ) const { + int ndims = this->ndims(); + topo.resize(ndims); + coords.resize(ndims); + std::vector<int> cdims(ndims); + std::vector<int> cperiods(ndims); + BOOST_MPI_CHECK_RESULT(MPI_Cart_get, + (MPI_Comm(*this), ndims, c_data(cdims), c_data(cperiods), c_data(coords))); + cartesian_topology res(cdims.begin(), cperiods.begin(), ndims); 
+ topo.swap(res); +} + +cartesian_topology +cartesian_communicator::topology() const { + cartesian_topology topo(ndims()); + std::vector<int> coords; + topology(topo, coords); + return topo; +} + +void +cartesian_topology::split(std::vector<int>& dims, std::vector<bool>& periodics) const { + int ndims = size(); + dims.resize(ndims); + periodics.resize(ndims); + for(int i = 0; i < ndims; ++i) { + cartesian_dimension const& d = (*this)[i]; + dims[i] = d.size; + periodics[i] = d.periodic; + } +} + +std::vector<int>& +cartesian_dimensions(int sz, std::vector<int>& dims) { + int min = 1; + int const dimsz = dims.size(); + for(int i = 0; i < dimsz; ++i) { + if (dims[i] > 0) { + min *= dims[i]; + } + } + int leftover = sz % min; + + BOOST_MPI_CHECK_RESULT(MPI_Dims_create, + (sz-leftover, dims.size(), c_data(dims))); + return dims; +} + +} } // end namespace boost::mpi diff --git a/src/boost/libs/mpi/src/communicator.cpp b/src/boost/libs/mpi/src/communicator.cpp new file mode 100644 index 000000000..016c6af41 --- /dev/null +++ b/src/boost/libs/mpi/src/communicator.cpp @@ -0,0 +1,328 @@ +// Copyright (C) 2005, 2006 Douglas Gregor. + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +#include <boost/mpi/communicator.hpp> +#include <boost/mpi/group.hpp> +#include <boost/mpi/intercommunicator.hpp> +#include <boost/mpi/graph_communicator.hpp> +#include <boost/mpi/cartesian_communicator.hpp> +#include <boost/mpi/skeleton_and_content.hpp> +#include <boost/mpi/detail/point_to_point.hpp> + +namespace boost { namespace mpi { + +/*************************************************************************** + * status * + ***************************************************************************/ +bool status::cancelled() const +{ + int flag = 0; + BOOST_MPI_CHECK_RESULT(MPI_Test_cancelled, (&m_status, &flag)); + return flag != 0; +} + +/*************************************************************************** + * communicator * + ***************************************************************************/ + +communicator::communicator() +{ + comm_ptr.reset(new MPI_Comm(MPI_COMM_WORLD)); +} + +communicator::communicator(const MPI_Comm& comm, comm_create_kind kind) +{ + if (comm == MPI_COMM_NULL) + /* MPI_COMM_NULL indicates that the communicator is not usable. 
*/ + return; + + switch (kind) { + case comm_duplicate: + { + MPI_Comm newcomm; + BOOST_MPI_CHECK_RESULT(MPI_Comm_dup, (comm, &newcomm)); + comm_ptr.reset(new MPI_Comm(newcomm), comm_free()); + MPI_Comm_set_errhandler(newcomm, MPI_ERRORS_RETURN); + break; + } + + case comm_take_ownership: + comm_ptr.reset(new MPI_Comm(comm), comm_free()); + break; + + case comm_attach: + comm_ptr.reset(new MPI_Comm(comm)); + break; + } +} + +communicator::communicator(const communicator& comm, + const boost::mpi::group& subgroup) +{ + MPI_Comm newcomm; + BOOST_MPI_CHECK_RESULT(MPI_Comm_create, + ((MPI_Comm)comm, (MPI_Group)subgroup, &newcomm)); + if(newcomm != MPI_COMM_NULL) + comm_ptr.reset(new MPI_Comm(newcomm), comm_free()); +} + +int communicator::size() const +{ + int size_; + BOOST_MPI_CHECK_RESULT(MPI_Comm_size, (MPI_Comm(*this), &size_)); + return size_; +} + +int communicator::rank() const +{ + int rank_; + BOOST_MPI_CHECK_RESULT(MPI_Comm_rank, (MPI_Comm(*this), &rank_)); + return rank_; +} + +boost::mpi::group communicator::group() const +{ + MPI_Group gr; + BOOST_MPI_CHECK_RESULT(MPI_Comm_group, ((MPI_Comm)*this, &gr)); + return boost::mpi::group(gr, /*adopt=*/true); +} + +void communicator::send(int dest, int tag) const +{ + BOOST_MPI_CHECK_RESULT(MPI_Send, + (MPI_BOTTOM, 0, MPI_PACKED, + dest, tag, MPI_Comm(*this))); +} + +status communicator::recv(int source, int tag) const +{ + status stat; + BOOST_MPI_CHECK_RESULT(MPI_Recv, + (MPI_BOTTOM, 0, MPI_PACKED, + source, tag, MPI_Comm(*this), &stat.m_status)); + return stat; +} + +optional<status> communicator::iprobe(int source, int tag) const +{ + typedef optional<status> result_type; + + status stat; + int flag; + BOOST_MPI_CHECK_RESULT(MPI_Iprobe, + (source, tag, MPI_Comm(*this), &flag, + &stat.m_status)); + if (flag) return stat; + else return result_type(); +} + +status communicator::probe(int source, int tag) const +{ + status stat; + BOOST_MPI_CHECK_RESULT(MPI_Probe, + (source, tag, MPI_Comm(*this), &stat.m_status)); + return stat; +} + +void (communicator::barrier)() const +{ + BOOST_MPI_CHECK_RESULT(MPI_Barrier, (MPI_Comm(*this))); +} + + +communicator::operator MPI_Comm() const +{ + if (comm_ptr) return *comm_ptr; + else return MPI_COMM_NULL; +} + +communicator communicator::split(int color) const +{ + return split(color, rank()); +} + +communicator communicator::split(int color, int key) const +{ + MPI_Comm newcomm; + BOOST_MPI_CHECK_RESULT(MPI_Comm_split, + (MPI_Comm(*this), color, key, &newcomm)); + return communicator(newcomm, comm_take_ownership); +} + +optional<intercommunicator> communicator::as_intercommunicator() const +{ + int flag; + BOOST_MPI_CHECK_RESULT(MPI_Comm_test_inter, ((MPI_Comm)*this, &flag)); + if (flag) + return intercommunicator(comm_ptr); + else + return optional<intercommunicator>(); +} + +bool communicator::has_graph_topology() const +{ + bool is_graph = false; + // topology test not allowed on MPI_NULL_COMM + if (bool(*this)) { + int status; + BOOST_MPI_CHECK_RESULT(MPI_Topo_test, ((MPI_Comm)*this, &status)); + is_graph = status == MPI_GRAPH; + } + return is_graph; +} + +optional<graph_communicator> communicator::as_graph_communicator() const +{ + if (has_graph_topology()) { + return graph_communicator(comm_ptr); + } else { + return optional<graph_communicator>(); + } +} + +bool communicator::has_cartesian_topology() const +{ + bool is_cart = false; + // topology test not allowed on MPI_NULL_COM + if (bool(*this)) { + int status; + BOOST_MPI_CHECK_RESULT(MPI_Topo_test, ((MPI_Comm)*this, &status)); + is_cart = 
status == MPI_CART; + } + return is_cart; +} + +optional<cartesian_communicator> communicator::as_cartesian_communicator() const +{ + if (has_cartesian_topology()) { + return cartesian_communicator(comm_ptr); + } else { + return optional<cartesian_communicator>(); + } +} + +void communicator::abort(int errcode) const +{ + BOOST_MPI_CHECK_RESULT(MPI_Abort, (MPI_Comm(*this), errcode)); + std::abort(); +} + +/************************************************************* + * archived send/recv * + *************************************************************/ +template<> +void +communicator::send<packed_oarchive>(int dest, int tag, + const packed_oarchive& ar) const +{ + detail::packed_archive_send(*this, dest, tag, ar); +} + +template<> +void +communicator::send<packed_skeleton_oarchive> + (int dest, int tag, const packed_skeleton_oarchive& ar) const +{ + this->send(dest, tag, ar.get_skeleton()); +} + +template<> +void communicator::send<content>(int dest, int tag, const content& c) const +{ + BOOST_MPI_CHECK_RESULT(MPI_Send, + (MPI_BOTTOM, 1, c.get_mpi_datatype(), + dest, tag, MPI_Comm(*this))); +} + +template<> +status +communicator::recv<packed_iarchive>(int source, int tag, + packed_iarchive& ar) const +{ + status stat; + detail::packed_archive_recv(*this, source, tag, ar, + stat.m_status); + return stat; +} + +template<> +status +communicator::recv<packed_skeleton_iarchive> + (int source, int tag, packed_skeleton_iarchive& ar) const +{ + return this->recv(source, tag, ar.get_skeleton()); +} + +template<> +status +communicator::recv<const content>(int source, int tag, const content& c) const +{ + status stat; + BOOST_MPI_CHECK_RESULT(MPI_Recv, + (MPI_BOTTOM, 1, c.get_mpi_datatype(), + source, tag, MPI_Comm(*this), &stat.m_status)); + return stat; +} + +/************************************************************* + * non-blocking send/recv * + *************************************************************/ +template<> +request +communicator::isend<packed_oarchive>(int dest, int tag, + const packed_oarchive& ar) const +{ + return detail::packed_archive_isend(*this, dest, tag, ar); +} + +template<> +request +communicator::isend<packed_skeleton_oarchive> + (int dest, int tag, const packed_skeleton_oarchive& ar) const +{ + return this->isend(dest, tag, ar.get_skeleton()); +} + +template<> +request communicator::isend<content>(int dest, int tag, const content& c) const +{ + return request::make_bottom_send(*this, dest, tag, c.get_mpi_datatype()); +} + +request communicator::isend(int dest, int tag) const +{ + return request::make_empty_send(*this, dest, tag); +} + +template<> +request +communicator::irecv<packed_skeleton_iarchive> + (int source, int tag, packed_skeleton_iarchive& ar) const +{ + return this->irecv(source, tag, ar.get_skeleton()); +} + +template<> +request +communicator::irecv<const content>(int source, int tag, + const content& c) const +{ + return request::make_bottom_recv(*this, source, tag, c.get_mpi_datatype()); +} + +request communicator::irecv(int source, int tag) const +{ + return request::make_empty_recv(*this, source, tag); +} + +bool operator==(const communicator& comm1, const communicator& comm2) +{ + int result; + BOOST_MPI_CHECK_RESULT(MPI_Comm_compare, + (MPI_Comm(comm1), MPI_Comm(comm2), &result)); + return result == MPI_IDENT; +} + +} } // end namespace boost::mpi diff --git a/src/boost/libs/mpi/src/computation_tree.cpp b/src/boost/libs/mpi/src/computation_tree.cpp new file mode 100644 index 000000000..60de534d1 --- /dev/null +++ 
b/src/boost/libs/mpi/src/computation_tree.cpp @@ -0,0 +1,72 @@ +// Copyright (C) 2005 Douglas Gregor. + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Compute parents, children, levels, etc. to effect a parallel +// computation tree. + +#include <boost/mpi/detail/computation_tree.hpp> + +namespace boost { namespace mpi { namespace detail { + +int computation_tree::default_branching_factor = 3; + +computation_tree +::computation_tree(int rank, int size, int root, int branching_factor) + : rank(rank), size(size), root(root), + branching_factor_(branching_factor > 1? branching_factor + /* default */: default_branching_factor), + level_(0) +{ + // The position in the tree, once we've adjusted for non-zero + // roots. + int n = (rank + size - root) % size; + int sum = 0; + int term = 1; + + /* The level is the smallest value of k such that + + f^0 + f^1 + ... + f^k > n + + for branching factor f and index n in the tree. */ + while (sum <= n) { + ++level_; + term *= branching_factor_; + sum += term; + } +} + +int computation_tree::level_index(int n) const +{ + int sum = 0; + int term = 1; + while (n--) { + sum += term; + term *= branching_factor_; + } + return sum; +} + +int computation_tree::parent() const +{ + if (rank == root) return rank; + int n = rank + size - 1 - root; + return ((n % size / branching_factor_) + root) % size ; +} + +int computation_tree::child_begin() const +{ + // Zero-based index of this node + int n = (rank + size - root) % size; + + // Compute the index of the child (in a zero-based tree) + int child_index = level_index(level_ + 1) + + branching_factor_ * (n - level_index(level_)); + + if (child_index >= size) return root; + else return (child_index + root) % size; +} + +} } } // end namespace boost::mpi::detail diff --git a/src/boost/libs/mpi/src/content_oarchive.cpp b/src/boost/libs/mpi/src/content_oarchive.cpp new file mode 100644 index 000000000..0ec1ece37 --- /dev/null +++ b/src/boost/libs/mpi/src/content_oarchive.cpp @@ -0,0 +1,20 @@ +// (C) Copyright 2005 Matthias Troyer + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Authors: Matthias Troyer + +#define BOOST_ARCHIVE_SOURCE +#include <boost/archive/detail/archive_serializer_map.hpp> +#include <boost/archive/impl/archive_serializer_map.ipp> +#include <boost/mpi/skeleton_and_content.hpp> + +namespace boost { namespace archive { namespace detail { +// explicitly instantiate all required template functions + +template class archive_serializer_map<mpi::detail::content_oarchive> ; +template class archive_serializer_map<boost::mpi::detail::ignore_skeleton_oarchive<boost::mpi::detail::content_oarchive> >; +template class archive_serializer_map<boost::mpi::detail::ignore_skeleton_oarchive<boost::mpi::detail::mpi_datatype_oarchive> >; +} } } diff --git a/src/boost/libs/mpi/src/environment.cpp b/src/boost/libs/mpi/src/environment.cpp new file mode 100644 index 000000000..72b0359fd --- /dev/null +++ b/src/boost/libs/mpi/src/environment.cpp @@ -0,0 +1,258 @@ +// Copyright (C) 2005-2006 Douglas Gregor <doug.gregor@gmail.com> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Message Passing Interface 1.1 -- 7.1.1. Environmental Inquiries +#include <boost/mpi/environment.hpp> +#include <boost/mpi/exception.hpp> +#include <boost/mpi/detail/mpi_datatype_cache.hpp> +#include <boost/core/uncaught_exceptions.hpp> +#include <cassert> +#include <string> +#include <exception> +#include <stdexcept> +#include <ostream> + +namespace boost { namespace mpi { +namespace threading { +std::istream& operator>>(std::istream& in, level& l) +{ + std::string tk; + in >> tk; + if (!in.bad()) { + if (tk == "single") { + l = single; + } else if (tk == "funneled") { + l = funneled; + } else if (tk == "serialized") { + l = serialized; + } else if (tk == "multiple") { + l = multiple; + } else { + in.setstate(std::ios::badbit); + } + } + return in; +} + +std::ostream& operator<<(std::ostream& out, level l) +{ + switch(l) { + case single: + out << "single"; + break; + case funneled: + out << "funneled"; + break; + case serialized: + out << "serialized"; + break; + case multiple: + out << "multiple"; + break; + default: + out << "<level error>[" << int(l) << ']'; + out.setstate(std::ios::badbit); + break; + } + return out; +} + +} // namespace threading + +#ifdef BOOST_MPI_HAS_NOARG_INITIALIZATION +environment::environment(bool abrt) + : i_initialized(false), + abort_on_exception(abrt) +{ + if (!initialized()) { + BOOST_MPI_CHECK_RESULT(MPI_Init, (0, 0)); + i_initialized = true; + } + +#if (2 <= MPI_VERSION) + MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN); +#else + MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN); +#endif +} + +environment::environment(threading::level mt_level, bool abrt) + : i_initialized(false), + abort_on_exception(abrt) +{ + // It is not clear that we can pass null in MPI_Init_thread. + int dummy_thread_level = 0; + if (!initialized()) { + BOOST_MPI_CHECK_RESULT(MPI_Init_thread, + (0, 0, int(mt_level), &dummy_thread_level )); + i_initialized = true; + } + +#if (2 <= MPI_VERSION) + MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN); +#else + MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN); +#endif +} +#endif + +environment::environment(int& argc, char** &argv, bool abrt) + : i_initialized(false), + abort_on_exception(abrt) +{ + if (!initialized()) { + BOOST_MPI_CHECK_RESULT(MPI_Init, (&argc, &argv)); + i_initialized = true; + } + +#if (2 <= MPI_VERSION) + MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN); +#else + MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN); +#endif +} + +environment::environment(int& argc, char** &argv, threading::level mt_level, + bool abrt) + : i_initialized(false), + abort_on_exception(abrt) +{ + // It is not clear that we can pass null in MPI_Init_thread. 
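+ // MPI_Init_thread reports the thread support level actually provided here; + // the value is not examined afterwards.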
+ int dummy_thread_level = 0; + if (!initialized()) { + BOOST_MPI_CHECK_RESULT(MPI_Init_thread, + (&argc, &argv, int(mt_level), &dummy_thread_level)); + i_initialized = true; + } + +#if (2 <= MPI_VERSION) + MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN); +#else + MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN); +#endif +} + +environment::~environment() +{ + if (i_initialized) { + if (boost::core::uncaught_exceptions() > 0 && abort_on_exception) { + abort(-1); + } else if (!finalized()) { + detail::mpi_datatype_cache().clear(); + BOOST_MPI_CHECK_RESULT(MPI_Finalize, ()); + } + } +} + +void environment::abort(int errcode) +{ + BOOST_MPI_CHECK_RESULT(MPI_Abort, (MPI_COMM_WORLD, errcode)); +} + +bool environment::initialized() +{ + int flag; + BOOST_MPI_CHECK_RESULT(MPI_Initialized, (&flag)); + return flag != 0; +} + +bool environment::finalized() +{ + int flag; + BOOST_MPI_CHECK_RESULT(MPI_Finalized, (&flag)); + return flag != 0; +} + +int environment::max_tag() +{ + int* max_tag_value; + int found = 0; + +#if (2 <= MPI_VERSION) + BOOST_MPI_CHECK_RESULT(MPI_Comm_get_attr, + (MPI_COMM_WORLD, MPI_TAG_UB, &max_tag_value, &found)); +#else + BOOST_MPI_CHECK_RESULT(MPI_Attr_get, + (MPI_COMM_WORLD, MPI_TAG_UB, &max_tag_value, &found)); +#endif + assert(found != 0); + return *max_tag_value - num_reserved_tags; +} + +int environment::collectives_tag() +{ + return max_tag() + 1; +} + +optional<int> environment::host_rank() +{ + int* host; + int found = 0; + +#if (2 <= MPI_VERSION) + BOOST_MPI_CHECK_RESULT(MPI_Comm_get_attr, + (MPI_COMM_WORLD, MPI_HOST, &host, &found)); +#else + BOOST_MPI_CHECK_RESULT(MPI_Attr_get, + (MPI_COMM_WORLD, MPI_HOST, &host, &found)); +#endif + if (!found || *host == MPI_PROC_NULL) + return optional<int>(); + else + return *host; +} + +optional<int> environment::io_rank() +{ + int* io; + int found = 0; + +#if (2 <= MPI_VERSION) + BOOST_MPI_CHECK_RESULT(MPI_Comm_get_attr, + (MPI_COMM_WORLD, MPI_IO, &io, &found)); +#else + BOOST_MPI_CHECK_RESULT(MPI_Attr_get, + (MPI_COMM_WORLD, MPI_IO, &io, &found)); +#endif + if (!found || *io == MPI_PROC_NULL) + return optional<int>(); + else + return *io; +} + +std::string environment::processor_name() +{ + char name[MPI_MAX_PROCESSOR_NAME]; + int len; + + BOOST_MPI_CHECK_RESULT(MPI_Get_processor_name, (name, &len)); + return std::string(name, len); +} + +threading::level environment::thread_level() +{ + int level; + + BOOST_MPI_CHECK_RESULT(MPI_Query_thread, (&level)); + return static_cast<threading::level>(level); +} + +bool environment::is_main_thread() +{ + int isit; + + BOOST_MPI_CHECK_RESULT(MPI_Is_thread_main, (&isit)); + return static_cast<bool>(isit); +} + +std::pair<int, int> environment::version() +{ + int major, minor; + BOOST_MPI_CHECK_RESULT(MPI_Get_version, (&major, &minor)); + return std::make_pair(major, minor); +} + +} } // end namespace boost::mpi diff --git a/src/boost/libs/mpi/src/error_string.cpp b/src/boost/libs/mpi/src/error_string.cpp new file mode 100644 index 000000000..a6db48a6f --- /dev/null +++ b/src/boost/libs/mpi/src/error_string.cpp @@ -0,0 +1,31 @@ +// Copyright (C) 2018 Alain Miniussi <alain.miniussi -at- oca.eu>. + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +#include <sstream> +#include <boost/mpi/error_string.hpp> + +namespace boost { namespace mpi { + +std::string error_string(int err) +{ + char buffer[MPI_MAX_ERROR_STRING]; + int len; + int status = MPI_Error_string(err, buffer, &len); + if (status == MPI_SUCCESS) { + return std::string(buffer); + } else { + std::ostringstream out; + if (status == MPI_ERR_ARG) { + out << "<invalid MPI error code " << err << ">"; + } else { + out << "<got error " << status + << " while probing MPI error " << err << ">"; + } + return out.str(); + } +} + +} } diff --git a/src/boost/libs/mpi/src/exception.cpp b/src/boost/libs/mpi/src/exception.cpp new file mode 100644 index 000000000..aaae9e377 --- /dev/null +++ b/src/boost/libs/mpi/src/exception.cpp @@ -0,0 +1,24 @@ +// Copyright (C) 2007 Trustees of Indiana University + +// Authors: Douglas Gregor +// Andrew Lumsdaine + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +#include <boost/mpi/exception.hpp> +#include <boost/mpi/error_string.hpp> + +namespace boost { namespace mpi { + +exception::exception(const char* routine, int result_code) + : routine_(routine), result_code_(result_code) +{ + message.append(routine_); + message.append(": "); + message.append(error_string(result_code)); +} + +exception::~exception() throw() { } + +} } // end namespace boost::mpi diff --git a/src/boost/libs/mpi/src/graph_communicator.cpp b/src/boost/libs/mpi/src/graph_communicator.cpp new file mode 100644 index 000000000..586b57bf9 --- /dev/null +++ b/src/boost/libs/mpi/src/graph_communicator.cpp @@ -0,0 +1,73 @@ +// Copyright (C) 2007 Trustees of Indiana University + +// Authors: Douglas Gregor +// Andrew Lumsdaine + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +#include <boost/mpi/graph_communicator.hpp> + +namespace boost { namespace mpi { + +// Incidence Graph requirements +std::pair<detail::comm_out_edge_iterator, detail::comm_out_edge_iterator> +out_edges(int vertex, const graph_communicator& comm) +{ + int nneighbors = out_degree(vertex, comm); + shared_array<int> neighbors(new int[nneighbors]); + BOOST_MPI_CHECK_RESULT(MPI_Graph_neighbors, + ((MPI_Comm)comm, vertex, nneighbors, neighbors.get())); + return std::make_pair(detail::comm_out_edge_iterator(vertex, neighbors, 0), + detail::comm_out_edge_iterator(vertex, neighbors, + nneighbors)); +} + +int out_degree(int vertex, const graph_communicator& comm) +{ + int nneighbors; + BOOST_MPI_CHECK_RESULT(MPI_Graph_neighbors_count, + ((MPI_Comm)comm, vertex, &nneighbors)); + return nneighbors; +} + +// Adjacency Graph requirements +std::pair<detail::comm_adj_iterator, detail::comm_adj_iterator> +adjacent_vertices(int vertex, const graph_communicator& comm) +{ + int nneighbors = out_degree(vertex, comm); + shared_array<int> neighbors(new int[nneighbors]); + BOOST_MPI_CHECK_RESULT(MPI_Graph_neighbors, + ((MPI_Comm)comm, vertex, nneighbors, neighbors.get())); + return std::make_pair(detail::comm_adj_iterator(neighbors, 0), + detail::comm_adj_iterator(neighbors, nneighbors)); +} + +// Edge List Graph requirements +std::pair<detail::comm_edge_iterator, detail::comm_edge_iterator> +edges(const graph_communicator& comm); + +std::pair<detail::comm_edge_iterator, detail::comm_edge_iterator> +edges(const graph_communicator& comm) +{ + int nnodes, nedges; + BOOST_MPI_CHECK_RESULT(MPI_Graphdims_get, ((MPI_Comm)comm, &nnodes, &nedges)); + + shared_array<int> indices(new int[nnodes]); + shared_array<int> edges(new int[nedges]); + BOOST_MPI_CHECK_RESULT(MPI_Graph_get, + ((MPI_Comm)comm, nnodes, nedges, + indices.get(), edges.get())); + return std::make_pair(detail::comm_edge_iterator(indices, edges), + detail::comm_edge_iterator(nedges)); +} + + +int num_edges(const graph_communicator& comm) +{ + int nnodes, nedges; + BOOST_MPI_CHECK_RESULT(MPI_Graphdims_get, ((MPI_Comm)comm, &nnodes, &nedges)); + return nedges; +} + +} } // end namespace boost::mpi diff --git a/src/boost/libs/mpi/src/group.cpp b/src/boost/libs/mpi/src/group.cpp new file mode 100644 index 000000000..034d08f78 --- /dev/null +++ b/src/boost/libs/mpi/src/group.cpp @@ -0,0 +1,106 @@ +// Copyright (C) 2007 Trustees of Indiana University + +// Authors: Douglas Gregor +// Andrew Lumsdaine + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +#include <boost/mpi/group.hpp> +#include <boost/mpi/communicator.hpp> + +namespace boost { namespace mpi { + +group::group(const MPI_Group& in_group, bool adopt) +{ + if (in_group != MPI_GROUP_EMPTY) { + if (adopt) group_ptr.reset(new MPI_Group(in_group), group_free()); + else group_ptr.reset(new MPI_Group(in_group)); + } +} + +optional<int> group::rank() const +{ + if (!group_ptr) + return optional<int>(); + + int rank; + BOOST_MPI_CHECK_RESULT(MPI_Group_rank, (*group_ptr, &rank)); + if (rank == MPI_UNDEFINED) + return optional<int>(); + else + return rank; +} + +int group::size() const +{ + if (!group_ptr) + return 0; + + int size; + BOOST_MPI_CHECK_RESULT(MPI_Group_size, (*group_ptr, &size)); + return size; +} + +bool operator==(const group& g1, const group& g2) +{ + int result; + BOOST_MPI_CHECK_RESULT(MPI_Group_compare, + ((MPI_Group)g1, (MPI_Group)g2, &result)); + return result == MPI_IDENT; +} + +group operator|(const group& g1, const group& g2) +{ + MPI_Group result; + BOOST_MPI_CHECK_RESULT(MPI_Group_union, + ((MPI_Group)g1, (MPI_Group)g2, &result)); + return group(result, /*adopt=*/true); +} + +group operator&(const group& g1, const group& g2) +{ + MPI_Group result; + BOOST_MPI_CHECK_RESULT(MPI_Group_intersection, + ((MPI_Group)g1, (MPI_Group)g2, &result)); + return group(result, /*adopt=*/true); +} + +group operator-(const group& g1, const group& g2) +{ + MPI_Group result; + BOOST_MPI_CHECK_RESULT(MPI_Group_difference, + ((MPI_Group)g1, (MPI_Group)g2, &result)); + return group(result, /*adopt=*/true); +} + +template<> +int* +group::translate_ranks(int* first, int* last, const group& to_group, int* out) +{ + BOOST_MPI_CHECK_RESULT(MPI_Group_translate_ranks, + ((MPI_Group)*this, + last-first, + first, + (MPI_Group)to_group, + out)); + return out + (last - first); +} + +template<> group group::include(int* first, int* last) +{ + MPI_Group result; + BOOST_MPI_CHECK_RESULT(MPI_Group_incl, + ((MPI_Group)*this, last - first, first, &result)); + return group(result, /*adopt=*/true); +} + +template<> group group::exclude(int* first, int* last) +{ + MPI_Group result; + BOOST_MPI_CHECK_RESULT(MPI_Group_excl, + ((MPI_Group)*this, last - first, first, &result)); + return group(result, /*adopt=*/true); +} + +} } // end namespace boost::mpi diff --git a/src/boost/libs/mpi/src/intercommunicator.cpp b/src/boost/libs/mpi/src/intercommunicator.cpp new file mode 100644 index 000000000..6b072853c --- /dev/null +++ b/src/boost/libs/mpi/src/intercommunicator.cpp @@ -0,0 +1,54 @@ +// Copyright (C) 2007 Trustees of Indiana University + +// Authors: Douglas Gregor +// Andrew Lumsdaine + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +#include <boost/mpi/intercommunicator.hpp> +#include <boost/mpi/environment.hpp> +#include <boost/mpi/group.hpp> + +namespace boost { namespace mpi { + +intercommunicator::intercommunicator(const communicator& local, + int local_leader, + const communicator& peer, + int remote_leader) +{ + MPI_Comm comm; + BOOST_MPI_CHECK_RESULT(MPI_Intercomm_create, + ((MPI_Comm)local, local_leader, + (MPI_Comm)peer, remote_leader, + environment::collectives_tag(), &comm)); + comm_ptr.reset(new MPI_Comm(comm), comm_free()); +} + +boost::mpi::group intercommunicator::local_group() const +{ + return this->group(); +} + +int intercommunicator::remote_size() const +{ + int size; + BOOST_MPI_CHECK_RESULT(MPI_Comm_remote_size, ((MPI_Comm)*this, &size)); + return size; +} + +boost::mpi::group intercommunicator::remote_group() const +{ + MPI_Group gr; + BOOST_MPI_CHECK_RESULT(MPI_Comm_remote_group, ((MPI_Comm)*this, &gr)); + return boost::mpi::group(gr, /*adopt=*/true); +} + +communicator intercommunicator::merge(bool high) const +{ + MPI_Comm comm; + BOOST_MPI_CHECK_RESULT(MPI_Intercomm_merge, ((MPI_Comm)*this, high, &comm)); + return communicator(comm, comm_take_ownership); +} + +} } // end namespace boost::mpi diff --git a/src/boost/libs/mpi/src/mpi_datatype_cache.cpp b/src/boost/libs/mpi/src/mpi_datatype_cache.cpp new file mode 100644 index 000000000..8437e9d02 --- /dev/null +++ b/src/boost/libs/mpi/src/mpi_datatype_cache.cpp @@ -0,0 +1,66 @@ +// (C) Copyright 2005 Matthias Troyer + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Authors: Matthias Troyer + +#include <boost/archive/detail/archive_serializer_map.hpp> +#include <boost/mpi/detail/mpi_datatype_cache.hpp> +#include <map> + +namespace boost { namespace mpi { namespace detail { + + typedef std::map<std::type_info const*,MPI_Datatype,type_info_compare> + stored_map_type; + + struct mpi_datatype_map::implementation + { + stored_map_type map; + }; + + mpi_datatype_map::mpi_datatype_map() + { + impl = new implementation(); + } + + void mpi_datatype_map::clear() + { + // do not free after call to MPI_FInalize + int finalized=0; + BOOST_MPI_CHECK_RESULT(MPI_Finalized,(&finalized)); + if (!finalized) { + // ignore errors in the destructor + for (stored_map_type::iterator it=impl->map.begin(); it != impl->map.end(); ++it) + MPI_Type_free(&(it->second)); + } + } + + + mpi_datatype_map::~mpi_datatype_map() + { + clear(); + delete impl; + } + + MPI_Datatype mpi_datatype_map::get(const std::type_info* t) + { + stored_map_type::iterator pos = impl->map.find(t); + if (pos != impl->map.end()) + return pos->second; + else + return MPI_DATATYPE_NULL; + } + + void mpi_datatype_map::set(const std::type_info* t, MPI_Datatype datatype) + { + impl->map[t] = datatype; + } + + mpi_datatype_map& mpi_datatype_cache() + { + static mpi_datatype_map cache; + return cache; + } +} } } diff --git a/src/boost/libs/mpi/src/mpi_datatype_oarchive.cpp b/src/boost/libs/mpi/src/mpi_datatype_oarchive.cpp new file mode 100644 index 000000000..05d58427c --- /dev/null +++ b/src/boost/libs/mpi/src/mpi_datatype_oarchive.cpp @@ -0,0 +1,19 @@ +// (C) Copyright 2005 Matthias Troyer + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Authors: Matthias Troyer + +#define BOOST_ARCHIVE_SOURCE +#include <boost/archive/detail/archive_serializer_map.hpp> +#include <boost/archive/impl/archive_serializer_map.ipp> +#include <boost/mpi/detail/mpi_datatype_oarchive.hpp> + +namespace boost { namespace archive { namespace detail { +// explicitly instantiate all required template functions + +template class archive_serializer_map<mpi::detail::mpi_datatype_oarchive> ; + +} } } diff --git a/src/boost/libs/mpi/src/offsets.cpp b/src/boost/libs/mpi/src/offsets.cpp new file mode 100644 index 000000000..2382d456f --- /dev/null +++ b/src/boost/libs/mpi/src/offsets.cpp @@ -0,0 +1,96 @@ +// Copyright Alain Miniussi 2014. +// Distributed under the Boost Software License, Version 1.0. +// (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Authors: Alain Miniussi + +#include <boost/mpi/detail/offsets.hpp> +#include <boost/mpi/detail/antiques.hpp> + +namespace boost { namespace mpi { +namespace detail { + +// Convert a sequence of sizes [S0..Sn] to a sequence of displacements +// [O0..On] where O[0] = 0 and O[k+1] = O[k]+S[k]. +void +sizes2offsets(int const* sizes, int* offsets, int n) +{ + offsets[0] = 0; + for(int i = 1; i < n; ++i) { + offsets[i] = offsets[i-1] + sizes[i-1]; + } +} + +// Convert a sequence of sizes [S0..Sn] to a sequence of displacements +// [O0..On] where O[0] = 0 and O[k+1] = O[k]+S[k]. +void +sizes2offsets(std::vector<int> const& sizes, std::vector<int>& offsets) +{ + int sz = sizes.size(); + offsets.resize(sz); + sizes2offsets(c_data(sizes), c_data(offsets), sz); +} + +// Given a sequence of sizes (typically the number of records dispatched +// to each process in a scatter) and a sequence of displacements (typically the +// slot index at which those records start), convert the latter to a number +// of skipped slots. +void +offsets2skipped(int const* sizes, int const* offsets, int* skipped, int n) +{ + skipped[0] = 0; + for(int i = 1; i < n; ++i) { + skipped[i] -= offsets[i-1] + sizes[i-1]; + } +} + +// Reconstruct offsets from sizes assuming no padding. +// Only takes place on the root process and if +// displs are not already provided. +// If memory was allocated, returns a pointer to it, +// otherwise null. +int* +make_offsets(communicator const& comm, int const* sizes, int const* displs, int root) +{ + if (root == -1 || root == comm.rank()) { + assert(sizes); + if (!displs) { + int nproc = comm.size(); + int* offsets = new int[nproc]; + displs = offsets; + sizes2offsets(sizes, offsets, nproc); + return offsets; + } else { + return 0; + } + } else { + return 0; + } +} + +// Reconstruct skip slots from sizes and offsets. +// Only takes place on the root process and if +// displs are provided. +// If memory was allocated, returns a pointer to it, +// otherwise null. 
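+// (A skipped slot count is the gap, in elements, between the end of one +// block and the start of the next, as computed by offsets2skipped above.)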
+int* +make_skipped_slots(communicator const& comm, int const* sizes, int const* displs, int root) +{ + if (root == -1 || root == comm.rank()) { + assert(sizes); + if (displs) { + int nproc = comm.size(); + int* skipped = new int[nproc]; + std::copy(displs, displs+nproc, skipped); + offsets2skipped(sizes, displs, skipped, nproc); + return skipped; + } else { + return 0; + } + } else { + return 0; + } +} +} +}} diff --git a/src/boost/libs/mpi/src/packed_iarchive.cpp b/src/boost/libs/mpi/src/packed_iarchive.cpp new file mode 100644 index 000000000..a0ea5a6b5 --- /dev/null +++ b/src/boost/libs/mpi/src/packed_iarchive.cpp @@ -0,0 +1,21 @@ +// (C) Copyright 2005 Matthias Troyer + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Authors: Matthias Troyer + +#define BOOST_ARCHIVE_SOURCE +#include <boost/mpi/packed_iarchive.hpp> + +#include <boost/archive/detail/archive_serializer_map.hpp> +#include <boost/archive/impl/archive_serializer_map.ipp> + +namespace boost { namespace archive { + +// explicitly instantiate all required templates + +template class detail::archive_serializer_map<mpi::packed_iarchive> ; + +} } // end namespace boost::archive diff --git a/src/boost/libs/mpi/src/packed_oarchive.cpp b/src/boost/libs/mpi/src/packed_oarchive.cpp new file mode 100644 index 000000000..d340a40f9 --- /dev/null +++ b/src/boost/libs/mpi/src/packed_oarchive.cpp @@ -0,0 +1,19 @@ +// (C) Copyright 2005 Matthias Troyer + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Authors: Matthias Troyer + +#define BOOST_ARCHIVE_SOURCE +#include <boost/mpi/packed_oarchive.hpp> +#include <boost/archive/detail/archive_serializer_map.hpp> +#include <boost/archive/impl/archive_serializer_map.ipp> + +namespace boost { namespace archive { +// explicitly instantiate all required templates + +template class detail::archive_serializer_map<mpi::packed_oarchive> ; + +} } // end namespace boost::archive diff --git a/src/boost/libs/mpi/src/packed_skeleton_iarchive.cpp b/src/boost/libs/mpi/src/packed_skeleton_iarchive.cpp new file mode 100644 index 000000000..97115c20c --- /dev/null +++ b/src/boost/libs/mpi/src/packed_skeleton_iarchive.cpp @@ -0,0 +1,25 @@ +// (C) Copyright 2005 Matthias Troyer + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Authors: Matthias Troyer + +#define BOOST_ARCHIVE_SOURCE + +#include <boost/archive/detail/archive_serializer_map.hpp> +#include <boost/archive/impl/archive_serializer_map.ipp> +#include <boost/mpi/skeleton_and_content.hpp> + +namespace boost { namespace archive { + +// explicitly instantiate all required templates + +// template class basic_binary_iarchive<mpi::packed_skeleton_iarchive> ; +template class detail::archive_serializer_map<mpi::packed_skeleton_iarchive> ; +template class detail::archive_serializer_map< + mpi::detail::forward_skeleton_iarchive< + boost::mpi::packed_skeleton_iarchive, boost::mpi::packed_iarchive> > ; + +} } // end namespace boost::archive diff --git a/src/boost/libs/mpi/src/packed_skeleton_oarchive.cpp b/src/boost/libs/mpi/src/packed_skeleton_oarchive.cpp new file mode 100644 index 000000000..5cfecc077 --- /dev/null +++ b/src/boost/libs/mpi/src/packed_skeleton_oarchive.cpp @@ -0,0 +1,22 @@ +// (C) Copyright 2005 Matthias Troyer + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Authors: Matthias Troyer + +#define BOOST_ARCHIVE_SOURCE +#include <boost/mpi/skeleton_and_content.hpp> +#include <boost/archive/detail/archive_serializer_map.hpp> +#include <boost/archive/impl/archive_serializer_map.ipp> + +namespace boost { namespace archive { +// explicitly instantiate all required templates + +template class detail::archive_serializer_map<mpi::packed_skeleton_oarchive> ; +template class detail::archive_serializer_map< + mpi::detail::forward_skeleton_oarchive< + boost::mpi::packed_skeleton_oarchive, boost::mpi::packed_oarchive> > ; + +} } // end namespace boost::archive diff --git a/src/boost/libs/mpi/src/point_to_point.cpp b/src/boost/libs/mpi/src/point_to_point.cpp new file mode 100644 index 000000000..6fc0ad175 --- /dev/null +++ b/src/boost/libs/mpi/src/point_to_point.cpp @@ -0,0 +1,102 @@ +// Copyright 2005 Douglas Gregor. + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Message Passing Interface 1.1 -- Section 3. MPI Point-to-point + +/* There is the potential for optimization here. We could keep around + a "small message" buffer of size N that we just receive into by + default. If the message is N - sizeof(int) bytes or smaller, it can + just be sent with that buffer. If it's larger, we send the first N + - sizeof(int) bytes in the first packet followed by another + packet. The size of the second packet will be stored in an integer + at the end of the first packet. + + We will introduce this optimization later, when we have more + performance test cases and have met our functionality goals. 
*/ + +#include <boost/mpi/detail/point_to_point.hpp> +#include <boost/mpi/datatype.hpp> +#include <boost/mpi/exception.hpp> +#include <boost/mpi/request.hpp> +#include <boost/mpi/communicator.hpp> +#include <boost/mpi/detail/antiques.hpp> +#include <cassert> + +namespace boost { namespace mpi { namespace detail { + +void +packed_archive_send(communicator const& comm, int dest, int tag, + const packed_oarchive& ar) +{ +#if defined(BOOST_MPI_USE_IMPROBE) + { + void *buf = detail::unconst(ar.address()); + BOOST_MPI_CHECK_RESULT(MPI_Send, + (buf, ar.size(), MPI_PACKED, + dest, tag, comm)); + } +#else + { + std::size_t const& size = ar.size(); + BOOST_MPI_CHECK_RESULT(MPI_Send, + (detail::unconst(&size), 1, + get_mpi_datatype(size), + dest, tag, comm)); + BOOST_MPI_CHECK_RESULT(MPI_Send, + (detail::unconst(ar.address()), size, + MPI_PACKED, + dest, tag, comm)); + } +#endif +} + +request +packed_archive_isend(communicator const& comm, int dest, int tag, + const packed_oarchive& ar) +{ + return request::make_packed_send(comm, dest, tag, + detail::unconst(ar.address()), ar.size()); +} + +request +packed_archive_isend(communicator const& comm, int dest, int tag, + const packed_iarchive& ar) +{ + return request::make_packed_send(comm, dest, tag, + detail::unconst(ar.address()), ar.size()); +} + +void +packed_archive_recv(communicator const& comm, int source, int tag, packed_iarchive& ar, + MPI_Status& status) +{ +#if defined(BOOST_MPI_USE_IMPROBE) + { + MPI_Message msg; + BOOST_MPI_CHECK_RESULT(MPI_Mprobe, (source, tag, comm, &msg, &status)); + int count; + BOOST_MPI_CHECK_RESULT(MPI_Get_count, (&status, MPI_PACKED, &count)); + ar.resize(count); + BOOST_MPI_CHECK_RESULT(MPI_Mrecv, (ar.address(), count, MPI_PACKED, &msg, &status)); + } +#else + { + std::size_t count; + BOOST_MPI_CHECK_RESULT(MPI_Recv, + (&count, 1, get_mpi_datatype(count), + source, tag, comm, &status)); + + // Prepare input buffer and receive the message + ar.resize(count); + BOOST_MPI_CHECK_RESULT(MPI_Recv, + (ar.address(), count, MPI_PACKED, + status.MPI_SOURCE, status.MPI_TAG, + comm, &status)); + } +#endif +} + +} } } // end namespace boost::mpi::detail diff --git a/src/boost/libs/mpi/src/python/collectives.cpp b/src/boost/libs/mpi/src/python/collectives.cpp new file mode 100644 index 000000000..fc4bf7b4c --- /dev/null +++ b/src/boost/libs/mpi/src/python/collectives.cpp @@ -0,0 +1,144 @@ +// (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Authors: Douglas Gregor + +/** @file status.cpp + * + * This file reflects the Boost.MPI @c status class into + * Python. 
+ */ +#include <boost/python.hpp> +#include <boost/mpi.hpp> +#include <boost/mpi/python/serialize.hpp> + +using namespace boost::python; +using namespace boost::mpi; + +namespace boost { namespace mpi { namespace python { + +extern const char* all_gather_docstring; +extern const char* all_reduce_docstring; +extern const char* all_to_all_docstring; +extern const char* broadcast_docstring; +extern const char* gather_docstring; +extern const char* reduce_docstring; +extern const char* scan_docstring; +extern const char* scatter_docstring; + +object all_gather(const communicator& comm, object value) +{ + std::vector<object> values; + boost::mpi::all_gather(comm, value, values); + + boost::python::list l; + for (int i = 0; i < comm.size(); ++i) + l.append(values[i]); + return boost::python::tuple(l); +} + +object all_to_all(const communicator& comm, object in_values) +{ + // Build input values + std::vector<object> in_values_vec(comm.size()); + object iterator = object(handle<>(PyObject_GetIter(in_values.ptr()))); + for (int i = 0; i < comm.size(); ++i) + in_values_vec[i] = object(handle<>(PyIter_Next(iterator.ptr()))); + + std::vector<object> out_values_vec(comm.size()); + boost::mpi::all_to_all(comm, in_values_vec, out_values_vec); + + boost::python::list l; + for (int i = 0; i < comm.size(); ++i) + l.append(out_values_vec[i]); + return boost::python::tuple(l); +} + +object broadcast(const communicator& comm, object value, int root) +{ + boost::mpi::broadcast(comm, value, root); + return value; +} + +object gather(const communicator& comm, object value, int root) +{ + if (comm.rank() == root) { + std::vector<object> values; + boost::mpi::gather(comm, value, values, root); + + boost::python::list l; + for (int i = 0; i < comm.size(); ++i) + l.append(values[i]); + return boost::python::tuple(l); + } else { + boost::mpi::gather(comm, value, root); + return object(); + } +} + +object reduce(const communicator& comm, object value, object op, int root) +{ + if (comm.rank() == root) { + object out_value; + boost::mpi::reduce(comm, value, out_value, op, root); + return out_value; + } else { + boost::mpi::reduce(comm, value, op, root); + return object(); + } +} + +object scatter(const communicator& comm, object values, int root) +{ + object result; + + if (comm.rank() == root) { + std::vector<object> values_vec(comm.size()); + object iterator = object(handle<>(PyObject_GetIter(values.ptr()))); + for (int i = 0; i < comm.size(); ++i) + values_vec[i] = object(handle<>(PyIter_Next(iterator.ptr()))); + + boost::mpi::scatter(comm, values_vec, result, root); + } else { + boost::mpi::scatter(comm, result, root); + } + return result; +} + +void export_collectives() +{ + using boost::python::arg; + + def("all_reduce", + (object (*)(const communicator&, const object&, object))&all_reduce, + (arg("comm") = communicator(), arg("value"), arg("op")), + all_reduce_docstring); + def("all_gather", &all_gather, + (arg("comm") = communicator(), arg("value") = object()), + all_gather_docstring); + def("all_to_all", &all_to_all, + (arg("comm") = communicator(), arg("values") = object()), + all_to_all_docstring); + def("broadcast", &broadcast, + (arg("comm") = communicator(), arg("value") = object(), arg("root")), + broadcast_docstring); + def("gather", &gather, + (arg("comm") = communicator(), arg("value") = object(), arg("root")), + gather_docstring); + def("reduce", &reduce, + (arg("comm") = communicator(), arg("value"), arg("op"), + arg("root")), + reduce_docstring); + def("scan", + (object (*)(const communicator&, 
const object&, object))&scan, + (arg("comm") = communicator(), arg("value"), arg("op")), + scan_docstring); + def("scatter", &scatter, + (arg("comm") = communicator(), arg("values") = object(), arg("root")), + scatter_docstring); +} + +} } } // end namespace boost::mpi::python diff --git a/src/boost/libs/mpi/src/python/datatypes.cpp b/src/boost/libs/mpi/src/python/datatypes.cpp new file mode 100644 index 000000000..48d7f6f74 --- /dev/null +++ b/src/boost/libs/mpi/src/python/datatypes.cpp @@ -0,0 +1,27 @@ +// (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Authors: Douglas Gregor + +/** @file datatypes.cpp + * + * This file provides datatypes support for Boost.MPI in Python. + */ +#include <boost/mpi/python/serialize.hpp> +#include <boost/mpi.hpp> + +namespace boost { namespace mpi { namespace python { + +void export_datatypes() +{ +#if PY_MAJOR_VERSION < 3 + register_serialized(long(0), &PyInt_Type); +#endif + register_serialized(false, &PyBool_Type); + register_serialized(double(0.0), &PyFloat_Type); +} + +} } } // end namespace boost::mpi::python diff --git a/src/boost/libs/mpi/src/python/documentation.cpp b/src/boost/libs/mpi/src/python/documentation.cpp new file mode 100644 index 000000000..ef28fb3b5 --- /dev/null +++ b/src/boost/libs/mpi/src/python/documentation.cpp @@ -0,0 +1,671 @@ +// (C) Copyright 2005 The Trustees of Indiana University. +// (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Authors: Douglas Gregor + +/** @file documentation.cpp + * + * This file contains all of the documentation strings for the + * Boost.MPI Python bindings. + */ +namespace boost { namespace mpi { namespace python { + +const char* module_docstring = + "The boost.mpi module contains Python wrappers for Boost.MPI.\n" + "Boost.MPI is a C++ interface to the Message Passing Interface 1.1,\n" + "a high-performance message passing library for parallel programming.\n" + "\n" + "This module supports the most commonly used subset of MPI 1.1. All\n" + "communication operations can transmit any Python object that can be\n" + "pickled and unpickled, along with C++-serialized data types and\n" + "separation of the structure of a data type from its content.\n" + "Collectives that have a user-supplied functions,\n" + "such as reduce() or scan(), accept arbitrary Python functions, and\n" + "all collectives can operate on any serializable or picklable data type.\n" + "\n" + "IMPORTANT MODULE DATA\n" + " any_source This constant may be used for the source parameter of\n" + " receive and probe operations to indicate that a\n" + " message may be received from any source.\n" + "\n" + " any_tag This constant may be used for the tag parameter of\n" + " receive or probe operations to indicate that a send\n" + " with any tag will be matched.\n" + "\n" + " collectives_tag Returns the reserved tag value used by the Boost.MPI\n" + " implementation for collective operations. 
Although\n" + " users are not permitted to use this tag to send or\n" + " receive messages, it may be useful when\n" + " monitoring communication patterns.\n" + "\n" + " host_rank If there is a host process, this is the rank of\n" + " that process. Otherwise, this value will be None. MPI\n" + " does not define the meaning of a \"host\" process: \n" + " consult the documentation for your MPI implementation.\n" + "\n" + " io_rank The rank of a process that can perform input/output\n" + " via the standard facilities. If every process can\n" + " perform I/O using the standard facilities, this value\n" + " will be the same as any_source. If no process can\n" + " perform I/O, this value will be None.\n" + "\n" + " max_tag The maximum value that may be used for the tag\n" + " parameter of send/receive operations. This value will\n" + " be somewhat smaller than the value of MPI_TAG_UB,\n" + " because the Boost.MPI implementation reserves some\n" + " tags for collective operations.\n" + "\n" + " processor_name The name of this processor. The actual form of the\n" + " name is unspecified, but may be documented by\n" + " the underlying MPI implementation.\n" + "\n" + " rank The rank of this process in the \"world\" communicator.\n" + "\n" + " size The number of processes in the \"world\" communicator.\n" + "\n" + " world The \"world\" communicator from which all other\n" + " communicators will be derived. This is the equivalent\n" + " of MPI_COMM_WORLD.\n" + "\n" + "TRANSMITTING USER-DEFINED DATA\n" + " Boost.MPI can transmit user-defined data in several different ways.\n" + " Most importantly, it can transmit arbitrary Python objects by pickling\n" + " them at the sender and unpickling them at the receiver, allowing\n" + " arbitrarily complex Python data structures to interoperate with MPI.\n" + "\n" + " Boost.MPI also supports efficient serialization and transmission of\n" + " C++ objects (that have been exposed to Python) through its C++\n" + " interface. Any C++ type that provides (de-)serialization routines that\n" + " meet the requirements of the Boost.Serialization library is eligible\n" + " for this optimization, but the type must be registered in advance. To\n" + " register a C++ type, invoke the C++ function:\n" + " boost::mpi::python::register_serialized\n" + "\n" + " Finally, Boost.MPI supports separation of the structure of an object\n" + " from the data it stores, allowing the two pieces to be transmitted\n" + " separately. This \"skeleton/content\" mechanism, described in more\n" + " detail in a later section, is a communication optimization suitable\n" + " for problems with fixed data structures whose internal data changes\n" + " frequently.\n" + "\n" + "COLLECTIVES\n" + " Boost.MPI supports all of the MPI collectives (scatter, reduce, scan,\n" + " broadcast, etc.) for any type of data that can be transmitted with the\n" + " point-to-point communication operations. For the MPI collectives that\n" + " require a user-specified operation (e.g., reduce and scan), the\n" + " operation can be an arbitrary Python function. 
For instance, one could\n" + " concatenate strings with all_reduce:\n\n" + " mpi.all_reduce(my_string, lambda x,y: x + y)\n\n" + " The following module-level functions implement MPI collectives:\n" + " all_gather Gather the values from all processes.\n" + " all_reduce Combine the results from all processes.\n" + " all_to_all Every process sends data to every other process.\n" + " broadcast Broadcast data from one process to all other processes.\n" + " gather Gather the values from all processes to the root.\n" + " reduce Combine the results from all processes to the root.\n" + " scan Prefix reduction of the values from all processes.\n" + " scatter Scatter the values stored at the root to all processes.\n" + "\n" + "SKELETON/CONTENT MECHANISM\n" + " Boost.MPI provides a skeleton/content mechanism that allows the\n" + " transfer of large data structures to be split into two separate stages,\n" + " with the `skeleton' (or, `shape') of the data structure sent first and\n" + " the content (or, `data') of the data structure sent later, potentially\n" + " several times, so long as the structure has not changed since the\n" + " skeleton was transferred. The skeleton/content mechanism can improve\n" + " performance when the data structure is large and its shape is fixed,\n" + " because while the skeleton requires serialization (it has an unknown\n" + " size), the content transfer is fixed-size and can be done without\n" + " extra copies.\n" + "\n" + " To use the skeleton/content mechanism from Python, you must first\n" + " register the type of your data structure with the skeleton/content\n" + " mechanism *from C++*. The registration function is\n" + " boost::mpi::python::register_skeleton_and_content\n" + " and resides in the <boost/mpi/python.hpp> header.\n" + "\n" + " Once you have registered your C++ data structures, you can extract\n" + " the skeleton for an instance of that data structure with skeleton().\n" + " The resulting SkeletonProxy can be transmitted via the normal send\n" + " routine, e.g.,\n\n" + " mpi.world.send(1, 0, skeleton(my_data_structure))\n\n" + " SkeletonProxy objects can be received on the other end via recv(),\n" + " which stores a newly-created instance of your data structure with the\n" + " same `shape' as the sender in its `object' attribute:\n\n" + " shape = mpi.world.recv(0, 0)\n" + " my_data_structure = shape.object\n\n" + " Once the skeleton has been transmitted, the content (accessed via \n" + " get_content) can be transmitted in much the same way. Note, however,\n" + " that the receiver also specifies get_content(my_data_structure) in its\n" + " call to receive:\n\n" + " if mpi.rank == 0:\n" + " mpi.world.send(1, 0, get_content(my_data_structure))\n" + " else:\n" + " mpi.world.recv(0, 0, get_content(my_data_structure))\n\n" + " Of course, this transmission of content can occur repeatedly, if the\n" + " values in the data structure--but not its shape--changes.\n" + "\n" + " The skeleton/content mechanism is a structured way to exploit the\n" + " interaction between custom-built MPI datatypes and MPI_BOTTOM, to\n" + " eliminate extra buffer copies.\n" + "\n" + "C++/PYTHON MPI COMPATIBILITY\n" + " Boost.MPI is a C++ library whose facilities have been exposed to Python\n" + " via the Boost.Python library. 
Since the Boost.MPI Python bindings are\n" + " built directly on top of the C++ library, and nearly every feature of\n" + " the C++ library is available in Python, hybrid C++/Python programs using\n" + " Boost.MPI can interact, e.g., sending a value from Python but receiving\n" + " that value in C++ (or vice versa). However, doing so requires some\n" + " care. Because Python objects are dynamically typed, Boost.MPI transfers\n" + " type information along with the serialized form of the object, so that\n" + " the object can be received even when its type is not known. This\n" + " mechanism differs from its C++ counterpart, where the static types of\n" + " transmitted values are always known.\n" + "\n" + " The only way to communicate between the C++ and Python views on \n" + " Boost.MPI is to traffic entirely in Python objects. For Python, this is\n" + " the normal state of affairs, so nothing will change. For C++, this\n" + " means sending and receiving values of type boost::python::object, from\n" + " the Boost.Python library. For instance, say we want to transmit an\n" + " integer value from Python:\n\n" + " comm.send(1, 0, 17)\n\n" + " In C++, we would receive that value into a Python object and then\n" + " `extract' an integer value:\n\n" + " boost::python::object value;\n" + " comm.recv(0, 0, value);\n" + " int int_value = boost::python::extract<int>(value);\n\n" + " In the future, Boost.MPI will be extended to allow improved\n" + " interoperability with the C++ Boost.MPI and the C MPI bindings.\n" + ; + +/*********************************************************** + * environment documentation * + ***********************************************************/ +const char* environment_init_docstring = + "Initialize the MPI environment. Users should not need to call\n" + "this function directly, because the MPI environment will be\n" + "automatically initialized when the Boost.MPI module is loaded.\n"; + +const char* environment_finalize_docstring = + "Finalize (shut down) the MPI environment. Users only need to\n" + "invoke this function if MPI should be shut down before program\n" + "termination. Boost.MPI will automatically finalize the MPI\n" + "environment when the program exits.\n"; + +const char* environment_abort_docstring = + "Aborts all MPI processes and returns to the environment. The\n" + "precise behavior will be defined by the underlying MPI\n" + "implementation. This is equivalent to a call to MPI_Abort with\n" + "MPI_COMM_WORLD.\n" + "errcode is the error code to return from aborted processes.\n"; + +const char* environment_initialized_docstring = + "Determine if the MPI environment has already been initialized.\n"; + +const char* environment_finalized_docstring = + "Determine if the MPI environment has already been finalized.\n"; + +/*********************************************************** + * nonblocking documentation * + ***********************************************************/ +const char* request_list_init_docstring= + "Without arguments, constructs an empty RequestList.\n" + "With one argument `iterable', copies request objects from this\n" + "iterable to the new RequestList.\n"; + +const char* nonblocking_wait_any_docstring = + "Waits until any of the given requests has been completed. 
It provides\n" + "functionality equivalent to MPI_Waitany.\n" + "\n" + "requests must be a RequestList instance.\n" + "\n" + "Returns a triple (value, status, index) consisting of received value\n" + "(or None), the Status object for the completed request, and its index\n" + "in the RequestList.\n"; + +const char* nonblocking_test_any_docstring = + "Tests if any of the given requests have been completed, but does not wait\n" + "for completion. It provides functionality equivalent to MPI_Testany.\n" + "\n" + "requests must be a RequestList instance.\n" + "\n" + "Returns a triple (value, status, index) like wait_any or None if no request\n" + "is complete.\n"; + +const char* nonblocking_wait_all_docstring = + "Waits until all of the given requests have been completed. It provides\n" + "functionality equivalent to MPI_Waitall.\n" + "\n" + "requests must be a RequestList instance.\n" + "\n" + "If the second parameter `callable' is provided, it is called with each\n" + "completed request's received value (or None) and its Status object as\n" + "its arguments. The calls occur in the order given by the `requests' list.\n"; + +const char* nonblocking_test_all_docstring = + "Tests if all of the given requests have been completed. It provides\n" + "functionality equivalent to MPI_Testall.\n" + "\n" + "Returns True if all requests have been completed.\n" + "\n" + "requests must be a RequestList instance.\n" + "\n" + "If the second parameter `callable' is provided, it is called with each\n" + "completed request's received value (or None) and its Status object as\n" + "its arguments. The calls occur in the order given by the `requests' list.\n"; + +const char* nonblocking_wait_some_docstring = + "Waits until at least one of the given requests has completed. It\n" + "then completes all of the requests it can, partitioning the input\n" + "sequence into pending requests followed by completed requests.\n" + "\n" + "This routine provides functionality equivalent to MPI_Waitsome.\n" + "\n" + "Returns the index of the first completed request." + "\n" + "requests must be a RequestList instance.\n" + "\n" + "If the second parameter `callable' is provided, it is called with each\n" + "completed request's received value (or None) and its Status object as\n" + "its arguments. The calls occur in the order given by the `requests' list.\n"; + +const char* nonblocking_test_some_docstring = + "Tests to see if any of the given requests has completed. It completes\n" + "all of the requests it can, partitioning the input sequence into pending\n" + "requests followed by completed requests. This routine is similar to\n" + "wait_some, but does not wait until any requests have completed.\n" + "\n" + "This routine provides functionality equivalent to MPI_Testsome.\n" + "\n" + "Returns the index of the first completed request." + "\n" + "requests must be a RequestList instance.\n" + "\n" + "If the second parameter `callable' is provided, it is called with each\n" + "completed request's received value (or None) and its Status object as\n" + "its arguments. The calls occur in the order given by the `requests' list.\n"; + +/*********************************************************** + * exception documentation * + ***********************************************************/ +const char* exception_docstring = + "Instances of this class will be thrown when an MPI error\n" + "occurs. 
MPI failures that trigger these exceptions may or may not\n" + "be recoverable, depending on the underlying MPI implementation.\n" + "Consult the documentation for your MPI implementation to determine\n" + "the effect of MPI errors.\n"; + +const char* exception_what_docstring = + "A description of the error that occurred. At present, this refers\n" + "only to the name of the MPI routine that failed.\n"; + +const char* exception_routine_docstring = + "The name of the MPI routine that reported the error.\n"; + +const char* exception_result_code_docstring = + "The result code returned from the MPI routine that reported the\n" + "error.\n"; + +/*********************************************************** + * collectives documentation * + ***********************************************************/ +const char* all_gather_docstring = + "all_gather is a collective algorithm that collects the values\n" + "stored at each process into a tuple of values indexed by the\n" + "process number they came from. all_gather is (semantically) a\n" + "gather followed by a broadcast. The same tuple of values is\n" + "returned to all processes.\n"; + +const char* all_reduce_docstring = + "all_reduce is a collective algorithm that combines the values\n" + "stored by each process into a single value. The values can be\n" + "combined arbitrarily, specified via any function. The values\n" + "a1, a2, .., ap provided by p processors will be combined by the\n" + "binary function op into the result\n" + " op(a1, op(a2, ... op(ap-1,ap)))\n" + "that will be returned to all processes. This function is the\n" + "equivalent of calling all_gather() and then applying the built-in\n" + "reduce() function to the returned sequence. op is assumed to be\n" + "associative.\n"; + +const char* all_to_all_docstring = + "all_to_all is a collective algorithm that transmits values from\n" + "every process to every other process. On process i, the jth value\n" + "of the values sequence is sent to process j and placed in the ith\n" + "position of the tuple that will be returned from all_to_all.\n"; + +const char* broadcast_docstring = + "broadcast is a collective algorithm that transfers a value from an\n" + "arbitrary root process to every other process that is part of the\n" + "given communicator (comm). The root parameter must be the same for\n" + "every process. The value parameter need only be specified at the\n" + "root. broadcast() returns the same broadcasted value to every process.\n"; + +const char* gather_docstring = + "gather is a collective algorithm that collects the values\n" + "stored at each process into a tuple of values at the root\n" + "process. This tuple is indexed by the process number that the\n" + "value came from, and will be returned only by the root process.\n" + "All other processes return None.\n"; + +const char* reduce_docstring = + "reduce is a collective algorithm that combines the values\n" + "stored by each process into a single value at the root. The\n" + "values can be combined arbitrarily, specified via any function.\n" + "The values a1, a2, .., ap provided by p processors will be\n" + "combined by the binary function op into the result\n" + " op(a1, op(a2, ... op(ap-1,ap)))\n" + "that will be returned on the root process. This function is the\n" + "equivalent of calling gather() to the root and then applying the\n" + "built-in reduce() function to the returned sequence. All non-root\n" + "processes return None. 
op is assumed to be associative.\n"; + +const char* scan_docstring = + "@c scan computes a prefix reduction of values from all processes.\n" + "It is a collective algorithm that combines the values stored by\n" + "each process with the values of all processes with a smaller rank.\n" + "The values can be arbitrarily combined, specified via a binary\n" + "function op. If each process i provides the value ai, then scan\n" + "returns op(a1, op(a2, ... op(ai-1, ai))) to the ith process. op is\n" + "assumed to be associative. This routine is the equivalent of an\n" + "all_gather(), followed by a built-in reduce() on the first i+1\n" + "values in the resulting sequence on processor i. op is assumed\n" + "to be associative.\n"; + +const char* scatter_docstring = + "scatter is a collective algorithm that scatters the values stored\n" + "in the root process (as a container with comm.size elements) to\n" + "all of the processes in the communicator. The values parameter \n" + "(only significant at the root) is indexed by the process number to\n" + "which the corresponding value will be sent. The value received by \n" + "each process is returned from scatter.\n"; + +/*********************************************************** + * communicator documentation * + ***********************************************************/ +const char* communicator_docstring = + "The Communicator class abstracts a set of communicating\n" + "processes in MPI. All of the processes that belong to a certain\n" + "communicator can determine the size of the communicator, their rank\n" + "within the communicator, and communicate with any other processes\n" + "in the communicator.\n"; + +const char* communicator_default_constructor_docstring = + "Build a new Boost.MPI Communicator instance for MPI_COMM_WORLD.\n"; + +const char* communicator_rank_docstring = + "Returns the rank of the process in the communicator, which will be a\n" + "value in [0, size).\n"; + +const char* communicator_size_docstring = + "Returns the number of processes in the communicator.\n"; + +const char* communicator_send_docstring = + "This routine executes a potentially blocking send with the given\n" + "tag to the process with rank dest. It can be received by the\n" + "destination process with a matching recv call. The value will be\n" + "transmitted in one of several ways:\n" + "\n" + " - For C++ objects registered via register_serialized(), the value\n" + " will be serialized and transmitted.\n" + "\n" + " - For SkeletonProxy objects, the skeleton of the object will be\n" + " serialized and transmitted.\n" + "\n" + " - For Content objects, the content will be transmitted directly.\n" + " This content can be received by a matching recv/irecv call that\n" + " provides a suitable `buffer' argument.\n" + "\n" + " - For all other Python objects, the value will be pickled and\n" + " transmitted.\n"; + +const char* communicator_recv_docstring = + "This routine blocks until it receives a message from the process\n" + "source with the given tag. If the source parameter is not specified,\n" + "the message can be received from any process. 
Likewise, if the tag\n" + "parameter is not specified, a message with any tag can be received.\n" + "If return_status is True, returns a tuple containing the received\n" + "object followed by a Status object describing the communication.\n" + "Otherwise, recv() returns just the received object.\n" + "\n" + "When receiving the content of a data type that has been sent separately\n" + "from its skeleton, user code must provide a value for the `buffer'\n" + "argument. This value should be the Content object returned from\n" + "get_content().\n"; + +const char* communicator_isend_docstring = + "This routine executes a nonblocking send with the given\n" + "tag to the process with rank dest. It can be received by the\n" + "destination process with a matching recv call. The value will be\n" + "transmitted in the same way as with send().\n" + "This routine returns a Request object, which can be used to query\n" + "when the transmission has completed, wait for its completion, or\n" + "cancel the transmission.\n"; + +const char* communicator_irecv_docstring = + "This routine initiates a non-blocking receive from the process\n" + "source with the given tag. If the source parameter is not specified,\n" + "the message can be received from any process. Likewise, if the tag\n" + "parameter is not specified, a message with any tag can be received.\n" + "This routine returns a Request object, which can be used to query\n" + "when the transmission has completed, wait for its completion, or\n" + "cancel the transmission. The received value be accessible\n" + "through the `value' attribute of the Request object once transmission\n" + "has completed.\n" + "\n" + "As with the recv() routine, when receiving the content of a data type\n" + "that has been sent separately from its skeleton, user code must provide\n" + "a value for the `buffer' argument. This value should be the Content\n" + "object returned from get_content().\n"; + + const char* communicator_probe_docstring = + "This operation waits until a message matching (source, tag)\n" + "is available to be received. It then returns information about\n" + "that message. If source is omitted, a message from any process\n" + "will match. If tag is omitted, a message with any tag will match.\n" + "The actual source and tag can be retrieved from the returned Status\n" + "object. To check if a message is available without blocking, use\n" + "iprobe.\n"; + +const char* communicator_iprobe_docstring = + "This operation determines if a message matching (source, tag) is\n" + "available to be received. If so, it returns information about that\n" + "message; otherwise, it returns None. If source is omitted, a message\n" + "from any process will match. If tag is omitted, a message with any\n" + "tag will match. The actual source and tag can be retrieved from the\n" + "returned Status object. To wait for a message to become available, use\n" + "probe.\n"; + +const char* communicator_barrier_docstring = + "Wait for all processes within a communicator to reach the\n" + "barrier.\n"; + +const char* communicator_split_docstring = + "Split the communicator into multiple, disjoint communicators\n" + "each of which is based on a particular color. This is a\n" + "collective operation that returns a new communicator that is a\n" + "subgroup of this. This routine is functionally equivalent to\n" + "MPI_Comm_split.\n\n" + "color is the color of this process. 
All processes with the\n" + "same color value will be placed into the same group.\n\n" + "If provided, key is a key value that will be used to determine\n" + "the ordering of processes with the same color in the resulting\n" + "communicator. If omitted, the key will default to the rank of\n" + "the process in the current communicator.\n\n" + "Returns a new Communicator instance containing all of the \n" + "processes in this communicator that have the same color.\n"; + +const char* communicator_abort_docstring = + "Makes a \"best attempt\" to abort all of the tasks in the group of\n" + "this communicator. Depending on the underlying MPI\n" + "implementation, this may either abort the entire program (and\n" + "possibly return errcode to the environment) or only abort\n" + "some processes, allowing the others to continue. Consult the\n" + "documentation for your MPI implementation. This is equivalent to\n" + "a call to MPI_Abort\n\n" + "errcode is the error code to return from aborted processes.\n"; + +/*********************************************************** + * request documentation * + ***********************************************************/ +const char* request_docstring = + "The Request class contains information about a non-blocking send\n" + "or receive and will be returned from isend or irecv, respectively.\n" + "When a Request object represents a completed irecv, the `value' \n" + "attribute will contain the received value.\n"; + +const char* request_with_value_docstring = + "This class is an implementation detail. Any call that accepts a\n" + "Request also accepts a RequestWithValue, and vice versa.\n"; + +const char* request_wait_docstring = + "Wait until the communication associated with this request has\n" + "completed. For a request that is associated with an isend(), returns\n" + "a Status object describing the communication. For an irecv()\n" + "operation, returns the received value by default. However, when\n" + "return_status=True, a (value, status) pair is returned by a\n" + "completed irecv request.\n"; + +const char* request_test_docstring = + "Determine whether the communication associated with this request\n" + "has completed successfully. If so, returns the Status object\n" + "describing the communication (for an isend request) or a tuple\n" + "containing the received value and a Status object (for an irecv\n" + "request). 
Note that once test() returns a Status object, the\n"
+ "request has completed and wait() should not be called.\n";
+
+const char* request_cancel_docstring =
+ "Cancel a pending communication, assuming it has not already been\n"
+ "completed.\n";
+
+const char* request_value_docstring =
+ "If this request originated in an irecv(), this property makes the\n"
+ "sent value accessible once the request completes.\n"
+ "\n"
+ "If no value is available, ValueError is raised.\n";
+
+/***********************************************************
+ * skeleton/content documentation *
+ ***********************************************************/
+const char* object_without_skeleton_docstring =
+ "The ObjectWithoutSkeleton class is an exception class used only\n"
+ "when the skeleton() or get_content() function is called with an\n"
+ "object that is not supported by the skeleton/content mechanism.\n"
+ "All C++ types for which skeletons and content can be transmitted\n"
+ "must be registered with the C++ routine:\n"
+ " boost::mpi::python::register_skeleton_and_content\n";
+
+const char* object_without_skeleton_object_docstring =
+ "The object on which skeleton() or get_content() was invoked.\n";
+
+const char* skeleton_proxy_docstring =
+ "The SkeletonProxy class is used to represent the skeleton of an\n"
+ "object. The SkeletonProxy can be used as the value parameter of\n"
+ "send() or isend() operations, but instead of transmitting the\n"
+ "entire object, only its skeleton (\"shape\") will be sent, without\n"
+ "the actual data. Its content can then be transmitted separately.\n"
+ "\n"
+ "User code cannot generate SkeletonProxy instances directly. To\n"
+ "refer to the skeleton of an object, use skeleton(object). Skeletons\n"
+ "can also be received with the recv() and irecv() methods.\n"
+ "\n"
+ "Note that the skeleton/content mechanism can only be used with C++\n"
+ "types that have been explicitly registered.\n";
+
+const char* skeleton_proxy_object_docstring =
+ "The actual object whose skeleton is represented by this proxy object.\n";
+
+const char* content_docstring =
+ "The content is a proxy class that represents the content of an object,\n"
+ "which can be separately sent or received from its skeleton.\n"
+ "\n"
+ "User code cannot generate content instances directly. Call the\n"
+ "get_content() routine to retrieve the content proxy for a particular\n"
+ "object. The content instance can be used with any of the send() or\n"
+ "recv() variants. Note that get_content() can only be used with C++\n"
+ "data types that have been explicitly registered with the Python\n"
+ "skeleton/content mechanism.\n";
+
+const char* skeleton_docstring =
+ "The skeleton function retrieves the SkeletonProxy for its object\n"
+ "parameter, allowing the transmission of the skeleton (or \"shape\")\n"
+ "of the object separately from its data. The skeleton/content mechanism\n"
+ "is useful when a large data structure remains structurally the same\n"
+ "throughout a computation, but its content (i.e., the values in the\n"
+ "structure) changes several times. Transmission of the content part does\n"
+ "not require any serialization or unnecessary buffer copies, so it is\n"
+ "very efficient for large data structures.\n"
+ "\n"
+ "Only C++ types that have been explicitly registered with the Boost.MPI\n"
+ "Python library can be used with the skeleton/content mechanism. Use:\n"
+ " boost::mpi::python::register_skeleton_and_content\n";
+
+const char* get_content_docstring =
+ "The get_content function retrieves the content for its object parameter,\n"
+ "allowing the transmission of the data in a data structure separately\n"
+ "from its skeleton (or \"shape\"). The skeleton/content mechanism\n"
+ "is useful when a large data structure remains structurally the same\n"
+ "throughout a computation, but its content (i.e., the values in the\n"
+ "structure) changes several times. Transmission of the content part does\n"
+ "not require any serialization or unnecessary buffer copies, so it is\n"
+ "very efficient for large data structures.\n"
+ "\n"
+ "Only C++ types that have been explicitly registered with the Boost.MPI\n"
+ "Python library can be used with the skeleton/content mechanism. Use:\n"
+ " boost::mpi::python::register_skeleton_and_content\n";
+
+/***********************************************************
+ * status documentation *
+ ***********************************************************/
+const char* status_docstring =
+ "The Status class stores information about a given message, including\n"
+ "its source, tag, and whether the message transmission was cancelled\n"
+ "or resulted in an error.\n";
+
+const char* status_source_docstring =
+ "The source of the incoming message.\n";
+
+const char* status_tag_docstring =
+ "The tag of the incoming message.\n";
+
+const char* status_error_docstring =
+ "The error code associated with this transmission.\n";
+
+const char* status_cancelled_docstring =
+ "Whether this transmission was cancelled.\n";
+
+/***********************************************************
+ * timer documentation *
+ ***********************************************************/
+const char* timer_docstring =
+ "The Timer class is a simple wrapper around the MPI timing facilities.\n";
+
+const char* timer_default_constructor_docstring =
+ "Initializes the timer. After this call, elapsed == 0.\n";
+
+const char* timer_restart_docstring =
+ "Restart the timer, after which elapsed == 0.\n";
+
+const char* timer_elapsed_docstring =
+ "The time elapsed since initialization or the last restart(),\n"
+ "whichever is more recent.\n";
+
+const char* timer_elapsed_min_docstring =
+ "Returns the minimum non-zero value that elapsed may return.\n"
+ "This is the resolution of the timer.\n";
+
+const char* timer_elapsed_max_docstring =
+ "Return an estimate of the maximum possible value of elapsed. Note\n"
+ "that this routine may return too high a value on some systems.\n";
+
+const char* timer_time_is_global_docstring =
+ "Determines whether the elapsed time values are global times or\n"
+ "local processor times.\n";
+
+} } } // end namespace boost::mpi::python
diff --git a/src/boost/libs/mpi/src/python/module.cpp b/src/boost/libs/mpi/src/python/module.cpp
new file mode 100644
index 000000000..ec3696843
--- /dev/null
+++ b/src/boost/libs/mpi/src/python/module.cpp
@@ -0,0 +1,55 @@
+// (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com>
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// Authors: Douglas Gregor
+
+/** @file module.cpp
+ *
+ * This file provides the top-level module for the Boost.MPI Python
+ * bindings.
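+ *
+ * A hypothetical usage sketch: once built, the extension is imported as
+ * `mpi' and exposes the names registered in BOOST_PYTHON_MODULE(mpi)
+ * below, e.g.
+ *
+ *   import mpi
+ *   print(mpi.rank, mpi.size)   # module-level attributes set on import
+ *
+ * (`rank' and `size' are installed by export_communicator().)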
+ */ +#include <boost/python.hpp> +#include <boost/mpi.hpp> + +using namespace boost::python; +using namespace boost::mpi; + +namespace boost { namespace mpi { namespace python { + +extern void export_environment(); +extern void export_exception(); +extern void export_collectives(); +extern void export_communicator(); +extern void export_datatypes(); +extern void export_request(); +extern void export_status(); +extern void export_timer(); +extern void export_nonblocking(); + +extern const char* module_docstring; + +BOOST_PYTHON_MODULE(mpi) +{ + // Setup module documentation + scope().attr("__doc__") = module_docstring; + scope().attr("__author__") = "Douglas Gregor <doug.gregor@gmail.com>"; + scope().attr("__date__") = "$LastChangedDate$"; + scope().attr("__version__") = "$Revision$"; + scope().attr("__copyright__") = "Copyright (C) 2006 Douglas Gregor"; + scope().attr("__license__") = "http://www.boost.org/LICENSE_1_0.txt"; + + export_environment(); + export_exception(); + export_communicator(); + export_collectives(); + export_datatypes(); + export_request(); + export_status(); + export_timer(); + export_nonblocking(); +} + +} } } // end namespace boost::mpi::python diff --git a/src/boost/libs/mpi/src/python/py_communicator.cpp b/src/boost/libs/mpi/src/python/py_communicator.cpp new file mode 100644 index 000000000..6e53f56f9 --- /dev/null +++ b/src/boost/libs/mpi/src/python/py_communicator.cpp @@ -0,0 +1,133 @@ +// (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Authors: Douglas Gregor + +/** @file communicator.cpp + * + * This file reflects the Boost.MPI @c communicator class into + * Python. 
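+ *
+ * A hypothetical Python-side sketch, using the signatures exported in
+ * export_communicator() below:
+ *
+ *   import mpi
+ *   if mpi.world.rank == 0:
+ *       mpi.world.send(dest=1, tag=0, value="hello")
+ *   elif mpi.world.rank == 1:
+ *       msg = mpi.world.recv(source=0, tag=0)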
+ */ +#include <boost/python.hpp> +#include <boost/mpi.hpp> +#include <boost/mpi/python/serialize.hpp> +#include "request_with_value.hpp" + +using namespace boost::python; +using namespace boost::mpi; + +namespace boost { namespace mpi { namespace python { + +extern const char* communicator_docstring; +extern const char* communicator_default_constructor_docstring; +extern const char* communicator_rank_docstring; +extern const char* communicator_size_docstring; +extern const char* communicator_send_docstring; +extern const char* communicator_recv_docstring; +extern const char* communicator_isend_docstring; +extern const char* communicator_irecv_docstring; +extern const char* communicator_probe_docstring; +extern const char* communicator_iprobe_docstring; +extern const char* communicator_barrier_docstring; +extern const char* communicator_split_docstring; +extern const char* communicator_split_key_docstring; +extern const char* communicator_abort_docstring; + +object +communicator_recv(const communicator& comm, int source, int tag, + bool return_status) +{ + using boost::python::make_tuple; + + object result; + status stat = comm.recv(source, tag, result); + if (return_status) + return make_tuple(result, stat); + else + return result; +} + +request_with_value +communicator_irecv(const communicator& comm, int source, int tag) +{ + boost::shared_ptr<object> result(new object()); + request_with_value req(comm.irecv(source, tag, *result)); + req.m_internal_value = result; + return req; +} + +object +communicator_iprobe(const communicator& comm, int source, int tag) +{ + if (boost::optional<status> result = comm.iprobe(source, tag)) + return object(*result); + else + return object(); +} + +extern void export_skeleton_and_content(class_<communicator>&); + +void export_communicator() +{ + using boost::python::arg; + using boost::python::object; + + class_<communicator> comm("Communicator", communicator_docstring); + comm + .def(init<>()) + .add_property("rank", &communicator::rank, communicator_rank_docstring) + .add_property("size", &communicator::size, communicator_size_docstring) + .def("send", + (void (communicator::*)(int, int, const object&) const) + &communicator::send<object>, + (arg("dest"), arg("tag") = 0, arg("value") = object()), + communicator_send_docstring) + .def("recv", &communicator_recv, + (arg("source") = any_source, arg("tag") = any_tag, + arg("return_status") = false), + communicator_recv_docstring) + .def("isend", + (request (communicator::*)(int, int, const object&) const) + &communicator::isend<object>, + (arg("dest"), arg("tag") = 0, arg("value") = object()), + communicator_isend_docstring) + .def("irecv", &communicator_irecv, + (arg("source") = any_source, arg("tag") = any_tag), + communicator_irecv_docstring) + .def("probe", &communicator::probe, + (arg("source") = any_source, arg("tag") = any_tag), + communicator_probe_docstring) + .def("iprobe", &communicator_iprobe, + (arg("source") = any_source, arg("tag") = any_tag), + communicator_iprobe_docstring) + .def("barrier", &communicator::barrier, communicator_barrier_docstring) + .def("__nonzero__", &communicator::operator bool) + .def("split", + (communicator (communicator::*)(int) const)&communicator::split, + (arg("color")), communicator_split_docstring) + .def("split", + (communicator (communicator::*)(int, int) const)&communicator::split, + (arg("color"), arg("key"))) + .def("abort", &communicator::abort, arg("errcode"), + communicator_abort_docstring) + ; + + // Module-level attributes + scope().attr("any_source") = 
any_source; + scope().attr("any_tag") = any_tag; + + { + communicator world; + scope().attr("world") = world; + scope().attr("rank") = world.rank(); + scope().attr("size") = world.size(); + } + + // Export skeleton and content + export_skeleton_and_content(comm); +} + +} } } // end namespace boost::mpi::python diff --git a/src/boost/libs/mpi/src/python/py_environment.cpp b/src/boost/libs/mpi/src/python/py_environment.cpp new file mode 100644 index 000000000..27af7804a --- /dev/null +++ b/src/boost/libs/mpi/src/python/py_environment.cpp @@ -0,0 +1,123 @@ +// (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Authors: Douglas Gregor + +/** @file environment.cpp + * + * This file reflects the Boost.MPI "environment" class into Python + * methods at module level. + */ + +#include <locale> +#include <string> +#include <boost/python.hpp> +#include <boost/mpi.hpp> + +using namespace boost::python; +using namespace boost::mpi; + +namespace boost { namespace mpi { namespace python { + +extern const char* environment_init_docstring; +extern const char* environment_finalize_docstring; +extern const char* environment_abort_docstring; +extern const char* environment_initialized_docstring; +extern const char* environment_finalized_docstring; + +/** + * The environment used by the Boost.MPI Python module. This will be + * zero-initialized before it is used. + */ +static environment* env; + +bool mpi_init(list python_argv, bool abort_on_exception) +{ + // If MPI is already initialized, do nothing. + if (environment::initialized()) + return false; + +#if PY_MAJOR_VERSION >= 3 + #ifdef BOOST_MPI_HAS_NOARG_INITIALIZATION + env = new environment(abort_on_exception); + #else + #error No argument initialization, supported from MPI 1.2 and up, is needed when using Boost.MPI with Python 3.x + #endif +#else + + // Convert Python argv into C-style argc/argv. 
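+ // MPI_Init is allowed to consume or rewrite command-line arguments, so a
+ // mutable C-style copy of sys.argv is built here; any changes made by the
+ // MPI implementation are written back to sys.argv via PySys_SetArgv below.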
+ int my_argc = extract<int>(python_argv.attr("__len__")()); + char** my_argv = new char*[my_argc]; + for (int arg = 0; arg < my_argc; ++arg) + my_argv[arg] = strdup(extract<const char*>(python_argv[arg])); + + // Initialize MPI + int mpi_argc = my_argc; + char** mpi_argv = my_argv; + env = new environment(mpi_argc, mpi_argv, abort_on_exception); + + // If anything changed, convert C-style argc/argv into Python argv + if (mpi_argv != my_argv) + PySys_SetArgv(mpi_argc, mpi_argv); + + for (int arg = 0; arg < mpi_argc; ++arg) + free(mpi_argv[arg]); + delete [] mpi_argv; +#endif + + return true; +} + +void mpi_finalize() +{ + if (env) { + delete env; + env = 0; + } +} + +void export_environment() +{ + using boost::python::arg; + + def("init", mpi_init, (arg("argv"), arg("abort_on_exception") = true), + environment_init_docstring); + def("finalize", mpi_finalize, environment_finalize_docstring); + + // Setup initialization and finalization code + if (!environment::initialized()) { + // MPI_Init from sys.argv + object sys = object(handle<>(PyImport_ImportModule("sys"))); + mpi_init(extract<list>(sys.attr("argv")), true); + + // Setup MPI_Finalize call when the program exits + object atexit = object(handle<>(PyImport_ImportModule("atexit"))); + object finalize = scope().attr("finalize"); + atexit.attr("register")(finalize); + } + + def("abort", &environment::abort, arg("errcode"), + environment_abort_docstring); + def("initialized", &environment::initialized, + environment_initialized_docstring); + def("finalized", &environment::finalized, + environment_finalized_docstring); + scope().attr("max_tag") = environment::max_tag(); + scope().attr("collectives_tag") = environment::collectives_tag(); + scope().attr("processor_name") = environment::processor_name(); + + if (optional<int> host_rank = environment::host_rank()) + scope().attr("host_rank") = *host_rank; + else + scope().attr("host_rank") = object(); + + if (optional<int> io_rank = environment::io_rank()) + scope().attr("io_rank") = *io_rank; + else + scope().attr("io_rank") = object(); +} + +} } } // end namespace boost::mpi::python diff --git a/src/boost/libs/mpi/src/python/py_exception.cpp b/src/boost/libs/mpi/src/python/py_exception.cpp new file mode 100644 index 000000000..fee48c4d1 --- /dev/null +++ b/src/boost/libs/mpi/src/python/py_exception.cpp @@ -0,0 +1,54 @@ +// Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com> +// Copyright (C) 2005 The Trustees of Indiana University. + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Authors: Douglas Gregor + +/** @file exception.cpp + * + * This file reflects the Boost.MPI @c mpi_error class into + * Python. 
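+ *
+ * A hypothetical Python-side sketch, assuming the class exported below is
+ * reachable as mpi.Exception:
+ *
+ *   try:
+ *       mpi.world.send(dest=some_rank, tag=0, value=42)
+ *   except mpi.Exception as e:
+ *       print(e.routine, e.result_code)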
+ */ +#include <boost/python.hpp> +#include <boost/mpi/exception.hpp> +#include <string> +#include <boost/lexical_cast.hpp> +#include "utility.hpp" + +using namespace boost::python; +using namespace boost::mpi; + +namespace boost { namespace mpi { namespace python { + +extern const char* exception_docstring; +extern const char* exception_what_docstring; +extern const char* exception_routine_docstring; +extern const char* exception_result_code_docstring; + +str exception_str(const exception& e) +{ + return str(std::string(e.what()) + + " (code " + lexical_cast<std::string>(e.result_code())+")"); +} + +void export_exception() +{ + using boost::python::arg; + using boost::python::object; + + object type = + class_<exception> + ("Exception", exception_docstring, no_init) + .add_property("what", &exception::what, exception_what_docstring) + .add_property("routine", &exception::what, exception_routine_docstring) + .add_property("result_code", &exception::result_code, + exception_result_code_docstring) + .def("__str__", &exception_str) + ; + translate_exception<exception>::declare(type); +} + +} } } // end namespace boost::mpi::python diff --git a/src/boost/libs/mpi/src/python/py_nonblocking.cpp b/src/boost/libs/mpi/src/python/py_nonblocking.cpp new file mode 100644 index 000000000..8666c0499 --- /dev/null +++ b/src/boost/libs/mpi/src/python/py_nonblocking.cpp @@ -0,0 +1,250 @@ +// (C) Copyright 2007 +// Douglas Gregor <doug.gregor -at- gmail.com> +// Andreas Kloeckner <inform -at- tiker.net> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Authors: Douglas Gregor, Andreas Kloeckner + +/** @file py_nonblocking.cpp + * + * This file reflects the Boost.MPI nonblocking operations into Python + * functions. 
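+ *
+ * A hypothetical usage sketch, based on the names exported by
+ * export_nonblocking() and export_communicator():
+ *
+ *   reqs = mpi.RequestList([mpi.world.irecv(source=s) for s in (1, 2, 3)])
+ *   value, status, index = mpi.wait_any(reqs)
+ *
+ * wait_any() returns the received value (None for a send request), the
+ * Status object, and the index of the completed request, as implemented
+ * in wrap_wait_any() below.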
+ */ + +#include <vector> +#include <iterator> +#include <algorithm> +#include <boost/operators.hpp> +#include <boost/python.hpp> +#include <boost/python/stl_iterator.hpp> +#include <boost/python/suite/indexing/vector_indexing_suite.hpp> +#include <boost/mpi.hpp> +#include <boost/shared_ptr.hpp> +#include "request_with_value.hpp" + +using namespace std; +using namespace boost::python; +using namespace boost::mpi; + +namespace +{ + template <class ValueType, class RequestIterator> + class py_call_output_iterator : + public boost::output_iterator_helper< + py_call_output_iterator<ValueType, RequestIterator> > + { + private: + object m_callable; + RequestIterator m_request_iterator; + + public: + explicit py_call_output_iterator(object callable, + const RequestIterator &req_it) + : m_callable(callable), m_request_iterator(req_it) + { } + + py_call_output_iterator &operator=(ValueType const &v) + { + m_callable((m_request_iterator++)->get_value_or_none(), v); + return *this; + } + }; + + + + typedef std::vector<python::request_with_value> request_list; + typedef py_call_output_iterator<status, request_list::iterator> + status_value_iterator; + + + boost::shared_ptr<request_list> make_request_list_from_py_list(object iterable) + { + boost::shared_ptr<request_list> result(new request_list); + std::copy( + stl_input_iterator<python::request_with_value>(iterable), + stl_input_iterator<python::request_with_value>(), + back_inserter(*result)); + return result; + } + + + + + class request_list_indexing_suite : + public vector_indexing_suite<request_list, false, request_list_indexing_suite> + { + public: + // FIXME: requests are not comparable, thus __contains__ makes no sense. + // Unfortunately, indexing_suites insist on having __contains__ available. + // Just make it error out for now. 
+ + static bool + contains(request_list& container, request const& key) + { + PyErr_SetString(PyExc_NotImplementedError, "mpi requests are not comparable"); + throw error_already_set(); + } + }; + + + + + void check_request_list_not_empty(const request_list &requests) + { + if (requests.size() == 0) + { + PyErr_SetString(PyExc_ValueError, "cannot wait on an empty request vector"); + throw error_already_set(); + } + + } + + + + + + object wrap_wait_any(request_list &requests) + { + check_request_list_not_empty(requests); + + pair<status, request_list::iterator> result = + wait_any(requests.begin(), requests.end()); + + return boost::python::make_tuple( + result.second->get_value_or_none(), + result.first, + distance(requests.begin(), result.second)); + } + + + + + object wrap_test_any(request_list &requests) + { + check_request_list_not_empty(requests); + ::boost::optional<pair<status, request_list::iterator> > result = + test_any(requests.begin(), requests.end()); + + if (result) + return boost::python::make_tuple( + result->second->get_value_or_none(), + result->first, + distance(requests.begin(), result->second)); + else + return object(); + } + + + + + + void wrap_wait_all(request_list &requests, object py_callable) + { + check_request_list_not_empty(requests); + if (py_callable != object()) + wait_all(requests.begin(), requests.end(), + status_value_iterator(py_callable, requests.begin())); + else + wait_all(requests.begin(), requests.end()); + } + + + + + bool wrap_test_all(request_list &requests, object py_callable) + { + check_request_list_not_empty(requests); + if (py_callable != object()) + return bool(test_all(requests.begin(), requests.end(), + status_value_iterator(py_callable, requests.begin()))); + else + return bool(test_all(requests.begin(), requests.end())); + } + + + + + int wrap_wait_some(request_list &requests, object py_callable) + { + check_request_list_not_empty(requests); + request_list::iterator first_completed; + if (py_callable != object()) + first_completed = wait_some(requests.begin(), requests.end(), + status_value_iterator(py_callable, requests.begin())).second; + else + first_completed = wait_some(requests.begin(), requests.end()); + + return distance(requests.begin(), first_completed); + } + + + + + int wrap_test_some(request_list &requests, object py_callable) + { + check_request_list_not_empty(requests); + request_list::iterator first_completed; + if (py_callable != object()) + first_completed = test_some(requests.begin(), requests.end(), + status_value_iterator(py_callable, requests.begin())).second; + else + first_completed = test_some(requests.begin(), requests.end()); + + return distance(requests.begin(), first_completed); + } +} + + + + +namespace boost { namespace mpi { namespace python { + +extern const char* request_list_init_docstring; +extern const char* request_list_append_docstring; + +extern const char* nonblocking_wait_any_docstring; +extern const char* nonblocking_test_any_docstring; +extern const char* nonblocking_wait_all_docstring; +extern const char* nonblocking_test_all_docstring; +extern const char* nonblocking_wait_some_docstring; +extern const char* nonblocking_test_some_docstring; + +void export_nonblocking() +{ + using boost::python::arg; + + { + typedef request_list cl; + class_<cl>("RequestList", "A list of Request objects.") + .def("__init__", make_constructor(make_request_list_from_py_list), + /*arg("iterable"),*/ request_list_init_docstring) + .def(request_list_indexing_suite()) + ; + } + + def("wait_any", wrap_wait_any, + 
(arg("requests")), + nonblocking_wait_any_docstring); + def("test_any", wrap_test_any, + (arg("requests")), + nonblocking_test_any_docstring); + + def("wait_all", wrap_wait_all, + (arg("requests"), arg("callable") = object()), + nonblocking_wait_all_docstring); + def("test_all", wrap_test_all, + (arg("requests"), arg("callable") = object()), + nonblocking_test_all_docstring); + + def("wait_some", wrap_wait_some, + (arg("requests"), arg("callable") = object()), + nonblocking_wait_some_docstring); + def("test_some", wrap_test_some, + (arg("requests"), arg("callable") = object()), + nonblocking_test_some_docstring); +} + +} } } diff --git a/src/boost/libs/mpi/src/python/py_request.cpp b/src/boost/libs/mpi/src/python/py_request.cpp new file mode 100644 index 000000000..55b2b5aec --- /dev/null +++ b/src/boost/libs/mpi/src/python/py_request.cpp @@ -0,0 +1,111 @@ +// (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Authors: Douglas Gregor + +/** @file request.cpp + * + * This file reflects the Boost.MPI @c request class into + * Python. + */ +#include <boost/python.hpp> +#include <boost/mpi.hpp> +#include "request_with_value.hpp" + +using namespace boost::python; +using namespace boost::mpi; + +const object python::request_with_value::get_value() const +{ + if (m_internal_value.get()) + return *m_internal_value; + else if (m_external_value) + return *m_external_value; + else + { + PyErr_SetString(PyExc_ValueError, "request value not available"); + throw boost::python::error_already_set(); + } +} + +const object python::request_with_value::get_value_or_none() const +{ + if (m_internal_value.get()) + return *m_internal_value; + else if (m_external_value) + return *m_external_value; + else + return object(); +} + +const object python::request_with_value::wrap_wait() +{ + status stat = request::wait(); + if (m_internal_value.get() || m_external_value) + return boost::python::make_tuple(get_value(), stat); + else + return object(stat); +} + +const object python::request_with_value::wrap_test() +{ + ::boost::optional<status> stat = request::test(); + if (stat) + { + if (m_internal_value.get() || m_external_value) + return boost::python::make_tuple(get_value(), *stat); + else + return object(*stat); + } + else + return object(); +} + + +namespace boost { namespace mpi { namespace python { + +const object request_test(request &req) +{ + ::boost::optional<status> stat = req.test(); + if (stat) + return object(*stat); + else + return object(); +} + +extern const char* request_docstring; +extern const char* request_with_value_docstring; +extern const char* request_wait_docstring; +extern const char* request_test_docstring; +extern const char* request_cancel_docstring; +extern const char* request_value_docstring; + +void export_request() +{ + using boost::python::arg; + using boost::python::object; + + { + typedef request cl; + class_<cl>("Request", request_docstring, no_init) + .def("wait", &cl::wait, request_wait_docstring) + .def("test", &request_test, request_test_docstring) + .def("cancel", &cl::cancel, request_cancel_docstring) + ; + } + { + typedef request_with_value cl; + class_<cl, bases<request> >( + "RequestWithValue", request_with_value_docstring, no_init) + .def("wait", &cl::wrap_wait, request_wait_docstring) + .def("test", &cl::wrap_test, request_test_docstring) + ; + } + + 
implicitly_convertible<request, request_with_value>(); +} + +} } } // end namespace boost::mpi::python diff --git a/src/boost/libs/mpi/src/python/py_timer.cpp b/src/boost/libs/mpi/src/python/py_timer.cpp new file mode 100644 index 000000000..88b1b4062 --- /dev/null +++ b/src/boost/libs/mpi/src/python/py_timer.cpp @@ -0,0 +1,48 @@ +// (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Authors: Douglas Gregor + +/** @file timer.cpp + * + * This file reflects the Boost.MPI @c timer class into + * Python. + */ +#include <boost/python.hpp> +#include <boost/mpi/timer.hpp> + +using namespace boost::python; +using namespace boost::mpi; + +namespace boost { namespace mpi { namespace python { + +extern const char* timer_docstring; +extern const char* timer_default_constructor_docstring; +extern const char* timer_restart_docstring; +extern const char* timer_elapsed_docstring; +extern const char* timer_elapsed_min_docstring; +extern const char* timer_elapsed_max_docstring; +extern const char* timer_time_is_global_docstring; + +void export_timer() +{ + using boost::python::arg; + using boost::python::object; + + class_<timer>("Timer", timer_docstring) + .def(init<>()) + .def("restart", &timer::restart, timer_restart_docstring) + .add_property("elapsed", &timer::elapsed, timer_elapsed_docstring) + .add_property("elapsed_min", &timer::elapsed_min, + timer_elapsed_min_docstring) + .add_property("elapsed_max", &timer::elapsed_max, + timer_elapsed_max_docstring) + .add_property("time_is_global", &timer::time_is_global, + timer_time_is_global_docstring) + ; +} + +} } } // end namespace boost::mpi::python diff --git a/src/boost/libs/mpi/src/python/request_with_value.hpp b/src/boost/libs/mpi/src/python/request_with_value.hpp new file mode 100644 index 000000000..04d0c53e9 --- /dev/null +++ b/src/boost/libs/mpi/src/python/request_with_value.hpp @@ -0,0 +1,71 @@ +// (C) Copyright 2006 +// Douglas Gregor <doug.gregor -at- gmail.com> +// Andreas Kloeckner <inform -at- tiker.net> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Authors: Douglas Gregor, Andreas Kloeckner + +#ifndef BOOST_MPI_PYTHON_REQUEST_WITH_VALUE_HPP +#define BOOST_MPI_PYTHON_REQUEST_WITH_VALUE_HPP + +#include <boost/python.hpp> +#include <boost/mpi.hpp> + +namespace boost { namespace mpi { namespace python { + + /** This wrapper adds a @c boost::python::object value to the @c + * boost::mpi::request structure, for the benefit of @c irecv() requests. + * + * In order to be able to return the value of his requests to the user, we + * need a handle that we can update to contain the transmitted value once the + * request completes. Since we're passing the address on to irecv to fill at + * any time in the future, this address may not change over time. + * + * There are two possible cases: + * - plain irecv() + * - skeleton-content irecv() + * + * In the first case, we need to own the storage from this object, the + * m_internal_value is used for this. In the second case, the updated + * python::object is part of a boost::mpi::python::content object: the + * m_external_value field handles this case. 
Furthermore, in the latter case, + * we now have a lifetime dependency on that content object; this can be + * handled with the BPL's with_custodian_and_ward facility. + * + * Since requests and request_with_value are supposed to be copyconstructible, + * we can't put the handle immediately inside this instance. Moreover, since + * we need to be able to put request_with_value inside request_vectors, any + * values we own must be held in a shared_ptr instance. + */ + + class request_with_value : public request + { + private: + boost::shared_ptr<boost::python::object> m_internal_value; + boost::python::object *m_external_value; + + public: + request_with_value() + : m_external_value(0) + { } + request_with_value(const request &req) + : request(req), m_external_value(0) + { } + + const boost::python::object get_value() const; + const boost::python::object get_value_or_none() const; + + const boost::python::object wrap_wait(); + const boost::python::object wrap_test(); + + friend request_with_value communicator_irecv(const communicator &, int, int); + friend request_with_value communicator_irecv_content( + const communicator&, int, int, content&); + }; + +} } } + +#endif diff --git a/src/boost/libs/mpi/src/python/serialize.cpp b/src/boost/libs/mpi/src/python/serialize.cpp new file mode 100644 index 000000000..92004a340 --- /dev/null +++ b/src/boost/libs/mpi/src/python/serialize.cpp @@ -0,0 +1,79 @@ +// (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Authors: Douglas Gregor + +/** @file serialize.cpp + * + * This file provides Boost.Serialization support for Python objects. + */ +#include <boost/mpi/python/serialize.hpp> +#include <boost/mpi/python/skeleton_and_content.hpp> +#include <boost/mpi.hpp> + +namespace boost { namespace python { + +struct pickle::data_t { + object module; + object dumps; + object loads; +}; + + +/// Data used for communicating with the Python `pickle' module. 
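+/// The pointer starts out null; initialize_data() fills it in lazily on the
+/// first call to dumps() or loads(), importing the Python `pickle' module at
+/// that point.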
+pickle::data_t* pickle::data; + +str pickle::dumps(object obj, int protocol) +{ + if (!data) initialize_data(); + return extract<str>((data->dumps)(obj, protocol)); +} + +object pickle::loads(str s) +{ + if (!data) initialize_data(); + return ((data->loads)(s)); +} + +void pickle::initialize_data() +{ + data = new data_t; + data->module = object(handle<>(PyImport_ImportModule("pickle"))); + data->dumps = data->module.attr("dumps"); + data->loads = data->module.attr("loads"); +} + +} } // end namespace boost::python + +BOOST_PYTHON_DIRECT_SERIALIZATION_ARCHIVE_IMPL( + ::boost::mpi::packed_iarchive, + ::boost::mpi::packed_oarchive) + +namespace boost { namespace mpi { namespace python { namespace detail { + + boost::python::object skeleton_proxy_base_type; + + // A map from Python type objects to skeleton/content handlers + typedef std::map<PyTypeObject*, skeleton_content_handler> + skeleton_content_handlers_type; + + BOOST_MPI_PYTHON_DECL skeleton_content_handlers_type skeleton_content_handlers; + + bool + skeleton_and_content_handler_registered(PyTypeObject* type) + { + return + skeleton_content_handlers.find(type) != skeleton_content_handlers.end(); + } + + void + register_skeleton_and_content_handler(PyTypeObject* type, + const skeleton_content_handler& handler) + { + skeleton_content_handlers[type] = handler; + } + +} } } } // end namespace boost::mpi::python::detail diff --git a/src/boost/libs/mpi/src/python/skeleton_and_content.cpp b/src/boost/libs/mpi/src/python/skeleton_and_content.cpp new file mode 100644 index 000000000..d5376c14f --- /dev/null +++ b/src/boost/libs/mpi/src/python/skeleton_and_content.cpp @@ -0,0 +1,173 @@ +// (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Authors: Douglas Gregor + +/** @file skeleton_and_content.cpp + * + * This file reflects the skeleton/content facilities into Python. + */ +#include <boost/mpi/python/skeleton_and_content.hpp> +#include <boost/mpi/python/serialize.hpp> +#include <boost/python/list.hpp> +#include <typeinfo> +#include <list> +#include "utility.hpp" +#include "request_with_value.hpp" + +using namespace boost::python; +using namespace boost::mpi; + +namespace boost { namespace mpi { namespace python { + +namespace detail { + typedef std::map<PyTypeObject*, skeleton_content_handler> + skeleton_content_handlers_type; + +// We're actually importing skeleton_content_handlers from skeleton_and_content.cpp. +#if defined(BOOST_HAS_DECLSPEC) && (defined(BOOST_MPI_PYTHON_DYN_LINK) || defined(BOOST_ALL_DYN_LINK)) +# define BOOST_SC_DECL __declspec(dllimport) +#else +# define BOOST_SC_DECL +#endif + + extern BOOST_SC_DECL skeleton_content_handlers_type skeleton_content_handlers; +} + +/** + * An exception that will be thrown when the object passed to the + * Python version of skeleton() does not have a skeleton. + */ +struct object_without_skeleton : public std::exception { + explicit object_without_skeleton(object value) : value(value) { } + virtual ~object_without_skeleton() throw() { } + + object value; +}; + +str object_without_skeleton_str(const object_without_skeleton& e) +{ + return str("\nThe skeleton() or get_content() function was invoked for a Python\n" + "object that is not supported by the Boost.MPI skeleton/content\n" + "mechanism. 
To transfer objects via skeleton/content, you must\n" + "register the C++ type of this object with the C++ function:\n" + " boost::mpi::python::register_skeleton_and_content()\n" + "Object: " + str(e.value) + "\n"); +} + +/** + * Extract the "skeleton" from a Python object. In truth, all we're + * doing at this point is verifying that the object is a C++ type that + * has been registered for the skeleton/content mechanism. + */ +object skeleton(object value) +{ + PyTypeObject* type = value.ptr()->ob_type; + detail::skeleton_content_handlers_type::iterator pos = + detail::skeleton_content_handlers.find(type); + if (pos == detail::skeleton_content_handlers.end()) + throw object_without_skeleton(value); + else + return pos->second.get_skeleton_proxy(value); +} + +/** + * Extract the "content" from a Python object, which must be a C++ + * type that has been registered for the skeleton/content mechanism. + */ +content get_content(object value) +{ + PyTypeObject* type = value.ptr()->ob_type; + detail::skeleton_content_handlers_type::iterator pos = + detail::skeleton_content_handlers.find(type); + if (pos == detail::skeleton_content_handlers.end()) + throw object_without_skeleton(value); + else + return pos->second.get_content(value); +} + +/// Send the content part of a Python object. +void +communicator_send_content(const communicator& comm, int dest, int tag, + const content& c) +{ + comm.send(dest, tag, c.base()); +} + +/// Receive the content of a Python object. We return the object +/// received, not the content wrapper. +object +communicator_recv_content(const communicator& comm, int source, int tag, + const content& c, bool return_status) +{ + using boost::python::make_tuple; + + status stat = comm.recv(source, tag, c.base()); + if (return_status) + return make_tuple(c.object, stat); + else + return c.object; +} + +/// Receive the content of a Python object. The request object's value +/// attribute will reference the object whose content is being +/// received, not the content wrapper. +request_with_value +communicator_irecv_content(const communicator& comm, int source, int tag, + content& c) +{ + request_with_value req(comm.irecv(source, tag, c.base())); + req.m_external_value = &c.object; + return req; +} + +extern const char* object_without_skeleton_docstring; +extern const char* object_without_skeleton_object_docstring; +extern const char* skeleton_proxy_docstring; +extern const char* skeleton_proxy_object_docstring; +extern const char* content_docstring; +extern const char* skeleton_docstring; +extern const char* get_content_docstring; + +void export_skeleton_and_content(class_<communicator>& comm) +{ + using boost::python::arg; + + // Expose the object_without_skeleton exception + object type = + class_<object_without_skeleton> + ("ObjectWithoutSkeleton", object_without_skeleton_docstring, no_init) + .def_readonly("object", &object_without_skeleton::value, + object_without_skeleton_object_docstring) + .def("__str__", &object_without_skeleton_str) + ; + translate_exception<object_without_skeleton>::declare(type); + + // Expose the Python variants of "skeleton_proxy" and "content", and + // their generator functions. 
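+ // A hypothetical Python-side flow, per the docstrings above: the sender
+ // transmits the shape once and then only the data as it changes, e.g.
+ //   comm.send(dest=1, value=mpi.skeleton(obj))        # shape only
+ //   comm.send(dest=1, value=mpi.get_content(obj))     # data only
+ // while the matching receive for the content supplies a buffer:
+ //   comm.recv(source=0, buffer=mpi.get_content(mine))
+ // (`obj' and `mine' stand for objects of a registered C++ type.)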
+ detail::skeleton_proxy_base_type = + class_<skeleton_proxy_base>("SkeletonProxy", skeleton_proxy_docstring, + no_init) + .def_readonly("object", &skeleton_proxy_base::object, + skeleton_proxy_object_docstring); + class_<content>("Content", content_docstring, no_init); + def("skeleton", &skeleton, arg("object"), skeleton_docstring); + def("get_content", &get_content, arg("object"), get_content_docstring); + + // Expose communicator send/recv operations for content. + comm + .def("send", communicator_send_content, + (arg("dest"), arg("tag") = 0, arg("value"))) + .def("recv", communicator_recv_content, + (arg("source") = any_source, arg("tag") = any_tag, arg("buffer"), + arg("return_status") = false)) + .def("irecv", communicator_irecv_content, + (arg("source") = any_source, arg("tag") = any_tag, arg("buffer")), + with_custodian_and_ward_postcall<0, 4>() + ); +} + +} } } // end namespace boost::mpi::python diff --git a/src/boost/libs/mpi/src/python/status.cpp b/src/boost/libs/mpi/src/python/status.cpp new file mode 100644 index 000000000..a74221a7a --- /dev/null +++ b/src/boost/libs/mpi/src/python/status.cpp @@ -0,0 +1,41 @@ +// (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Authors: Douglas Gregor + +/** @file status.cpp + * + * This file reflects the Boost.MPI @c status class into + * Python. + */ +#include <boost/python.hpp> +#include <boost/mpi.hpp> + +using namespace boost::python; +using namespace boost::mpi; + +namespace boost { namespace mpi { namespace python { + +extern const char* status_docstring; +extern const char* status_source_docstring; +extern const char* status_tag_docstring; +extern const char* status_error_docstring; +extern const char* status_cancelled_docstring; + +void export_status() +{ + using boost::python::arg; + using boost::python::object; + + class_<status>("Status", status_docstring, no_init) + .add_property("source", &status::source, status_source_docstring) + .add_property("tag", &status::tag, status_tag_docstring) + .add_property("error", &status::error, status_error_docstring) + .add_property("cancelled", &status::cancelled, status_cancelled_docstring) + ; +} + +} } } // end namespace boost::mpi::python diff --git a/src/boost/libs/mpi/src/python/utility.hpp b/src/boost/libs/mpi/src/python/utility.hpp new file mode 100644 index 000000000..ed0016761 --- /dev/null +++ b/src/boost/libs/mpi/src/python/utility.hpp @@ -0,0 +1,43 @@ +// (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Authors: Douglas Gregor +#ifndef BOOST_MPI_PYTHON_UTILITY_HPP +#define BOOST_MPI_PYTHON_UTILITY_HPP + +/** @file utility.hpp + * + * This file is a utility header for the Boost.MPI Python bindings. 
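+ *
+ * translate_exception<E>::declare(type) registers a Boost.Python exception
+ * translator that raises the given Python class with the wrapped C++
+ * exception object as its value whenever an E escapes into Python; see
+ * py_exception.cpp and skeleton_and_content.cpp for the two uses.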
+ */ +#include <boost/python.hpp> + +namespace boost { namespace mpi { namespace python { + +template<typename E> +class translate_exception +{ + explicit translate_exception(boost::python::object type) : type(type) { } + +public: + static void declare(boost::python::object type) + { + using boost::python::register_exception_translator; + register_exception_translator<E>(translate_exception(type)); + } + + void operator()(const E& e) const + { + using boost::python::object; + PyErr_SetObject(type.ptr(), object(e).ptr()); + } + +private: + boost::python::object type; +}; + +} } } // end namespace boost::mpi::python + +#endif // BOOST_MPI_PYTHON_UTILITY_HPP diff --git a/src/boost/libs/mpi/src/request.cpp b/src/boost/libs/mpi/src/request.cpp new file mode 100644 index 000000000..3ba5695aa --- /dev/null +++ b/src/boost/libs/mpi/src/request.cpp @@ -0,0 +1,239 @@ +// Copyright (C) 2006 Douglas Gregor. + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +#include <boost/mpi/request.hpp> +#include <boost/mpi/status.hpp> +#include <boost/mpi/communicator.hpp> +#include <boost/mpi/detail/request_handlers.hpp> + +namespace boost { namespace mpi { + +request::request() + : m_handler() {} + +void +request::preserve(boost::shared_ptr<void> d) { + if (!m_preserved) { + m_preserved = d; + } else { + boost::shared_ptr<void> cdr = m_preserved; + typedef std::pair<boost::shared_ptr<void>, boost::shared_ptr<void> > cons; + boost::shared_ptr<cons> p(new cons(d, cdr)); + m_preserved = p; + } +} +request request::make_dynamic() { return request(new dynamic_handler()); } + +request +request::make_bottom_send(communicator const& comm, int dest, int tag, MPI_Datatype tp) { + trivial_handler* handler = new trivial_handler; + BOOST_MPI_CHECK_RESULT(MPI_Isend, + (MPI_BOTTOM, 1, tp, + dest, tag, comm, &handler->m_request)); + return request(handler); +} + +request +request::make_empty_send(communicator const& comm, int dest, int tag) { + trivial_handler* handler = new trivial_handler; + BOOST_MPI_CHECK_RESULT(MPI_Isend, + (MPI_BOTTOM, 0, MPI_PACKED, + dest, tag, comm, &handler->m_request)); + return request(handler); +} + +request +request::make_bottom_recv(communicator const& comm, int dest, int tag, MPI_Datatype tp) { + trivial_handler* handler = new trivial_handler; + BOOST_MPI_CHECK_RESULT(MPI_Irecv, + (MPI_BOTTOM, 1, tp, + dest, tag, comm, &handler->m_request)); + return request(handler); +} + +request +request::make_empty_recv(communicator const& comm, int dest, int tag) { + trivial_handler* handler = new trivial_handler; + BOOST_MPI_CHECK_RESULT(MPI_Irecv, + (MPI_BOTTOM, 0, MPI_PACKED, + dest, tag, comm, &handler->m_request)); + return request(handler); +} + +request +request::make_packed_send(communicator const& comm, int dest, int tag, void const* buffer, std::size_t n) { +#if defined(BOOST_MPI_USE_IMPROBE) + { + trivial_handler* handler = new trivial_handler; + BOOST_MPI_CHECK_RESULT(MPI_Isend, + (const_cast<void*>(buffer), n, MPI_PACKED, + dest, tag, comm, &handler->m_request)); + return request(handler); + } +#else + { + dynamic_handler *handler = new dynamic_handler; + request req(handler); + shared_ptr<std::size_t> size(new std::size_t(n)); + req.preserve(size); + BOOST_MPI_CHECK_RESULT(MPI_Isend, + (size.get(), 1, + get_mpi_datatype(*size), + dest, tag, comm, handler->m_requests)); + BOOST_MPI_CHECK_RESULT(MPI_Isend, + (const_cast<void*>(buffer), *size, + MPI_PACKED, 
+ dest, tag, comm, handler->m_requests+1)); + return req; + } +#endif +} + +/*************************************************************************** + * handlers * + ***************************************************************************/ + +request::handler::~handler() {} + +optional<MPI_Request&> +request::legacy_handler::trivial() { + return boost::none; +} + +bool +request::legacy_handler::active() const { + return m_requests[0] != MPI_REQUEST_NULL || m_requests[1] != MPI_REQUEST_NULL; +} + +// trivial handler + +request::trivial_handler::trivial_handler() + : m_request(MPI_REQUEST_NULL) {} + +status +request::trivial_handler::wait() +{ + status result; + BOOST_MPI_CHECK_RESULT(MPI_Wait, (&m_request, &result.m_status)); + return result; +} + + +optional<status> +request::trivial_handler::test() +{ + status result; + int flag = 0; + BOOST_MPI_CHECK_RESULT(MPI_Test, + (&m_request, &flag, &result.m_status)); + return flag != 0? optional<status>(result) : optional<status>(); +} + +void +request::trivial_handler::cancel() +{ + BOOST_MPI_CHECK_RESULT(MPI_Cancel, (&m_request)); +} + +bool +request::trivial_handler::active() const +{ + return m_request != MPI_REQUEST_NULL; +} + +optional<MPI_Request&> +request::trivial_handler::trivial() +{ + return m_request; +} + +// dynamic handler + +request::dynamic_handler::dynamic_handler() +{ + m_requests[0] = MPI_REQUEST_NULL; + m_requests[1] = MPI_REQUEST_NULL; +} + +status +request::dynamic_handler::wait() +{ + // This request is a send of a serialized type, broken into two + // separate messages. Complete both sends at once. + MPI_Status stats[2]; + int error_code = MPI_Waitall(2, m_requests, stats); + if (error_code == MPI_ERR_IN_STATUS) { + // Dig out which status structure has the error, and use that + // one when throwing the exception. + if (stats[0].MPI_ERROR == MPI_SUCCESS + || stats[0].MPI_ERROR == MPI_ERR_PENDING) + boost::throw_exception(exception("MPI_Waitall", stats[1].MPI_ERROR)); + else + boost::throw_exception(exception("MPI_Waitall", stats[0].MPI_ERROR)); + } else if (error_code != MPI_SUCCESS) { + // There was an error somewhere in the MPI_Waitall call; throw + // an exception for it. + boost::throw_exception(exception("MPI_Waitall", error_code)); + } + + // No errors. Returns the first status structure. + status result; + result.m_status = stats[0]; + return result; +} + +optional<status> +request::dynamic_handler::test() +{ + // This request is a send of a serialized type, broken into two + // separate messages. We only get a result if both complete. + MPI_Status stats[2]; + int flag = 0; + int error_code = MPI_Testall(2, m_requests, &flag, stats); + if (error_code == MPI_ERR_IN_STATUS) { + // Dig out which status structure has the error, and use that + // one when throwing the exception. + if (stats[0].MPI_ERROR == MPI_SUCCESS + || stats[0].MPI_ERROR == MPI_ERR_PENDING) + boost::throw_exception(exception("MPI_Testall", stats[1].MPI_ERROR)); + else + boost::throw_exception(exception("MPI_Testall", stats[0].MPI_ERROR)); + } else if (error_code != MPI_SUCCESS) { + // There was an error somewhere in the MPI_Testall call; throw + // an exception for it. + boost::throw_exception(exception("MPI_Testall", error_code)); + } + + // No errors. Returns the second status structure if the send has + // completed. 
+ if (flag != 0) { + status result; + result.m_status = stats[1]; + return result; + } else { + return optional<status>(); + } +} + +void +request::dynamic_handler::cancel() +{ + BOOST_MPI_CHECK_RESULT(MPI_Cancel, (&m_requests[0])); + BOOST_MPI_CHECK_RESULT(MPI_Cancel, (&m_requests[1])); +} + +bool +request::dynamic_handler::active() const +{ + return (m_requests[0] != MPI_REQUEST_NULL + || m_requests[1] != MPI_REQUEST_NULL); +} + +optional<MPI_Request&> +request::dynamic_handler::trivial() { + return boost::none; +} + +} } // end namespace boost::mpi diff --git a/src/boost/libs/mpi/src/text_skeleton_oarchive.cpp b/src/boost/libs/mpi/src/text_skeleton_oarchive.cpp new file mode 100644 index 000000000..7817ea785 --- /dev/null +++ b/src/boost/libs/mpi/src/text_skeleton_oarchive.cpp @@ -0,0 +1,20 @@ +// (C) Copyright 2005 Matthias Troyer + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Authors: Matthias Troyer + +#define BOOST_ARCHIVE_SOURCE +#include <boost/mpi/detail/text_skeleton_oarchive.hpp> + +#include <boost/archive/detail/archive_serializer_map.hpp> +#include <boost/archive/impl/archive_serializer_map.ipp> + +namespace boost { namespace archive { +// explicitly instantiate all required templates + +// template class detail::archive_serializer_map<text_oarchive>; + +} } // end namespace boost::archive diff --git a/src/boost/libs/mpi/src/timer.cpp b/src/boost/libs/mpi/src/timer.cpp new file mode 100644 index 000000000..e3a7dfc43 --- /dev/null +++ b/src/boost/libs/mpi/src/timer.cpp @@ -0,0 +1,25 @@ +// Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +#include <boost/mpi/timer.hpp> +#include <boost/mpi/exception.hpp> + +namespace boost { namespace mpi { + +bool timer::time_is_global() +{ + int* is_global; + int found = 0; + + BOOST_MPI_CHECK_RESULT(MPI_Comm_get_attr, + (MPI_COMM_WORLD, MPI_WTIME_IS_GLOBAL, &is_global, + &found)); + if (!found) + return false; + else + return *is_global != 0; +} + +} } // end namespace boost::mpi diff --git a/src/boost/libs/mpi/test/Jamfile.v2 b/src/boost/libs/mpi/test/Jamfile.v2 new file mode 100644 index 000000000..ac9ac2539 --- /dev/null +++ b/src/boost/libs/mpi/test/Jamfile.v2 @@ -0,0 +1,55 @@ +# Support for the Message Passing Interface (MPI) +# +# (C) Copyright 2005, 2006 Trustees of Indiana University +# (C) Copyright 2005 Douglas Gregor +# +# Distributed under the Boost Software License, Version 1.0. (See accompanying +# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt.) 
+# +# Authors: Douglas Gregor +# Andrew Lumsdaine + +project : requirements <library>/boost//mpi ; +import mpi : mpi-test ; + +if [ mpi.configured ] +{ +test-suite mpi + : + [ mpi-test version_test : : : 1 ] + [ mpi-test block_nonblock_test-b2nb : block_nonblock_test.cpp : : 2 ] + [ mpi-test block_nonblock_test-nb2b : block_nonblock_test.cpp : : 2 ] + [ mpi-test random_gather : ../example/random_gather.cpp : : 2 ] + [ mpi-test random_scatter : ../example/random_scatter.cpp : : 2 ] + [ mpi-test cartesian_communicator : ../example/cartesian_communicator.cpp : : 24 ] + [ mpi-test cartesian_topology_init_test : : : 1 ] + [ mpi-test broadcast_stl_test : : : 2 ] + [ mpi-test all_gather_test : : : 1 2 11 ] + [ mpi-test all_reduce_test : : : 1 2 11 ] + [ mpi-test all_to_all_test : : : 1 2 11 ] + [ mpi-test broadcast_test : : : 2 17 ] + [ mpi-test gather_test : : : 1 2 11 ] + [ mpi-test is_mpi_op_test : : : 1 ] + [ mpi-test mt_level_test : : : 1 ] + [ mpi-test mt_init_test : : : 1 4 ] + # Note: Microsoft MPI fails nonblocking_test on 1 processor + [ mpi-test nonblocking_test : : : 2 11 24 ] + [ mpi-test reduce_test ] + [ mpi-test ring_test : : : 2 3 4 7 8 13 17 ] + [ mpi-test sendrecv_test : : : 1 4 7 48 ] + [ mpi-test wait_any_test : : : 1 4 7 20 ] + [ mpi-test wait_all_vector_test : : : 2 ] + [ mpi-test scan_test ] + [ mpi-test scatter_test ] + # Note: Microsoft MPI fails all skeleton-content tests + [ mpi-test skeleton_content_test : : : 2 3 4 7 8 13 17 ] + [ mpi-test graph_topology_test : : : 2 7 13 ] + [ mpi-test cartesian_topology_test : : : 24 ] + [ mpi-test pointer_test : : : 2 ] + [ mpi-test groups_test ] + # tests that require -std=c++11 + [ mpi-test sendrecv_vector : : : 2 ] + # Intel MPI 2018 and older are axtected to fail: + [ mpi-test non_blocking_any_source : : : 2 17 ] + ; +} diff --git a/src/boost/libs/mpi/test/all_gather_test.cpp b/src/boost/libs/mpi/test/all_gather_test.cpp new file mode 100644 index 000000000..c2385f61e --- /dev/null +++ b/src/boost/libs/mpi/test/all_gather_test.cpp @@ -0,0 +1,148 @@ +// Copyright (C) 2005-2006 Douglas Gregor <doug.gregor@gmail.com> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// A test of the all_gather() collective. + +#include <algorithm> + +#include <boost/mpi/collectives/all_gather.hpp> +#include <boost/mpi/collectives/all_gatherv.hpp> +#include <boost/mpi/environment.hpp> +#include <boost/mpi/communicator.hpp> +#include <boost/serialization/string.hpp> +#include <boost/serialization/list.hpp> +#include <boost/iterator/counting_iterator.hpp> +#include <boost/lexical_cast.hpp> + +#define BOOST_TEST_MODULE mpi_all_gather +#include <boost/test/included/unit_test.hpp> + +#include "gps_position.hpp" + +namespace mpi = boost::mpi; + +template<typename Generator> +void +all_gather_test(const mpi::communicator& comm, Generator generator, + std::string kind) +{ + typedef typename Generator::result_type value_type; + value_type value = generator(comm.rank()); + + std::vector<value_type> values; + if (comm.rank() == 0) { + std::cout << "Gathering " << kind << "..."; + std::cout.flush(); + } + + mpi::all_gather(comm, value, values); + + std::vector<value_type> expected_values; + for (int p = 0; p < comm.size(); ++p) + expected_values.push_back(generator(p)); + BOOST_CHECK(values == expected_values); + if (comm.rank() == 0 && values == expected_values) + std::cout << "OK." 
<< std::endl; + + (comm.barrier)(); +} + +template<typename Generator> +void +all_gatherv_test(const mpi::communicator& comm, Generator generator, + std::string kind) +{ + typedef typename Generator::result_type value_type; + using boost::mpi::all_gatherv; + + std::vector<value_type> myvalues, expected, values; + std::vector<int> sizes; + for(int r = 0; r < comm.size(); ++r) { + value_type value = generator(r); + sizes.push_back(r+1); + for (int k=0; k < r+1; ++k) { + expected.push_back(value); + if(comm.rank() == r) { + myvalues.push_back(value); + } + } + } + if (comm.rank() == 0) { + std::cout << "Gathering " << kind << "..."; + std::cout.flush(); + } + + mpi::all_gatherv(comm, myvalues, values, sizes); + + BOOST_CHECK(values == expected); + + if (comm.rank() == 0 && values == expected) + std::cout << "OK." << std::endl; + + (comm.barrier)(); +} + +// Generates integers to test with gather() +struct int_generator +{ + typedef int result_type; + + int operator()(int p) const { return 17 + p; } +}; + +// Generates GPS positions to test with gather() +struct gps_generator +{ + typedef gps_position result_type; + + gps_position operator()(int p) const + { + return gps_position(39 + p, 16, 20.2799); + } +}; + +struct string_generator +{ + typedef std::string result_type; + + std::string operator()(int p) const + { + std::string result = boost::lexical_cast<std::string>(p); + result += " rosebud"; + if (p != 1) result += 's'; + return result; + } +}; + +struct string_list_generator +{ + typedef std::list<std::string> result_type; + + std::list<std::string> operator()(int p) const + { + std::list<std::string> result; + for (int i = 0; i <= p; ++i) { + std::string value = boost::lexical_cast<std::string>(i); + result.push_back(value); + } + return result; + } +}; + +BOOST_AUTO_TEST_CASE(all_gather) +{ + boost::mpi::environment env; + mpi::communicator comm; + all_gather_test(comm, int_generator(), "integers"); + all_gather_test(comm, gps_generator(), "GPS positions"); + all_gather_test(comm, string_generator(), "string"); + all_gather_test(comm, string_list_generator(), "list of strings"); + + all_gatherv_test(comm, int_generator(), "integers"); + all_gatherv_test(comm, gps_generator(), "GPS positions"); + all_gatherv_test(comm, string_generator(), "string"); + all_gatherv_test(comm, string_list_generator(), "list of strings"); +} diff --git a/src/boost/libs/mpi/test/all_reduce_test.cpp b/src/boost/libs/mpi/test/all_reduce_test.cpp new file mode 100644 index 000000000..99fee9b8a --- /dev/null +++ b/src/boost/libs/mpi/test/all_reduce_test.cpp @@ -0,0 +1,309 @@ +// Copyright (C) 2005, 2006 Douglas Gregor. + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// A test of the all_reduce() collective. +#include <boost/mpi/collectives/all_reduce.hpp> +#include <boost/mpi/communicator.hpp> +#include <boost/mpi/environment.hpp> +#include <vector> +#include <algorithm> +#include <boost/serialization/string.hpp> +#include <boost/iterator/counting_iterator.hpp> +#include <boost/lexical_cast.hpp> +#include <numeric> + +#define BOOST_TEST_MODULE mpi_all_reduce +#include <boost/test/included/unit_test.hpp> + +using boost::mpi::communicator; + +// A simple point class that we can build, add, compare, and +// serialize. 
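+
+// Editorial aside (illustrative sketch, not part of the upstream test; the
+// helper name is invented): the class declared just below is handed to
+// all_reduce() together with std::plus<point>; the same call shape works for
+// any built-in type as well, for example summing the ranks themselves.
+#include <functional>
+inline int example_sum_of_ranks(const boost::mpi::communicator& comm)
+{
+  // Every rank contributes its own rank number; every rank receives the total.
+  return boost::mpi::all_reduce(comm, comm.rank(), std::plus<int>());
+}
+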
+struct point +{ + point() : x(0), y(0), z(0) { } + point(int x, int y, int z) : x(x), y(y), z(z) { } + + int x; + int y; + int z; + + private: + template<typename Archiver> + void serialize(Archiver& ar, unsigned int /*version*/) + { + ar & x & y & z; + } + + friend class boost::serialization::access; +}; + +std::ostream& operator<<(std::ostream& out, const point& p) +{ + return out << p.x << ' ' << p.y << ' ' << p.z; +} + +bool operator==(const point& p1, const point& p2) +{ + return p1.x == p2.x && p1.y == p2.y && p1.z == p2.z; +} + +bool operator!=(const point& p1, const point& p2) +{ + return !(p1 == p2); +} + +point operator+(const point& p1, const point& p2) +{ + return point(p1.x + p2.x, p1.y + p2.y, p1.z + p2.z); +} + +// test lexical order +bool operator<(const point& p1, const point& p2) +{ + return (p1.x < p2.x + ? true + : (p1.x > p2.x + ? false + : p1.y < p2.y )); +} + +namespace boost { namespace mpi { + + template <> + struct is_mpi_datatype<point> : public mpl::true_ { }; + +} } // end namespace boost::mpi + +template<typename Generator, typename Op> +void +all_reduce_one_test(const communicator& comm, Generator generator, + const char* type_kind, Op op, const char* op_kind, + typename Generator::result_type init, bool in_place) +{ + typedef typename Generator::result_type value_type; + value_type value = generator(comm.rank()); + + using boost::mpi::all_reduce; + using boost::mpi::inplace; + + if (comm.rank() == 0) { + std::cout << "Reducing to " << op_kind << " of " << type_kind << "..."; + std::cout.flush(); + } + + value_type result_value; + if (in_place) { + all_reduce(comm, inplace(value), op); + result_value = value; + } else { + result_value = all_reduce(comm, value, op); + } + + // Compute expected result + std::vector<value_type> generated_values; + for (int p = 0; p < comm.size(); ++p) + generated_values.push_back(generator(p)); + value_type expected_result = std::accumulate(generated_values.begin(), + generated_values.end(), + init, op); + BOOST_CHECK(result_value == expected_result); + if (result_value == expected_result && comm.rank() == 0) + std::cout << "OK." << std::endl; + + (comm.barrier)(); +} + +template<typename Generator, typename Op> +void +all_reduce_array_test(const communicator& comm, Generator generator, + const char* type_kind, Op op, const char* op_kind, + typename Generator::result_type init, bool in_place) +{ + typedef typename Generator::result_type value_type; + value_type value = generator(comm.rank()); + std::vector<value_type> send(10, value); + + using boost::mpi::all_reduce; + using boost::mpi::inplace; + + if (comm.rank() == 0) { + char const* place = in_place ? 
"in place" : "out of place"; + std::cout << "Reducing (" << place << ") array to " << op_kind << " of " << type_kind << "..."; + std::cout.flush(); + } + std::vector<value_type> result; + if (in_place) { + all_reduce(comm, inplace(&(send[0])), send.size(), op); + result.swap(send); + } else { + std::vector<value_type> recv(10, value_type()); + all_reduce(comm, &(send[0]), send.size(), &(recv[0]), op); + result.swap(recv); + } + + // Compute expected result + std::vector<value_type> generated_values; + for (int p = 0; p < comm.size(); ++p) + generated_values.push_back(generator(p)); + value_type expected_result = std::accumulate(generated_values.begin(), + generated_values.end(), + init, op); + + bool got_expected_result = (std::equal_range(result.begin(), result.end(), + expected_result) + == std::make_pair(result.begin(), result.end())); + BOOST_CHECK(got_expected_result); + if (got_expected_result && comm.rank() == 0) + std::cout << "OK." << std::endl; + + (comm.barrier)(); +} + +// Test the 4 families of all reduce: (value, array) X (in place, out of place) +template<typename Generator, typename Op> +void +all_reduce_test(const communicator& comm, Generator generator, + const char* type_kind, Op op, const char* op_kind, + typename Generator::result_type init) +{ + const bool in_place = true; + const bool out_of_place = false; + all_reduce_one_test(comm, generator, type_kind, op, op_kind, init, in_place); + all_reduce_one_test(comm, generator, type_kind, op, op_kind, init, out_of_place); + all_reduce_array_test(comm, generator, type_kind, op, op_kind, + init, in_place); + all_reduce_array_test(comm, generator, type_kind, op, op_kind, + init, out_of_place); +} + +// Generates integers to test with all_reduce() +struct int_generator +{ + typedef int result_type; + + int_generator(int base = 1) : base(base) { } + + int operator()(int p) const { return base + p; } + + private: + int base; +}; + +// Generate points to test with all_reduce() +struct point_generator +{ + typedef point result_type; + + point_generator(point origin) : origin(origin) { } + + point operator()(int p) const + { + return point(origin.x + 1, origin.y + 1, origin.z + 1); + } + + private: + point origin; +}; + +struct string_generator +{ + typedef std::string result_type; + + std::string operator()(int p) const + { + std::string result = boost::lexical_cast<std::string>(p); + result += " rosebud"; + if (p != 1) result += 's'; + return result; + } +}; + +struct secret_int_bit_and +{ + int operator()(int x, int y) const { return x & y; } +}; + +struct wrapped_int +{ + wrapped_int() : value(0) { } + explicit wrapped_int(int value) : value(value) { } + + template<typename Archive> + void serialize(Archive& ar, unsigned int /* version */) + { + ar & value; + } + + int value; +}; + +wrapped_int operator+(const wrapped_int& x, const wrapped_int& y) +{ + return wrapped_int(x.value + y.value); +} + +bool operator==(const wrapped_int& x, const wrapped_int& y) +{ + return x.value == y.value; +} + +bool operator<(const wrapped_int& x, const wrapped_int& y) +{ + return x.value < y.value; +} + +// Generates wrapped_its to test with all_reduce() +struct wrapped_int_generator +{ + typedef wrapped_int result_type; + + wrapped_int_generator(int base = 1) : base(base) { } + + wrapped_int operator()(int p) const { return wrapped_int(base + p); } + + private: + int base; +}; + +namespace boost { namespace mpi { + +// Make std::plus<wrapped_int> commutative. 
+template<> +struct is_commutative<std::plus<wrapped_int>, wrapped_int> + : mpl::true_ { }; + +} } // end namespace boost::mpi + +BOOST_AUTO_TEST_CASE(test_all_reduce) +{ + using namespace boost::mpi; + environment env; + communicator comm; + + // Built-in MPI datatypes with built-in MPI operations + all_reduce_test(comm, int_generator(), "integers", std::plus<int>(), "sum", 0); + all_reduce_test(comm, int_generator(), "integers", std::multiplies<int>(), "product", 1); + all_reduce_test(comm, int_generator(), "integers", maximum<int>(), "maximum", 0); + all_reduce_test(comm, int_generator(), "integers", minimum<int>(), "minimum", 2); + + // User-defined MPI datatypes with operations that have the + // same name as built-in operations. + all_reduce_test(comm, point_generator(point(0,0,0)), "points", std::plus<point>(), + "sum", point()); + + // Built-in MPI datatypes with user-defined operations + all_reduce_test(comm, int_generator(17), "integers", secret_int_bit_and(), + "bitwise and", -1); + + // Arbitrary types with user-defined, commutative operations. + all_reduce_test(comm, wrapped_int_generator(17), "wrapped integers", + std::plus<wrapped_int>(), "sum", wrapped_int(0)); + + // Arbitrary types with (non-commutative) user-defined operations + all_reduce_test(comm, string_generator(), "strings", + std::plus<std::string>(), "concatenation", std::string()); +} diff --git a/src/boost/libs/mpi/test/all_to_all_test.cpp b/src/boost/libs/mpi/test/all_to_all_test.cpp new file mode 100644 index 000000000..0e27dcf05 --- /dev/null +++ b/src/boost/libs/mpi/test/all_to_all_test.cpp @@ -0,0 +1,113 @@ +// Copyright (C) 2005, 2006 Douglas Gregor. + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// A test of the all_to_all() collective. +#include <boost/mpi/collectives/all_to_all.hpp> +#include <boost/mpi/communicator.hpp> +#include <boost/mpi/environment.hpp> +#include <algorithm> +#include "gps_position.hpp" +#include <boost/serialization/string.hpp> +#include <boost/serialization/list.hpp> +#include <boost/iterator/counting_iterator.hpp> +#include <boost/lexical_cast.hpp> + +#define BOOST_TEST_MODULE mpi_all_to_all +#include <boost/test/included/unit_test.hpp> + +using boost::mpi::communicator; + +using boost::mpi::packed_skeleton_iarchive; +using boost::mpi::packed_skeleton_oarchive; + +template<typename Generator> +void +all_to_all_test(const communicator& comm, Generator generator, + const char* kind) +{ + typedef typename Generator::result_type value_type; + + using boost::mpi::all_to_all; + + std::vector<value_type> in_values; + for (int p = 0; p < comm.size(); ++p) + in_values.push_back(generator((p + 1) * (comm.rank() + 1))); + + if (comm.rank() == 0) { + std::cout << "Performing all-to-all operation on " << kind << "..."; + std::cout.flush(); + } + std::vector<value_type> out_values; + all_to_all(comm, in_values, out_values); + + for (int p = 0; p < comm.size(); ++p) { + BOOST_CHECK(out_values[p] == generator((p + 1) * (comm.rank() + 1))); + } + + if (comm.rank() == 0) { + std::cout << " done." 
<< std::endl; + } + + (comm.barrier)(); +} + +// Generates integers to test with all_to_all() +struct int_generator +{ + typedef int result_type; + + int operator()(int p) const { return 17 + p; } +}; + +// Generates GPS positions to test with all_to_all() +struct gps_generator +{ + typedef gps_position result_type; + + gps_position operator()(int p) const + { + return gps_position(39 + p, 16, 20.2799); + } +}; + +struct string_generator +{ + typedef std::string result_type; + + std::string operator()(int p) const + { + std::string result = boost::lexical_cast<std::string>(p); + result += " rosebud"; + if (p != 1) result += 's'; + return result; + } +}; + +struct string_list_generator +{ + typedef std::list<std::string> result_type; + + std::list<std::string> operator()(int p) const + { + std::list<std::string> result; + for (int i = 0; i <= p; ++i) { + std::string value = boost::lexical_cast<std::string>(i); + result.push_back(value); + } + return result; + } +}; + +BOOST_AUTO_TEST_CASE(all_to_all_check) +{ + boost::mpi::environment env; + communicator comm; + + all_to_all_test(comm, int_generator(), "integers"); + all_to_all_test(comm, gps_generator(), "GPS positions"); + all_to_all_test(comm, string_generator(), "string"); + all_to_all_test(comm, string_list_generator(), "list of strings"); +} diff --git a/src/boost/libs/mpi/test/block_nonblock_test.cpp b/src/boost/libs/mpi/test/block_nonblock_test.cpp new file mode 100644 index 000000000..3088b6559 --- /dev/null +++ b/src/boost/libs/mpi/test/block_nonblock_test.cpp @@ -0,0 +1,95 @@ +#include <vector> +#include <iostream> +#include <iterator> +#include <typeinfo> + +#include <boost/mpi.hpp> +#include <boost/serialization/vector.hpp> +#include <boost/core/demangle.hpp> + +//#include "debugger.cpp" + +#define BOOST_TEST_MODULE mpi_nonblocking +#include <boost/test/included/unit_test.hpp> + +namespace mpi = boost::mpi; + +template<typename T> +bool test(mpi::communicator const& comm, std::vector<T> const& ref, bool iswap, bool alloc) +{ + + int rank = comm.rank(); + if (rank == 0) { + std::cout << "Testing with type " << boost::core::demangle(typeid(T).name()) << '\n'; + if (iswap) { + std::cout << "Blockin send, non blocking receive.\n"; + } else { + std::cout << "Non blockin send, blocking receive.\n"; + } + if (alloc) { + std::cout << "Explicitly allocate space for the receiver.\n"; + } else { + std::cout << "Do not explicitly allocate space for the receiver.\n"; + } + } + if (rank == 0) { + std::vector<T> data; + if (alloc) { + data.resize(ref.size()); + } + if (iswap) { + mpi::request req = comm.irecv(1, 0, data); + req.wait(); + } else { + comm.recv(1, 0, data); + } + std::cout << "Process 0 received " << data.size() << " elements :" << std::endl; + std::copy(data.begin(), data.end(), std::ostream_iterator<T>(std::cout, " ")); + std::cout << std::endl; + std::cout << "While expecting " << ref.size() << " elements :" << std::endl; + std::copy(ref.begin(), ref.end(), std::ostream_iterator<T>(std::cout, " ")); + std::cout << std::endl; + return (data == ref); + } else { + if (rank == 1) { + std::vector<T> vec = ref; + if (iswap) { + comm.send(0, 0, vec); + } else { + mpi::request req = comm.isend(0, 0, vec); + req.wait(); + } + } + return true; + } +} + +BOOST_AUTO_TEST_CASE(non_blocking) +{ + mpi::environment env; + mpi::communicator world; + + BOOST_TEST_REQUIRE(world.size() > 1); + + std::vector<int> integers(13); // don't assume we're lucky + for(int i = 0; i < int(integers.size()); ++i) { + integers[i] = i; + } + + 
std::vector<std::string> strings(13); // don't assume we're lucky + for(int i = 0; i < int(strings.size()); ++i) { + std::ostringstream fmt; + fmt << "S" << i; + strings[i] = fmt.str(); + } + + BOOST_CHECK(test(world, integers, true, true)); + BOOST_CHECK(test(world, integers, true, false)); + BOOST_CHECK(test(world, strings, true, true)); + BOOST_CHECK(test(world, strings, true, false)); + + BOOST_CHECK(test(world, integers, false, true)); + BOOST_CHECK(test(world, integers, false, false)); + BOOST_CHECK(test(world, strings, false, true)); + BOOST_CHECK(test(world, strings, false, false)); +} diff --git a/src/boost/libs/mpi/test/broadcast_stl_test.cpp b/src/boost/libs/mpi/test/broadcast_stl_test.cpp new file mode 100644 index 000000000..705d6ccb9 --- /dev/null +++ b/src/boost/libs/mpi/test/broadcast_stl_test.cpp @@ -0,0 +1,75 @@ +// Copyright (C) 2005, 2006 Douglas Gregor. + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// A test of the broadcast() collective. +#include <algorithm> +#include <vector> +#include <map> + +#include <boost/mpi/collectives/broadcast.hpp> +#include <boost/mpi/communicator.hpp> +#include <boost/mpi/environment.hpp> + +#include <boost/serialization/string.hpp> +#include <boost/serialization/vector.hpp> +#include <boost/serialization/map.hpp> + +#define BOOST_TEST_MODULE mpi_broadcast_stl +#include <boost/test/included/unit_test.hpp> + +namespace mpi = boost::mpi; + +typedef std::vector<std::map<int, double> > sparse; + +template<typename T> +void +broadcast_test(const mpi::communicator& comm, const T& bc_value, + std::string const& kind, int root) { + using boost::mpi::broadcast; + + T value; + if (comm.rank() == root) { + value = bc_value; + std::cout << "Broadcasting " << kind << " from root " << root << "..."; + std::cout.flush(); + } + + + broadcast(comm, value, root); + BOOST_CHECK(value == bc_value); + if (comm.rank() == root) { + if (value == bc_value) { + std::cout << "OK." << std::endl; + } else { + std::cout << "FAIL." << std::endl; + } + } + comm.barrier(); +} + +template<typename T> +void +broadcast_test(const mpi::communicator& comm, const T& bc_value, + std::string const& kind) +{ + for (int root = 0; root < comm.size(); ++root) { + broadcast_test(comm, bc_value, kind, root); + } +} + +BOOST_AUTO_TEST_CASE(broadcast_stl) +{ + boost::mpi::environment env; + + mpi::communicator comm; + BOOST_TEST_REQUIRE(comm.size() > 1); + + sparse s; + s.resize(2); + s[0][12] = 0.12; + s[1][13] = 1.13; + broadcast_test(comm, s, "sparse"); +} diff --git a/src/boost/libs/mpi/test/broadcast_test.cpp b/src/boost/libs/mpi/test/broadcast_test.cpp new file mode 100644 index 000000000..50081aad9 --- /dev/null +++ b/src/boost/libs/mpi/test/broadcast_test.cpp @@ -0,0 +1,159 @@ +// Copyright (C) 2005, 2006 Douglas Gregor. + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// A test of the broadcast() collective. 
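+
+// Editorial sketch (not part of the upstream test; the helper name is
+// invented): broadcast() copies the root's value to every rank, which is the
+// behaviour the checks below verify for a range of value types.
+#include <boost/mpi/collectives/broadcast.hpp>
+#include <boost/mpi/communicator.hpp>
+inline int example_broadcast_value(const boost::mpi::communicator& comm)
+{
+  int value = 0;
+  if (comm.rank() == 0) value = 42;       // only the root fills it in
+  boost::mpi::broadcast(comm, value, 0);  // afterwards every rank holds 42
+  return value;
+}
+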
+#include <boost/mpi/collectives/broadcast.hpp> +#include <boost/mpi/communicator.hpp> +#include <boost/mpi/environment.hpp> +#include <algorithm> +#include "gps_position.hpp" +#include <boost/serialization/string.hpp> +#include <boost/serialization/list.hpp> +#include <boost/mpi/skeleton_and_content.hpp> +#include <boost/iterator/counting_iterator.hpp> +//#include "debugger.hpp" + +#define BOOST_TEST_MODULE mpi_broadcast +#include <boost/test/included/unit_test.hpp> + +using boost::mpi::communicator; + +using boost::mpi::packed_skeleton_iarchive; +using boost::mpi::packed_skeleton_oarchive; + +template<typename T> +void +broadcast_test(const communicator& comm, const T& bc_value, + const char* kind, int root = -1) +{ + if (root == -1) { + for (root = 0; root < comm.size(); ++root) + broadcast_test(comm, bc_value, kind, root); + } else { + using boost::mpi::broadcast; + + T value; + if (comm.rank() == root) { + value = bc_value; + std::cout << "Broadcasting " << kind << " from root " << root << "..."; + std::cout.flush(); + } + + broadcast(comm, value, root); + BOOST_CHECK(value == bc_value); + if (comm.rank() == root && value == bc_value) + std::cout << "OK." << std::endl; + } + + (comm.barrier)(); +} + +void +test_skeleton_and_content(const communicator& comm, int root = 0) +{ + using boost::mpi::content; + using boost::mpi::get_content; + using boost::make_counting_iterator; + using boost::mpi::broadcast; + + int list_size = comm.size() + 7; + if (comm.rank() == root) { + // Fill in the seed data + std::list<int> original_list; + for (int i = 0; i < list_size; ++i) + original_list.push_back(i); + + // Build up the skeleton + packed_skeleton_oarchive oa(comm); + oa << original_list; + + // Broadcast the skeleton + std::cout << "Broadcasting integer list skeleton from root " << root + << "..." << std::flush; + broadcast(comm, oa, root); + std::cout << "OK." << std::endl; + + // Broadcast the content + std::cout << "Broadcasting integer list content from root " << root + << "..." << std::flush; + { + content c = get_content(original_list); + broadcast(comm, c, root); + } + std::cout << "OK." << std::endl; + + // Reverse the list, broadcast the content again + std::reverse(original_list.begin(), original_list.end()); + std::cout << "Broadcasting reversed integer list content from root " + << root << "..." << std::flush; + { + content c = get_content(original_list); + broadcast(comm, c, root); + } + std::cout << "OK." << std::endl; + + } else { + // Allocate some useless data, to try to get the addresses of the + // list<int>'s used later to be different across processes. + std::list<int> junk_list(comm.rank() * 3 + 1, 17); + + // Receive the skeleton + packed_skeleton_iarchive ia(comm); + broadcast(comm, ia, root); + + // Build up a list to match the skeleton, and make sure it has the + // right structure (we have no idea what the data will be). 
+ std::list<int> transferred_list; + ia >> transferred_list; + BOOST_CHECK((int)transferred_list.size() == list_size); + + // Receive the content and check it + broadcast(comm, get_content(transferred_list), root); + bool list_content_ok = std::equal(make_counting_iterator(0), + make_counting_iterator(list_size), + transferred_list.begin()); + BOOST_CHECK(list_content_ok); + + // Receive the reversed content and check it + broadcast(comm, get_content(transferred_list), root); + bool rlist_content_ok = std::equal(make_counting_iterator(0), + make_counting_iterator(list_size), + transferred_list.rbegin()); + BOOST_CHECK(rlist_content_ok); + if (!(list_content_ok && rlist_content_ok)) { + if (comm.rank() == 1) { + std::cout + << "\n##### You might want to check for BOOST_MPI_BCAST_BOTTOM_WORKS_FINE " + << "in boost/mpi/config.hpp.\n\n"; + } + } + } + + (comm.barrier)(); +} + +BOOST_AUTO_TEST_CASE(broadcast_check) +{ + boost::mpi::environment env; + communicator comm; + + BOOST_TEST_REQUIRE(comm.size() > 1); + + // Check transfer of individual objects + broadcast_test(comm, 17, "integers"); + broadcast_test(comm, gps_position(39,16,20.2799), "GPS positions"); + broadcast_test(comm, gps_position(26,25,30.0), "GPS positions"); + broadcast_test(comm, std::string("Rosie"), "string"); + + std::list<std::string> strings; + strings.push_back("Hello"); + strings.push_back("MPI"); + strings.push_back("World"); + broadcast_test(comm, strings, "list of strings"); + + test_skeleton_and_content(comm, 0); + test_skeleton_and_content(comm, 1); +} diff --git a/src/boost/libs/mpi/test/cartesian_topology_init_test.cpp b/src/boost/libs/mpi/test/cartesian_topology_init_test.cpp new file mode 100644 index 000000000..6138a2e6c --- /dev/null +++ b/src/boost/libs/mpi/test/cartesian_topology_init_test.cpp @@ -0,0 +1,88 @@ + +// Copyright Alain Miniussi 2014. +// Distributed under the Boost Software License, Version 1.0. 
+// (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Authors: Alain Miniussi + +#include <vector> +#include <list> +#include <iostream> +#include <sstream> +#include <iterator> +#include <algorithm> +#include <functional> + +#include <boost/mpi/communicator.hpp> +#include <boost/mpi/collectives.hpp> +#include <boost/array.hpp> +#include <boost/mpi/environment.hpp> +#include <boost/mpi/cartesian_communicator.hpp> + +#define BOOST_TEST_MODULE mpi_cartesian_topology_init +#include <boost/test/included/unit_test.hpp> + +namespace mpi = boost::mpi; + +BOOST_AUTO_TEST_CASE(cartesian_dimension_init) +{ + // Curly brace initialization syntax not supported on (very) old gnu + // This typedef keeps things shorter + typedef mpi::cartesian_dimension cd; + + { + // Check the basic ctor + mpi::cartesian_dimension def; + mpi::cartesian_topology t1(10); + BOOST_CHECK(t1.stl() == std::vector<mpi::cartesian_dimension>(10, def)); + } +#if !defined(BOOST_NO_CXX11_HDR_INITIALIZER_LIST) + { + // Intializer list ctor vs range based + int dims[] = {2,3,4}; + bool per[] = {true, false, true}; + mpi::cartesian_topology t1(dims, per); + mpi::cartesian_topology t2({{2,true},{3, false},{4, true}}); + BOOST_CHECK(t1.size() == 3); + BOOST_CHECK(t1 == t2); + } +#endif + // Container based ctor only available as a replacement for initializer list ctor + { + // seq ctor vs C array ctor + mpi::cartesian_dimension d[] = {cd(2,true),cd(3, false),cd(4, true)}; + std::list<mpi::cartesian_dimension> seq; + std::copy(d, d+3, std::back_inserter(seq)); + mpi::cartesian_topology t1(seq); + mpi::cartesian_topology t2(d); + BOOST_CHECK(t1 == t2); + } + { + // Check range based with array based ctor. + boost::array<mpi::cartesian_dimension, 3> d = {{cd(2,true),cd(3, false),cd(4, true)}}; + int dims[] = {2,3,4}; + bool per[] = {true, false, true}; + mpi::cartesian_topology t1(dims, per); + mpi::cartesian_topology t2(d); + BOOST_CHECK(t1.size() == 3); + BOOST_CHECK(t1 == t2); + } + { + // Iterator based ctor vs C array based ctor + mpi::cartesian_dimension d[] = {cd(2,true),cd(3, false),cd(4, true)}; + std::vector<mpi::cartesian_dimension> vdims(d, d+3); + mpi::cartesian_topology t1(vdims); + mpi::cartesian_topology t2(d); + BOOST_CHECK(t1.size() == 3); + BOOST_CHECK(t1 == t2); + BOOST_CHECK(!(t1 != t2)); + t1[1].periodic = true; + BOOST_CHECK(t1 != t2); + t1[2].periodic = false; + t1[2].size = 0; + vdims.push_back(mpi::cartesian_dimension(3, false)); + mpi::cartesian_topology t3(vdims); + BOOST_CHECK(t1 != t3); + } +} diff --git a/src/boost/libs/mpi/test/cartesian_topology_test.cpp b/src/boost/libs/mpi/test/cartesian_topology_test.cpp new file mode 100644 index 000000000..d63fc5003 --- /dev/null +++ b/src/boost/libs/mpi/test/cartesian_topology_test.cpp @@ -0,0 +1,193 @@ +// Copyright Alain Miniussi 2014. +// Distributed under the Boost Software License, Version 1.0. 
+// (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Authors: Alain Miniussi + +#include <vector> +#include <iostream> +#include <sstream> +#include <iterator> +#include <algorithm> +#include <functional> + +#include <boost/mpi/communicator.hpp> +#include <boost/mpi/collectives.hpp> +#include <boost/mpi/environment.hpp> +#include <boost/mpi/cartesian_communicator.hpp> + +#define BOOST_TEST_MODULE mpi_cartesian_topolohy +#include <boost/test/included/unit_test.hpp> + +namespace mpi = boost::mpi; + +struct topo_minimum { + mpi::cartesian_dimension + operator()(mpi::cartesian_dimension const& d1, + mpi::cartesian_dimension const& d2 ) const { + return mpi::cartesian_dimension(std::min(d1.size, d2.size), + d1.periodic && d2.periodic); + } +}; + +std::string topology_description( mpi::cartesian_topology const& topo ) { + std::ostringstream out; + std::copy(topo.begin(), topo.end(), std::ostream_iterator<mpi::cartesian_dimension>(out, " ")); + out << std::flush; + return out.str(); +} + +// Check that everyone agrees on the coordinates +void test_coordinates_consistency( mpi::cartesian_communicator const& cc, + std::vector<int> const& coords ) +{ + cc.barrier(); // flush IOs for nice printing + bool master = cc.rank() == 0; + if (master) { + std::cout << "Test coordinates consistency.\n"; + } + for(int p = 0; p < cc.size(); ++p) { + std::vector<int> min(cc.ndims()); + std::vector<int> local(cc.coordinates(p)); + mpi::reduce(cc, &local.front(), local.size(), + &(min[0]), mpi::minimum<int>(), p); + cc.barrier(); + if (p == cc.rank()) { + BOOST_CHECK(std::equal(coords.begin(), coords.end(), min.begin())); + std::ostringstream out; + out << "proc " << p << " at ("; + std::copy(min.begin(), min.end(), std::ostream_iterator<int>(out, " ")); + out << ")\n"; + std::cout << out.str(); + } + } +} + +void test_shifted_coords( mpi::cartesian_communicator const& cc, int pos, mpi::cartesian_dimension desc, int dim ) +{ + if (desc.periodic) { + for (int i = -(desc.size); i < desc.size; ++i) { + std::pair<int,int> rks = cc.shifted_ranks(dim, i); + int src = cc.coordinates(rks.first)[dim]; + int dst = cc.coordinates(rks.second)[dim]; + if (pos == (dim/2)) { + std::ostringstream out; + out << "Rank " << cc.rank() << ", dim. 
" << dim << ", pos " << pos << ", in " << desc << ' '; + out << "shifted pos: " << src << ", " << dst << '\n'; + std::cout << out.str(); + } + } + } +} + +void test_shifted_coords( mpi::cartesian_communicator const& cc) +{ + cc.barrier(); // flush IOs for nice printing + std::vector<int> coords; + mpi::cartesian_topology topo(cc.ndims()); + cc.topology(topo, coords); + bool master = cc.rank() == 0; + if (master) { + std::cout << "Testing shifts with topology " << topo << '\n'; + } + for(int i = 0; i < cc.ndims(); ++i) { + if (master) { + std::cout << " for dimension " << i << ": " << topo[i] << '\n'; + } + test_shifted_coords( cc, coords[i], topo[i], i ); + } +} + +void test_topology_consistency( mpi::cartesian_communicator const& cc) +{ + cc.barrier(); // flush IOs for nice printing + mpi::cartesian_topology itopo(cc.ndims()); + mpi::cartesian_topology otopo(cc.ndims()); + std::vector<int> coords(cc.ndims()); + cc.topology(itopo, coords); + bool master = cc.rank() == 0; + if (master) { + std::cout << "Test topology consistency of" << itopo << "(on master)\n"; + std::cout << "Check that everyone agrees on the dimensions.\n"; + } + mpi::all_reduce(cc, + &(itopo[0]), itopo.size(), &(otopo[0]), + topo_minimum()); + BOOST_CHECK(std::equal(itopo.begin(), itopo.end(), otopo.begin())); + if (master) { + std::cout << "We agree on " << topology_description(otopo) << '\n'; + } + test_coordinates_consistency( cc, coords ); +} + +void test_cartesian_topology( mpi::cartesian_communicator const& cc) +{ + BOOST_CHECK(cc.has_cartesian_topology()); + for( int r = 0; r < cc.size(); ++r) { + cc.barrier(); + if (r == cc.rank()) { + std::vector<int> coords = cc.coordinates(r); + std::cout << "Process of cartesian rank " << cc.rank() + << " has coordinates ("; + std::copy(coords.begin(), coords.end(), std::ostream_iterator<int>(std::cout," ")); + std::cout << ")\n"; + } + } + test_topology_consistency(cc); + test_shifted_coords(cc); + std::vector<int> even; + for(int i = 0; i < cc.ndims(); i += 2) { + even.push_back(i); + } + cc.barrier(); + mpi::cartesian_communicator cce(cc, even); +} + +void test_cartesian_topology( mpi::communicator const& world, mpi::cartesian_topology const& topo) +{ + mpi::cartesian_communicator cc(world, topo, true); + if (cc) { + BOOST_CHECK(cc.has_cartesian_topology()); + BOOST_CHECK(cc.ndims() == int(topo.size())); + if (cc.rank() == 0) { + std::cout << "Asked topology " << topo << ", got " << cc.topology() << '\n'; + } + test_cartesian_topology(cc); + } else { + std::ostringstream out; + out << world.rank() << " was left outside the cartesian grid\n"; + std::cout << out.str(); + } +} + +BOOST_AUTO_TEST_CASE(cartesian_topology) +{ + mpi::environment env; + mpi::communicator world; + + int const ndim = world.size() >= 24 ? 
3 : 2;
+ mpi::cartesian_topology topo(ndim);
+ typedef mpi::cartesian_dimension cd;
+ if (topo.size() == 3) {
+ topo[0] = cd(2,true);
+ topo[1] = cd(3,false);
+ topo[2] = cd(4, true);
+ } else {
+ if (world.size() >= 6) {
+ topo[0] = cd(2,true);
+ topo[1] = cd(3, false);
+ } else {
+ topo[0] = cd(1,true);
+ topo[1] = cd(1, false);
+ }
+ }
+ test_cartesian_topology( world, topo);
+#if !defined(BOOST_NO_CXX11_DEFAULTED_MOVES)
+ world.barrier();
+ if (world.rank()==0) {
+ std::cout << "Testing move constructor.\n";
+ }
+ test_cartesian_topology( world, std::move(topo));
+#endif
+}
diff --git a/src/boost/libs/mpi/test/debugger.cpp b/src/boost/libs/mpi/test/debugger.cpp
new file mode 100644
index 000000000..90093e79f
--- /dev/null
+++ b/src/boost/libs/mpi/test/debugger.cpp
@@ -0,0 +1,48 @@
+// Copyright Alain Miniussi 2014 - 2015.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#include <cstdlib>
+
+#include "debugger.hpp"
+
+std::vector<int> extract_paused_ranks(int argc, char** argv) {
+ std::vector<int> paused;
+ for (int i=1; i < argc; ++i) {
+ paused.push_back(std::atoi(argv[i]));
+ }
+ return paused;
+}
+
+void wait_for_debugger(std::vector<int> const& processes, boost::mpi::communicator const& comm)
+{
+ int i = 1;
+ bool waiting = std::find(processes.begin(), processes.end(), comm.rank()) != processes.end();
+ for (int r = 0; r < comm.size(); ++r) {
+ if (comm.rank() == r) {
+ std::cout << "Rank " << comm.rank() << " has PID " << getpid();
+ if (waiting) {
+ std::cout << " and is waiting.";
+ }
+ std::cout << std::endl;
+ }
+ comm.barrier();
+ }
+ if (std::find(processes.begin(), processes.end(), comm.rank()) != processes.end()) {
+ while (i != 0) {
+ sleep(5);
+ }
+ }
+ std::cout << "Rank " << comm.rank() << " will proceed.\n";
+}
+
+void wait_for_debugger(boost::mpi::communicator const& comm)
+{
+ std::vector<int> all;
+ for (int r = 0; r < comm.size(); ++r) {
+ all.push_back(r);
+ }
+ wait_for_debugger(all, comm);
+}
+
diff --git a/src/boost/libs/mpi/test/debugger.hpp b/src/boost/libs/mpi/test/debugger.hpp
new file mode 100644
index 000000000..71abf51d0
--- /dev/null
+++ b/src/boost/libs/mpi/test/debugger.hpp
@@ -0,0 +1,26 @@
+// Copyright Alain Miniussi 2014 - 2015.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#include <vector>
+#include "boost/mpi/communicator.hpp"
+
+/**
+ * @brief Extract the MPI ranks to pause.
+ *
+ * Right now, this just atoi's all the parameters in argv.
+ */
+std::vector<int> extract_paused_ranks(int argc, char** argv);
+
+/**
+ * @brief Print the rank/PID map and wait if requested.
+ * @param processes Wait if our rank is in there.
+ * @param comm The communicator to consider.
+ *
+ * Once the debugger has attached to the process, it is expected to
+ * set the local variable 'i' to 0 to let the process restart.
+ */
+void wait_for_debugger(std::vector<int> const& processes, boost::mpi::communicator const& comm);
+/** @overload */
+void wait_for_debugger(boost::mpi::communicator const& comm);
diff --git a/src/boost/libs/mpi/test/gather_test.cpp b/src/boost/libs/mpi/test/gather_test.cpp
new file mode 100644
index 000000000..9bef7acd2
--- /dev/null
+++ b/src/boost/libs/mpi/test/gather_test.cpp
@@ -0,0 +1,165 @@
+// Copyright (C) 2005, 2006 Douglas Gregor.
+ +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// A test of the gather() and gatherv() collectives. +#include <boost/mpi/collectives/gather.hpp> +#include <boost/mpi/collectives/gatherv.hpp> +#include <boost/mpi/communicator.hpp> +#include <boost/mpi/environment.hpp> +#include "gps_position.hpp" +#include <boost/serialization/string.hpp> +#include <boost/serialization/list.hpp> +#include <boost/iterator/counting_iterator.hpp> +#include <boost/lexical_cast.hpp> + +#define BOOST_TEST_MODULE mpi_gather +#include <boost/test/included/unit_test.hpp> + +using boost::mpi::communicator; + +template<typename Generator> +void +gather_test(const communicator& comm, Generator generator, + const char* kind, int root = -1) +{ + typedef typename Generator::result_type value_type; + value_type value = generator(comm.rank()); + + if (root == -1) { + for (root = 0; root < comm.size(); ++root) + gather_test(comm, generator, kind, root); + } else { + using boost::mpi::gather; + + std::vector<value_type> values; + if (comm.rank() == root) { + std::cout << "Gathering " << kind << " from root " + << root << "..." << std::endl; + } + + gather(comm, value, values, root); + + if (comm.rank() == root) { + std::vector<value_type> expected_values; + for (int p = 0; p < comm.size(); ++p) + expected_values.push_back(generator(p)); + BOOST_CHECK(values == expected_values); + } else { + BOOST_CHECK(values.empty()); + } + } + + (comm.barrier)(); +} + + +template<typename Generator> +void +gatherv_test(const communicator& comm, Generator generator, + const char* kind, int root = -1) +{ + typedef typename Generator::result_type value_type; + + if (root == -1) { + for (root = 0; root < comm.size(); ++root) + gatherv_test(comm, generator, kind, root); + } else { + using boost::mpi::gatherv; + + int mysize = comm.rank() + 1; + int nprocs = comm.size(); + + // process p will send p+1 identical generator(p) elements + std::vector<value_type> myvalues(mysize, generator(comm.rank())); + + if (comm.rank() == root) { + std::vector<value_type> values((nprocs*(nprocs+1))/2); + std::vector<int> sizes(comm.size()); + for (int p = 0; p < comm.size(); ++p) + sizes[p] = p + 1; + + std::cout << "Gatheringv " << kind << " from root " + << root << "..." 
<< std::endl; + + gatherv(comm, myvalues, &values[0], sizes, root); + + std::vector<value_type> expected_values; + for (int p = 0; p < comm.size(); ++p) + for (int i = 0; i < p+1; ++i) + expected_values.push_back(generator(p)); + + BOOST_CHECK(values == expected_values); + } else { + gatherv(comm, myvalues, root); + } + } + + (comm.barrier)(); +} + +// +// Generators to test with gather/gatherv +// +struct int_generator +{ + typedef int result_type; + + int operator()(int p) const { return 17 + p; } +}; + +struct gps_generator +{ + typedef gps_position result_type; + + gps_position operator()(int p) const + { + return gps_position(39 + p, 16, 20.2799); + } +}; + +struct string_generator +{ + typedef std::string result_type; + + std::string operator()(int p) const + { + std::string result = boost::lexical_cast<std::string>(p); + result += " rosebud"; + if (p != 1) result += 's'; + return result; + } +}; + +struct string_list_generator +{ + typedef std::list<std::string> result_type; + + std::list<std::string> operator()(int p) const + { + std::list<std::string> result; + for (int i = 0; i <= p; ++i) { + std::string value = boost::lexical_cast<std::string>(i); + result.push_back(value); + } + return result; + } +}; + +BOOST_AUTO_TEST_CASE(gather_check) +{ + boost::mpi::environment env; + communicator comm; + + gather_test(comm, int_generator(), "integers"); + gather_test(comm, gps_generator(), "GPS positions"); + gather_test(comm, string_generator(), "string"); + gather_test(comm, string_list_generator(), "list of strings"); + + gatherv_test(comm, int_generator(), "integers"); + gatherv_test(comm, gps_generator(), "GPS positions"); + gatherv_test(comm, string_generator(), "string"); + gatherv_test(comm, string_list_generator(), "list of strings"); +} diff --git a/src/boost/libs/mpi/test/gps_position.hpp b/src/boost/libs/mpi/test/gps_position.hpp new file mode 100644 index 000000000..e6910c444 --- /dev/null +++ b/src/boost/libs/mpi/test/gps_position.hpp @@ -0,0 +1,69 @@ +#ifndef GPS_POSITION_HPP +#define GPS_POSITION_HPP + +// Copyright Matthias Troyer +// 2005. Distributed under the Boost Software License, Version +// 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +#include <boost/mpi/datatype_fwd.hpp> +#include <boost/mpl/and.hpp> +#include <boost/serialization/export.hpp> +#include <boost/shared_ptr.hpp> +#include <iostream> + +class gps_position +{ +private: + friend class boost::serialization::access; + // When the class Archive corresponds to an output archive, the + // & operator is defined similar to <<. Likewise, when the class Archive + // is a type of input archive the & operator is defined similar to >>. 
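+ // Serializing the three members is all Boost.MPI needs; the
+ // is_mpi_datatype specialization further down in this header additionally
+ // lets gps_position travel as a plain MPI datatype without packing.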
+ template<class Archive> + void serialize(Archive & ar, const unsigned int version) + { + ar & degrees & minutes & seconds; + } + int degrees; + int minutes; + float seconds; +public: + gps_position(){}; + gps_position(int d, int m, float s) : + degrees(d), minutes(m), seconds(s) + {} + + friend std::ostream& operator<<(std::ostream& out, const gps_position& g); + + friend bool operator==(const gps_position& x, const gps_position& y) + { + return (x.degrees == y.degrees + && x.minutes == y.minutes + && x.seconds == y.seconds); + } + + inline friend bool operator!=(const gps_position& x, const gps_position& y) + { + return !(x == y); + } +}; + +inline +std::ostream& operator<<(std::ostream& out, const gps_position& g) { + out << "gps{" << g.degrees << 'd' << g.minutes << 'm' << g.seconds << "s}"; + return out; +} + +namespace boost { namespace mpi { + + template <> + struct is_mpi_datatype<gps_position> + : public mpl::and_ + < + is_mpi_datatype<int>, + is_mpi_datatype<float> + > + {}; + +} } +#endif diff --git a/src/boost/libs/mpi/test/graph_topology_test.cpp b/src/boost/libs/mpi/test/graph_topology_test.cpp new file mode 100644 index 000000000..4d781265c --- /dev/null +++ b/src/boost/libs/mpi/test/graph_topology_test.cpp @@ -0,0 +1,141 @@ +// Copyright (C) 2007 Trustees of Indiana University + +// Authors: Douglas Gregor +// Andrew Lumsdaine + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// A test of the communicator that passes data around a ring and +// verifies that the same data makes it all the way. Should test all +// of the various kinds of data that can be sent (primitive types, POD +// types, serializable objects, etc.) 
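+
+// Editorial sketch (not part of the upstream test; the helper name is
+// invented): a graph_communicator is built from any BGL graph whose vertices
+// correspond one-to-one to the ranks of an existing communicator, for
+// example a simple directed ring over all ranks.
+#include <boost/graph/adjacency_list.hpp>
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/graph_communicator.hpp>
+inline boost::mpi::graph_communicator
+example_ring_topology(const boost::mpi::communicator& world)
+{
+  typedef boost::adjacency_list<boost::listS, boost::vecS, boost::bidirectionalS> Graph;
+  Graph ring(world.size());
+  for (int v = 0; v < world.size(); ++v)
+    boost::add_edge(v, (v + 1) % world.size(), ring); // each rank points at its right neighbour
+  return boost::mpi::graph_communicator(world, ring, false); // keep rank order unchanged
+}
+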
+#include <boost/mpi/graph_communicator.hpp> +#include <boost/mpi/communicator.hpp> +#include <boost/mpi/environment.hpp> +#include <boost/graph/adjacency_list.hpp> +#include <boost/lexical_cast.hpp> +#include <boost/graph/erdos_renyi_generator.hpp> +#include <boost/random/linear_congruential.hpp> +#include <boost/graph/iteration_macros.hpp> +#include <boost/graph/isomorphism.hpp> +#include <algorithm> // for random_shuffle +#include <boost/serialization/vector.hpp> +#include <boost/mpi/collectives/broadcast.hpp> +#include <boost/config.hpp> + +#define BOOST_TEST_MODULE mpi_graph_topology +#include <boost/test/included/unit_test.hpp> + +#if defined(BOOST_NO_CXX98_RANDOM_SHUFFLE) + +#include <random> + +std::default_random_engine gen; + +template<typename RandomIt> void random_shuffle( RandomIt first, RandomIt last ) +{ + std::shuffle( first, last, gen ); +} + +#else + +using std::random_shuffle; + +#endif // #if defined(BOOST_NO_CXX98_RANDOM_SHUFFLE) + + +using boost::mpi::communicator; +using boost::mpi::graph_communicator; +using namespace boost; + +BOOST_AUTO_TEST_CASE(graph_topology) +{ + boost::function_requires< IncidenceGraphConcept<graph_communicator> >(); + boost::function_requires< AdjacencyGraphConcept<graph_communicator> >(); + boost::function_requires< VertexListGraphConcept<graph_communicator> >(); + boost::function_requires< EdgeListGraphConcept<graph_communicator> >(); + + double prob = 0.1; + + boost::mpi::environment env; + communicator world; + + // Random number generator + minstd_rand gen; + + // Build a random graph with as many vertices as there are processes + typedef adjacency_list<listS, vecS, bidirectionalS> Graph; + sorted_erdos_renyi_iterator<minstd_rand, Graph> + first(gen, world.size(), prob), last; + Graph graph(first, last, world.size()); + + // Display the original graph + if (world.rank() == 0) { + std::cout << "Original, random graph:\n"; + BGL_FORALL_VERTICES(v, graph, Graph) { + BGL_FORALL_OUTEDGES(v, e, graph, Graph) { + std::cout << source(e, graph) << " -> " << target(e, graph) + << std::endl; + } + } + } + + // Create an arbitrary mapping from vertices to integers + typedef property_map<Graph, vertex_index_t>::type GraphVertexIndexMap; + std::vector<int> graph_alt_index_vec(num_vertices(graph)); + iterator_property_map<int*, GraphVertexIndexMap> + graph_alt_index(&graph_alt_index_vec[0], get(vertex_index, graph)); + + // Rank 0 will populate the alternative index vector + if (world.rank() == 0) { + int index = 0; + BGL_FORALL_VERTICES(v, graph, Graph) + put(graph_alt_index, v, index++); + + ::random_shuffle(graph_alt_index_vec.begin(), graph_alt_index_vec.end()); + } + broadcast(world, graph_alt_index_vec, 0); + + // Display the original graph with the remapping + if (world.rank() == 0) { + std::cout << "Original, random graph with remapped vertex numbers:\n"; + BGL_FORALL_VERTICES(v, graph, Graph) { + BGL_FORALL_OUTEDGES(v, e, graph, Graph) { + std::cout << get(graph_alt_index, source(e, graph)) << " -> " + << get(graph_alt_index, target(e, graph)) << std::endl; + } + } + } + + // Create a communicator with a topology equivalent to the graph + graph_communicator graph_comm(world, graph, graph_alt_index, false); + + // The communicator's topology should have the same number of + // vertices and edges and the original graph + BOOST_CHECK((int)num_vertices(graph) == num_vertices(graph_comm)); + BOOST_CHECK((int)num_edges(graph) == num_edges(graph_comm)); + + // Display the communicator graph + if (graph_comm.rank() == 0) { + std::cout << 
"Communicator graph:\n"; + BGL_FORALL_VERTICES(v, graph_comm, graph_communicator) { + BGL_FORALL_OUTEDGES(v, e, graph_comm, graph_communicator) { + std::cout << source(e, graph_comm) << " -> " << target(e, graph_comm) + << std::endl; + } + } + + std::cout << "Communicator graph via edges():\n"; + BGL_FORALL_EDGES(e, graph_comm, graph_communicator) + std::cout << source(e, graph_comm) << " -> " << target(e, graph_comm) + << std::endl; + } + (graph_comm.barrier)(); + + // Verify the isomorphism + if (graph_comm.rank() == 0) + std::cout << "Verifying isomorphism..." << std::endl; + BOOST_CHECK(verify_isomorphism(graph, graph_comm, graph_alt_index)); +} diff --git a/src/boost/libs/mpi/test/groups_test.cpp b/src/boost/libs/mpi/test/groups_test.cpp new file mode 100644 index 000000000..ce81d182d --- /dev/null +++ b/src/boost/libs/mpi/test/groups_test.cpp @@ -0,0 +1,59 @@ +// Copyright (C) 2013 Andreas Hehn <hehn@phys.ethz.ch>, ETH Zurich + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// A test of communicators created from groups. + +#include <boost/mpi/environment.hpp> +#include <boost/mpi/communicator.hpp> +#include <boost/mpi/group.hpp> +#include <vector> +#include <algorithm> + +#define BOOST_TEST_MODULE mpi_group_test +#include <boost/test/included/unit_test.hpp> + +namespace mpi = boost::mpi; + +template <typename T> +struct iota +{ + iota() : state(0){}; + T operator()() + { + return state++; + } + T state; +}; + +void group_test(const mpi::communicator& comm) +{ + std::vector<int> grp_a_ranks(comm.size() / 2); + std::generate(grp_a_ranks.begin(),grp_a_ranks.end(),iota<int>()); + + mpi::group grp_a = comm.group().include(grp_a_ranks.begin(),grp_a_ranks.end()); + mpi::group grp_b = comm.group().exclude(grp_a_ranks.begin(),grp_a_ranks.end()); + + mpi::communicator part_a(comm,grp_a); + mpi::communicator part_b(comm,grp_b); + + if(part_a) + { + std::cout << "comm rank: " << comm.rank() << " -> part_a rank:" << part_a.rank() << std::endl; + BOOST_CHECK(part_a.rank() == comm.rank()); + } + if(part_b) + { + std::cout << "comm rank: " << comm.rank() << " -> part_b rank:" << part_b.rank() << std::endl; + BOOST_CHECK(part_b.rank() == comm.rank() - comm.size()/2); + } +} + +BOOST_AUTO_TEST_CASE(group) +{ + mpi::environment env; + mpi::communicator comm; + group_test(comm); +} diff --git a/src/boost/libs/mpi/test/is_mpi_op_test.cpp b/src/boost/libs/mpi/test/is_mpi_op_test.cpp new file mode 100644 index 000000000..d023c3f76 --- /dev/null +++ b/src/boost/libs/mpi/test/is_mpi_op_test.cpp @@ -0,0 +1,33 @@ +// Copyright (C) 2005-2006 Douglas Gregor <doug.gregor@gmail.com> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// A test of the is_mpi_op functionality. +#include <boost/mpi/operations.hpp> +#include <boost/mpi/environment.hpp> +#include <boost/type_traits/is_base_and_derived.hpp> + +#define BOOST_TEST_MODULE mpi_is_mpi_op_test +#include <boost/test/included/unit_test.hpp> + +using namespace boost::mpi; +using namespace std; +using boost::is_base_and_derived; + +BOOST_AUTO_TEST_CASE(mpi_basic_op) +{ + boost::mpi::environment env; + + // Check each predefined MPI_Op type that we support directly. 
+ BOOST_TEST((is_mpi_op<minimum<float>, float>::op() == MPI_MIN)); + BOOST_TEST((is_mpi_op<plus<double>, double>::op() == MPI_SUM)); + BOOST_TEST((is_mpi_op<multiplies<long>, long>::op() == MPI_PROD)); + BOOST_TEST((is_mpi_op<logical_and<int>, int>::op() == MPI_LAND)); + BOOST_TEST((is_mpi_op<bitwise_and<int>, int>::op() == MPI_BAND)); + BOOST_TEST((is_mpi_op<logical_or<int>, int>::op() == MPI_LOR)); + BOOST_TEST((is_mpi_op<bitwise_or<int>, int>::op() == MPI_BOR)); + BOOST_TEST((is_mpi_op<logical_xor<int>, int>::op() == MPI_LXOR)); + BOOST_TEST((is_mpi_op<bitwise_xor<int>, int>::op() == MPI_BXOR)); +} diff --git a/src/boost/libs/mpi/test/mt_init_test.cpp b/src/boost/libs/mpi/test/mt_init_test.cpp new file mode 100644 index 000000000..cfd0f57b4 --- /dev/null +++ b/src/boost/libs/mpi/test/mt_init_test.cpp @@ -0,0 +1,37 @@ +// Copyright (C) 2013 Alain Miniussi <alain.miniussi@oca.eu> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// test threading::level operations + +#include <boost/mpi.hpp> +#include <iostream> +#include <sstream> + +#define BOOST_TEST_MODULE mpi_mt_init +#include <boost/test/included/unit_test.hpp> + +namespace mpi = boost::mpi; + +void +test_mt_init(std::string s) +{ + mpi::threading::level required = mpi::threading::level(-1); + std::istringstream in(s); + in >> required; + BOOST_CHECK(!in.bad()); + BOOST_CHECK(mpi::environment::thread_level() >= mpi::threading::single); + BOOST_CHECK(mpi::environment::thread_level() <= mpi::threading::multiple); +} + +BOOST_AUTO_TEST_CASE(mt_init) +{ + mpi::environment env; + mpi::communicator comm; + test_mt_init("single"); + test_mt_init("funneled"); + test_mt_init("serialized"); + test_mt_init("multiple"); +} diff --git a/src/boost/libs/mpi/test/mt_level_test.cpp b/src/boost/libs/mpi/test/mt_level_test.cpp new file mode 100644 index 000000000..a72e9a8a5 --- /dev/null +++ b/src/boost/libs/mpi/test/mt_level_test.cpp @@ -0,0 +1,108 @@ +// Copyright (C) 2013 Alain Miniussi <alain.miniussi@oca.eu> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// test threading::level operations + +#include <boost/mpi/environment.hpp> +#include <iostream> +#include <sstream> + +#define BOOST_TEST_MODULE mpi_level_test +#include <boost/test/included/unit_test.hpp> + +namespace mpi = boost::mpi; + +void +test_threading_level_io(mpi::threading::level orig) { + std::ostringstream out; + namespace mt = boost::mpi::threading; + mt::level printed = mt::level(-1); + + out << orig; + BOOST_CHECK(out.good()); + std::string orig_str(out.str()); + std::cout << "orig string:" << orig_str << '\n'; + std::istringstream in(orig_str); + in >> printed; + BOOST_CHECK(!in.bad()); + std::cout << "orig: " << orig << ", printed: " << printed << std::endl; + BOOST_CHECK(orig == printed); +} + +void +test_threading_levels_io() { + namespace mt = boost::mpi::threading; + test_threading_level_io(mt::single); + test_threading_level_io(mt::funneled); + test_threading_level_io(mt::serialized); + test_threading_level_io(mt::multiple); +} + +void +test_threading_level_cmp() { + namespace mt = boost::mpi::threading; + BOOST_CHECK(mt::single == mt::single); + BOOST_CHECK(mt::funneled == mt::funneled); + BOOST_CHECK(mt::serialized == mt::serialized); + BOOST_CHECK(mt::multiple == mt::multiple); + + BOOST_CHECK(mt::single != mt::funneled); + BOOST_CHECK(mt::single != mt::serialized); + BOOST_CHECK(mt::single != mt::multiple); + + BOOST_CHECK(mt::funneled != mt::single); + BOOST_CHECK(mt::funneled != mt::serialized); + BOOST_CHECK(mt::funneled != mt::multiple); + + BOOST_CHECK(mt::serialized != mt::single); + BOOST_CHECK(mt::serialized != mt::funneled); + BOOST_CHECK(mt::serialized != mt::multiple); + + BOOST_CHECK(mt::multiple != mt::single); + BOOST_CHECK(mt::multiple != mt::funneled); + BOOST_CHECK(mt::multiple != mt::serialized); + + BOOST_CHECK(mt::single < mt::funneled); + BOOST_CHECK(mt::funneled > mt::single); + BOOST_CHECK(mt::single < mt::serialized); + BOOST_CHECK(mt::serialized > mt::single); + BOOST_CHECK(mt::single < mt::multiple); + BOOST_CHECK(mt::multiple > mt::single); + + BOOST_CHECK(mt::funneled < mt::serialized); + BOOST_CHECK(mt::serialized > mt::funneled); + BOOST_CHECK(mt::funneled < mt::multiple); + BOOST_CHECK(mt::multiple > mt::funneled); + + BOOST_CHECK(mt::serialized < mt::multiple); + BOOST_CHECK(mt::multiple > mt::serialized); + + BOOST_CHECK(mt::single <= mt::single); + BOOST_CHECK(mt::single <= mt::funneled); + BOOST_CHECK(mt::funneled >= mt::single); + BOOST_CHECK(mt::single <= mt::serialized); + BOOST_CHECK(mt::serialized >= mt::single); + BOOST_CHECK(mt::single <= mt::multiple); + BOOST_CHECK(mt::multiple >= mt::single); + + BOOST_CHECK(mt::funneled <= mt::funneled); + BOOST_CHECK(mt::funneled <= mt::serialized); + BOOST_CHECK(mt::serialized >= mt::funneled); + BOOST_CHECK(mt::funneled <= mt::multiple); + BOOST_CHECK(mt::multiple >= mt::funneled); + + BOOST_CHECK(mt::serialized <= mt::serialized); + BOOST_CHECK(mt::serialized <= mt::multiple); + BOOST_CHECK(mt::multiple >= mt::serialized); + + BOOST_CHECK(mt::multiple <= mt::multiple); +} + +BOOST_AUTO_TEST_CASE(mt_level) +{ + test_threading_levels_io(); + test_threading_level_cmp(); +} diff --git a/src/boost/libs/mpi/test/non_blocking_any_source.cpp b/src/boost/libs/mpi/test/non_blocking_any_source.cpp new file mode 100644 index 000000000..dde8f92c6 --- /dev/null +++ b/src/boost/libs/mpi/test/non_blocking_any_source.cpp @@ -0,0 +1,63 @@ +// Copyright (C) 2018 Steffen Hirschmann + +// Use, modification 
and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Test any_source on serialized data +#include <vector> +#include <iostream> +#include <iterator> +#include <boost/mpi.hpp> +#include <boost/serialization/vector.hpp> + +#define BOOST_TEST_MODULE mpi_non_blockin_any_source +#include <boost/test/included/unit_test.hpp> + +namespace mpi = boost::mpi; + +std::string ok(bool b) { + return b ? "ok" : "ko"; +} + +BOOST_AUTO_TEST_CASE(non_blocking_any) +{ + mpi::environment env; + mpi::communicator world; + int rank = world.rank(); +#if BOOST_MPI_VERSION < 3 + if (rank == 0) { + std::cout << "\nExpected failure with MPI standard < 3 (" + << BOOST_MPI_VERSION << "." << BOOST_MPI_SUBVERSION + << " detected)\n\n"; + } + return; +#endif + if (rank == 0) { + std::vector<boost::mpi::request> req; + std::vector<std::vector<int> > data(world.size() - 1); + for (int i = 1; i < world.size(); ++i) { + req.push_back(world.irecv(mpi::any_source, 0, data[i - 1])); + } + boost::mpi::wait_all(req.begin(), req.end()); + std::vector<bool> check(world.size()-1, false); + for (int i = 0; i < world.size() - 1; ++i) { + std::cout << "Process 0 received:" << std::endl; + std::copy(data[i].begin(), data[i].end(), std::ostream_iterator<int>(std::cout, " ")); + std::cout << std::endl; + int idx = data[i].size(); + BOOST_CHECK(std::equal_range(data[i].begin(), data[i].end(), idx) + == std::make_pair(data[i].begin(), data[i].end())); + check[idx-1] = true; + } + for(int i = 0; i < world.size() - 1; ++i) { + std::cout << "Received from " << i+1 << " is " << ok(check[i]) << '\n'; + } + BOOST_CHECK(std::equal_range(check.begin(), check.end(), true) + == std::make_pair(check.begin(), check.end())); + } else { + std::vector<int> vec(rank, rank); + mpi::request req = world.isend(0, 0, vec); + req.wait(); + } +} diff --git a/src/boost/libs/mpi/test/nonblocking_test.cpp b/src/boost/libs/mpi/test/nonblocking_test.cpp new file mode 100644 index 000000000..8e3eb099f --- /dev/null +++ b/src/boost/libs/mpi/test/nonblocking_test.cpp @@ -0,0 +1,247 @@ +// Copyright (C) 2006 Douglas Gregor. + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// A test of the nonblocking point-to-point operations. 
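For orientation, the test that follows drives every wait/test variant over the same basic pattern: post an isend and an irecv as requests, then complete them. A minimal, self-contained sketch of that round trip is given here; the ring-neighbour arithmetic, tag 0, and the int payload are illustrative choices and not part of the Boost test itself.

// Sketch: each rank sends its rank to the right neighbour and receives
// from the left one, completing both requests with wait_all.
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/mpi/nonblocking.hpp>
#include <vector>

int main(int argc, char* argv[])
{
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator comm;

  int next = (comm.rank() + 1) % comm.size();
  int prev = (comm.rank() + comm.size() - 1) % comm.size();

  int outgoing = comm.rank(), incoming = -1;
  std::vector<boost::mpi::request> reqs;
  reqs.push_back(comm.isend(next, 0, outgoing));   // post nonblocking send
  reqs.push_back(comm.irecv(prev, 0, incoming));   // post matching receive
  boost::mpi::wait_all(reqs.begin(), reqs.end());  // block until both finish
  return 0;                                        // incoming == prev here
}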
+#include <boost/mpi/nonblocking.hpp> +#include <boost/mpi/communicator.hpp> +#include <boost/mpi/environment.hpp> +#include "gps_position.hpp" +#include <boost/lexical_cast.hpp> +#include <boost/serialization/string.hpp> +#include <boost/serialization/list.hpp> +#include <iterator> +#include <algorithm> +//#include "debugger.cpp" + +#define BOOST_TEST_MODULE mpi_non_blockin_test +#include <boost/test/included/unit_test.hpp> + +using boost::mpi::communicator; +using boost::mpi::request; +using boost::mpi::status; + +enum method_kind { + mk_wait_any, mk_test_any, mk_wait_all, mk_wait_all_keep, + mk_test_all, mk_test_all_keep, mk_wait_some, mk_wait_some_keep, + mk_test_some, mk_test_some_keep, + mk_test_size +}; + +static const char* method_kind_names[mk_test_size] = { + "wait_any", + "test_any", + "wait_all", + "wait_all (keep results)", + "test_all", + "test_all (keep results)", + "wait_some", + "wait_some (keep results)", + "test_some", + "test_some (keep results)" +}; + + +template<typename T> +void +nonblocking_tests( const communicator& comm, const T* values, int num_values, + const char* kind, bool composite) +{ + nonblocking_test(comm, values, num_values, kind, mk_wait_any); + nonblocking_test(comm, values, num_values, kind, mk_test_any); + //wait_for_debugger(comm); + nonblocking_test(comm, values, num_values, kind, mk_wait_all); + nonblocking_test(comm, values, num_values, kind, mk_wait_all_keep); + if (!composite) { + nonblocking_test(comm, values, num_values, kind, mk_test_all); + nonblocking_test(comm, values, num_values, kind, mk_test_all_keep); + } + nonblocking_test(comm, values, num_values, kind, mk_wait_some); + nonblocking_test(comm, values, num_values, kind, mk_wait_some_keep); + nonblocking_test(comm, values, num_values, kind, mk_test_some); + nonblocking_test(comm, values, num_values, kind, mk_test_some_keep); +} + +template<typename T> +void +nonblocking_test(const communicator& comm, const T* values, int num_values, + const char* kind, method_kind method) +{ + using boost::mpi::wait_any; + using boost::mpi::test_any; + using boost::mpi::wait_all; + using boost::mpi::test_all; + using boost::mpi::wait_some; + using boost::mpi::test_some; + + int next = (comm.rank() + 1) % comm.size(); + int prev = (comm.rank() + comm.size() - 1) % comm.size(); + + if (comm.rank() == 0) { + std::cout << "Testing " << method_kind_names[method] + << " with " << kind << "..."; + std::cout.flush(); + } + + typedef std::pair<status, std::vector<request>::iterator> + status_iterator_pair; + + T incoming_value; + std::vector<T> incoming_values(num_values); + std::vector<request> reqs; + // Send/receive the first value + reqs.push_back(comm.isend(next, 0, values[0])); + reqs.push_back(comm.irecv(prev, 0, incoming_value)); + + if (method != mk_wait_any && method != mk_test_any) { +#ifndef LAM_MPI + // We've run into problems here (with 0-length messages) with + // LAM/MPI on Mac OS X and x86-86 Linux. Will investigate + // further at a later time, but the problem only seems to occur + // when using shared memory, not TCP. 
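// Note: the isend(next, 1)/irecv(prev, 1) pair posted just below carries no
// user data at all; completing those requests only signals that the
// zero-length message arrived, which is the case the LAM/MPI remark above
// refers to.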
+ + // Send/receive an empty message + reqs.push_back(comm.isend(next, 1)); + reqs.push_back(comm.irecv(prev, 1)); +#endif + + // Send/receive an array + reqs.push_back(comm.isend(next, 2, values, num_values)); + reqs.push_back(comm.irecv(prev, 2, &incoming_values.front(), num_values)); + } + + switch (method) { + case mk_wait_any: + if (wait_any(reqs.begin(), reqs.end()).second == reqs.begin()) + reqs[1].wait(); + else + reqs[0].wait(); + break; + + case mk_test_any: + { + boost::optional<status_iterator_pair> result; + do { + result = test_any(reqs.begin(), reqs.end()); + } while (!result); + if (result->second == reqs.begin()) + reqs[1].wait(); + else + reqs[0].wait(); + break; + } + + case mk_wait_all: + wait_all(reqs.begin(), reqs.end()); + break; + + case mk_wait_all_keep: + { + std::vector<status> stats; + wait_all(reqs.begin(), reqs.end(), std::back_inserter(stats)); + } + break; + + case mk_test_all: + while (!test_all(reqs.begin(), reqs.end())) { /* Busy wait */ } + break; + + case mk_test_all_keep: + { + std::vector<status> stats; + while (!test_all(reqs.begin(), reqs.end(), std::back_inserter(stats))) + /* Busy wait */; + } + break; + + case mk_wait_some: + { + std::vector<request>::iterator pos = reqs.end(); + do { + pos = wait_some(reqs.begin(), pos); + } while (pos != reqs.begin()); + } + break; + + case mk_wait_some_keep: + { + std::vector<status> stats; + std::vector<request>::iterator pos = reqs.end(); + do { + pos = wait_some(reqs.begin(), pos, std::back_inserter(stats)).second; + } while (pos != reqs.begin()); + } + break; + + case mk_test_some: + { + std::vector<request>::iterator pos = reqs.end(); + do { + pos = test_some(reqs.begin(), pos); + } while (pos != reqs.begin()); + } + break; + + case mk_test_some_keep: + { + std::vector<status> stats; + std::vector<request>::iterator pos = reqs.end(); + do { + pos = test_some(reqs.begin(), pos, std::back_inserter(stats)).second; + } while (pos != reqs.begin()); + } + break; + + default: + BOOST_CHECK(false); + } + + if (comm.rank() == 0) { + bool okay = true; + + if (!((incoming_value == values[0]))) + okay = false; + + if (method != mk_wait_any && method != mk_test_any + && !std::equal(incoming_values.begin(), incoming_values.end(), + values)) + okay = false; + + if (okay) + std::cout << "OK." << std::endl; + else + std::cerr << "ERROR!" << std::endl; + } + + BOOST_CHECK(incoming_value == values[0]); + + if (method != mk_wait_any && method != mk_test_any) + BOOST_CHECK(std::equal(incoming_values.begin(), incoming_values.end(), + values)); +} + +BOOST_AUTO_TEST_CASE(nonblocking) +{ + boost::mpi::environment env; + communicator comm; + + int int_array[3] = {17, 42, 256}; + nonblocking_tests(comm, int_array, 3, "integers", false); + + gps_position gps_array[2] = { + gps_position(17, 42, .06), + gps_position(42, 17, .06) + }; + nonblocking_tests(comm, gps_array, 2, "gps positions", false); + + std::string string_array[2] = { "Hello", "World" }; + nonblocking_tests(comm, string_array, 2, "strings", true); + + std::list<std::string> lst_of_strings; + for (int i = 0; i < comm.size(); ++i) + lst_of_strings.push_back(boost::lexical_cast<std::string>(i)); + + nonblocking_tests(comm, &lst_of_strings, 1, "list of strings", true); +} diff --git a/src/boost/libs/mpi/test/pointer_test.cpp b/src/boost/libs/mpi/test/pointer_test.cpp new file mode 100644 index 000000000..65428e752 --- /dev/null +++ b/src/boost/libs/mpi/test/pointer_test.cpp @@ -0,0 +1,42 @@ +// Copyright (C) 2005, 2006 Douglas Gregor. 
+ +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + + +// a test of pointer serialization +#include <boost/mpi.hpp> +#include <boost/serialization/shared_ptr.hpp> + +#define BOOST_TEST_MODULE mpi_pointer +#include <boost/test/included/unit_test.hpp> + +class A +{ + public: + int i; + template<class Archive> + void serialize(Archive & ar, const unsigned int version) + { + ar & i; + } +}; + +BOOST_AUTO_TEST_CASE(pointer) +{ + boost::mpi::environment env; + boost::mpi::communicator world; + + if (world.rank() == 0) { + boost::shared_ptr<A> p(new A); + p->i = 42; + world.send(1, 0, p); + } else if (world.rank() == 1) { + boost::shared_ptr<A> p; + world.recv(0, 0, p); + std::cout << p->i << std::endl; + BOOST_CHECK(p->i==42); + } +} + diff --git a/src/boost/libs/mpi/test/python/all_gather_test.py b/src/boost/libs/mpi/test/python/all_gather_test.py new file mode 100644 index 000000000..824dc892f --- /dev/null +++ b/src/boost/libs/mpi/test/python/all_gather_test.py @@ -0,0 +1,25 @@ +# Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>. + +# Use, modification and distribution is subject to the Boost Software +# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +# http://www.boost.org/LICENSE_1_0.txt) + +# Test all_gather() collective. + +import boost.parallel.mpi as mpi +from generators import * + +def all_gather_test(comm, generator, kind): + if comm.rank == 0: print ("Gathering %s..." % (kind,)), + my_value = generator(comm.rank) + result = mpi.all_gather(comm, my_value) + for p in range(0, comm.size): + assert result[p] == generator(p) + if comm.rank == 0: print "OK." + + return + +all_gather_test(mpi.world, int_generator, "integers") +all_gather_test(mpi.world, gps_generator, "GPS positions") +all_gather_test(mpi.world, string_generator, "strings") +all_gather_test(mpi.world, string_list_generator, "list of strings") diff --git a/src/boost/libs/mpi/test/python/all_reduce_test.py b/src/boost/libs/mpi/test/python/all_reduce_test.py new file mode 100644 index 000000000..c3285e65b --- /dev/null +++ b/src/boost/libs/mpi/test/python/all_reduce_test.py @@ -0,0 +1,29 @@ +# Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>. + +# Use, modification and distribution is subject to the Boost Software +# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +# http://www.boost.org/LICENSE_1_0.txt) + +# Test all_reduce() collective. + +import boost.parallel.mpi as mpi +from generators import * + +def all_reduce_test(comm, generator, kind, op, op_kind): + if comm.rank == 0: + print ("Reducing to %s of %s..." % (op_kind, kind)), + my_value = generator(comm.rank) + result = mpi.all_reduce(comm, my_value, op) + expected_result = generator(0); + for p in range(1, comm.size): + expected_result = op(expected_result, generator(p)) + + assert result == expected_result + if comm.rank == 0: + print "OK." 
+ return + +all_reduce_test(mpi.world, int_generator, "integers", lambda x,y:x + y, "sum") +all_reduce_test(mpi.world, int_generator, "integers", lambda x,y:x * y, "product") +all_reduce_test(mpi.world, string_generator, "strings", lambda x,y:x + y, "concatenation") +all_reduce_test(mpi.world, string_list_generator, "list of strings", lambda x,y:x + y, "concatenation") diff --git a/src/boost/libs/mpi/test/python/all_to_all_test.py b/src/boost/libs/mpi/test/python/all_to_all_test.py new file mode 100644 index 000000000..b149bf0d3 --- /dev/null +++ b/src/boost/libs/mpi/test/python/all_to_all_test.py @@ -0,0 +1,30 @@ +# Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>. + +# Use, modification and distribution is subject to the Boost Software +# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +# http://www.boost.org/LICENSE_1_0.txt) + +# Test all_to_all() collective. + +import boost.parallel.mpi as mpi +from generators import * + +def all_to_all_test(comm, generator, kind): + if comm.rank == 0: + print ("All-to-all transmission of %s..." % (kind,)), + + values = list() + for p in range(0, comm.size): + values.append(generator(p)) + result = mpi.all_to_all(comm, values) + + for p in range(0, comm.size): + assert result[p] == generator(comm.rank) + + if comm.rank == 0: print "OK." + return + +all_to_all_test(mpi.world, int_generator, "integers") +all_to_all_test(mpi.world, gps_generator, "GPS positions") +all_to_all_test(mpi.world, string_generator, "strings") +all_to_all_test(mpi.world, string_list_generator, "list of strings") diff --git a/src/boost/libs/mpi/test/python/broadcast_test.py b/src/boost/libs/mpi/test/python/broadcast_test.py new file mode 100644 index 000000000..dbd53d1be --- /dev/null +++ b/src/boost/libs/mpi/test/python/broadcast_test.py @@ -0,0 +1,29 @@ +# Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>. + +# Use, modification and distribution is subject to the Boost Software +# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +# http://www.boost.org/LICENSE_1_0.txt) + +# Test broadcast() collective. + +import boost.parallel.mpi as mpi + +def broadcast_test(comm, value, kind, root): + if comm.rank == root: + print ("Broadcasting %s from root %d..." % (kind, root)), + + got_value = mpi.broadcast(comm, value, root) + assert got_value == value + if comm.rank == root: + print "OK." + return + +broadcast_test(mpi.world, 17, 'integer', 0) +broadcast_test(mpi.world, 17, 'integer', 1) +broadcast_test(mpi.world, 'Hello, World!', 'string', 0) +broadcast_test(mpi.world, 'Hello, World!', 'string', 1) +broadcast_test(mpi.world, ['Hello', 'MPI', 'Python', 'World'], + 'list of strings', 0) +broadcast_test(mpi.world, ['Hello', 'MPI', 'Python', 'World'], + 'list of strings', 1) + diff --git a/src/boost/libs/mpi/test/python/gather_test.py b/src/boost/libs/mpi/test/python/gather_test.py new file mode 100644 index 000000000..d56b3a416 --- /dev/null +++ b/src/boost/libs/mpi/test/python/gather_test.py @@ -0,0 +1,32 @@ +# Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>. + +# Use, modification and distribution is subject to the Boost Software +# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +# http://www.boost.org/LICENSE_1_0.txt) + +# Test gather() collective. + +import boost.parallel.mpi as mpi +from generators import * + +def gather_test(comm, generator, kind, root): + if comm.rank == root: + print ("Gathering %s to root %d..." 
% (kind, root)), + my_value = generator(comm.rank) + result = mpi.gather(comm, my_value, root) + if comm.rank == root: + for p in range(0, comm.size): + assert result[p] == generator(p) + print "OK." + else: + assert result == None + return + +gather_test(mpi.world, int_generator, "integers", 0) +gather_test(mpi.world, int_generator, "integers", 1) +gather_test(mpi.world, gps_generator, "GPS positions", 0) +gather_test(mpi.world, gps_generator, "GPS positions", 1) +gather_test(mpi.world, string_generator, "strings", 0) +gather_test(mpi.world, string_generator, "strings", 1) +gather_test(mpi.world, string_list_generator, "list of strings", 0) +gather_test(mpi.world, string_list_generator, "list of strings", 1) diff --git a/src/boost/libs/mpi/test/python/generators.py b/src/boost/libs/mpi/test/python/generators.py new file mode 100644 index 000000000..8cdd3e056 --- /dev/null +++ b/src/boost/libs/mpi/test/python/generators.py @@ -0,0 +1,23 @@ +# Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>. + +# Use, modification and distribution is subject to the Boost Software +# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +# http://www.boost.org/LICENSE_1_0.txt) + +# Value generators used in the Boost.MPI Python regression tests +def int_generator(p): + return 17 + p + +def gps_generator(p): + return (39 + p, 16, 20.2799) + +def string_generator(p): + result = "%d rosebud" % p; + if p != 1: result = result + 's' + return result + +def string_list_generator(p): + result = list() + for i in range(0,p): + result.append(str(i)) + return result diff --git a/src/boost/libs/mpi/test/python/nonblocking_test.py b/src/boost/libs/mpi/test/python/nonblocking_test.py new file mode 100644 index 000000000..73b451c53 --- /dev/null +++ b/src/boost/libs/mpi/test/python/nonblocking_test.py @@ -0,0 +1,131 @@ +# (C) Copyright 2007 +# Andreas Kloeckner <inform -at- tiker.net> +# +# Use, modification and distribution is subject to the Boost Software +# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +# http://www.boost.org/LICENSE_1_0.txt) +# +# Authors: Andreas Kloeckner + + + + +import boost.mpi as mpi +import random +import sys + +MAX_GENERATIONS = 20 +TAG_DEBUG = 0 +TAG_DATA = 1 +TAG_TERMINATE = 2 +TAG_PROGRESS_REPORT = 3 + + + + +class TagGroupListener: + """Class to help listen for only a given set of tags. 
+ + This is contrived: Typicallly you could just listen for + mpi.any_tag and filter.""" + def __init__(self, comm, tags): + self.tags = tags + self.comm = comm + self.active_requests = {} + + def wait(self): + for tag in self.tags: + if tag not in self.active_requests: + self.active_requests[tag] = self.comm.irecv(tag=tag) + requests = mpi.RequestList(self.active_requests.values()) + data, status, index = mpi.wait_any(requests) + del self.active_requests[status.tag] + return status, data + + def cancel(self): + for r in self.active_requests.itervalues(): + r.cancel() + #r.wait() + self.active_requests = {} + + + +def rank0(): + sent_histories = (mpi.size-1)*15 + print "sending %d packets on their way" % sent_histories + send_reqs = mpi.RequestList() + for i in range(sent_histories): + dest = random.randrange(1, mpi.size) + send_reqs.append(mpi.world.isend(dest, TAG_DATA, [])) + + mpi.wait_all(send_reqs) + + completed_histories = [] + progress_reports = {} + dead_kids = [] + + tgl = TagGroupListener(mpi.world, + [TAG_DATA, TAG_DEBUG, TAG_PROGRESS_REPORT, TAG_TERMINATE]) + + def is_complete(): + for i in progress_reports.values(): + if i != sent_histories: + return False + return len(dead_kids) == mpi.size-1 + + while True: + status, data = tgl.wait() + + if status.tag == TAG_DATA: + #print "received completed history %s from %d" % (data, status.source) + completed_histories.append(data) + if len(completed_histories) == sent_histories: + print "all histories received, exiting" + for rank in range(1, mpi.size): + mpi.world.send(rank, TAG_TERMINATE, None) + elif status.tag == TAG_PROGRESS_REPORT: + progress_reports[len(data)] = progress_reports.get(len(data), 0) + 1 + elif status.tag == TAG_DEBUG: + print "[DBG %d] %s" % (status.source, data) + elif status.tag == TAG_TERMINATE: + dead_kids.append(status.source) + else: + print "unexpected tag %d from %d" % (status.tag, status.source) + + if is_complete(): + break + + print "OK" + +def comm_rank(): + while True: + data, status = mpi.world.recv(return_status=True) + if status.tag == TAG_DATA: + mpi.world.send(0, TAG_PROGRESS_REPORT, data) + data.append(mpi.rank) + if len(data) >= MAX_GENERATIONS: + dest = 0 + else: + dest = random.randrange(1, mpi.size) + mpi.world.send(dest, TAG_DATA, data) + elif status.tag == TAG_TERMINATE: + from time import sleep + mpi.world.send(0, TAG_TERMINATE, 0) + break + else: + print "[DIRECTDBG %d] unexpected tag %d from %d" % (mpi.rank, status.tag, status.source) + + +def main(): + # this program sends around messages consisting of lists of visited nodes + # randomly. After MAX_GENERATIONS, they are returned to rank 0. + + if mpi.rank == 0: + rank0() + else: + comm_rank() + + + +if __name__ == "__main__": + main() diff --git a/src/boost/libs/mpi/test/python/reduce_test.py b/src/boost/libs/mpi/test/python/reduce_test.py new file mode 100644 index 000000000..65f09f5ba --- /dev/null +++ b/src/boost/libs/mpi/test/python/reduce_test.py @@ -0,0 +1,31 @@ +# Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>. + +# Use, modification and distribution is subject to the Boost Software +# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +# http://www.boost.org/LICENSE_1_0.txt) + +# Test reduce() collective. + +import boost.parallel.mpi as mpi +from generators import * + +def reduce_test(comm, generator, kind, op, op_kind, root): + if comm.rank == root: + print ("Reducing to %s of %s at root %d..." 
% (op_kind, kind, root)), + my_value = generator(comm.rank) + result = mpi.reduce(comm, my_value, op, root) + if comm.rank == root: + expected_result = generator(0); + for p in range(1, comm.size): + expected_result = op(expected_result, generator(p)) + assert result == expected_result + print "OK." + else: + assert result == None + return + +reduce_test(mpi.world, int_generator, "integers", lambda x,y:x + y, "sum", 0) +reduce_test(mpi.world, int_generator, "integers", lambda x,y:x * y, "product", 1) +reduce_test(mpi.world, int_generator, "integers", min, "minimum", 0) +reduce_test(mpi.world, string_generator, "strings", lambda x,y:x + y, "concatenation", 0) +reduce_test(mpi.world, string_list_generator, "list of strings", lambda x,y:x + y, "concatenation", 0) diff --git a/src/boost/libs/mpi/test/python/ring_test.py b/src/boost/libs/mpi/test/python/ring_test.py new file mode 100644 index 000000000..3c8b5b92f --- /dev/null +++ b/src/boost/libs/mpi/test/python/ring_test.py @@ -0,0 +1,42 @@ +# Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>. + +# Use, modification and distribution is subject to the Boost Software +# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +# http://www.boost.org/LICENSE_1_0.txt) + +# Test basic communication. + +import boost.parallel.mpi as mpi + +def ring_test(comm, value, kind, root): + next_peer = (comm.rank + 1) % comm.size; + prior_peer = (comm.rank + comm.size - 1) % comm.size; + + if comm.rank == root: + print ("Passing %s around a ring from root %d..." % (kind, root)), + comm.send(next_peer, 0, value) + (other_value, stat) = comm.recv(return_status = True) + assert value == other_value + assert stat.source == prior_peer + assert stat.tag == 0 + else: + msg = comm.probe() + other_value = comm.recv(msg.source, msg.tag) + assert value == other_value + comm.send(next_peer, 0, other_value) + + comm.barrier() + if comm.rank == root: + print "OK" + pass + +if mpi.world.size < 2: + print "ERROR: ring_test.py must be executed with more than one process" + mpi.world.abort(-1); + +ring_test(mpi.world, 17, 'integers', 0) +ring_test(mpi.world, 17, 'integers', 1) +ring_test(mpi.world, 'Hello, World!', 'string', 0) +ring_test(mpi.world, 'Hello, World!', 'string', 1) +ring_test(mpi.world, ['Hello', 'MPI', 'Python', 'World'], 'list of strings', 0) +ring_test(mpi.world, ['Hello', 'MPI', 'Python', 'World'], 'list of strings', 1) diff --git a/src/boost/libs/mpi/test/python/scan_test.py b/src/boost/libs/mpi/test/python/scan_test.py new file mode 100644 index 000000000..193a6a448 --- /dev/null +++ b/src/boost/libs/mpi/test/python/scan_test.py @@ -0,0 +1,29 @@ +# Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>. + +# Use, modification and distribution is subject to the Boost Software +# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +# http://www.boost.org/LICENSE_1_0.txt) + +# Test scan() collective. + +import boost.parallel.mpi as mpi +from generators import * + +def scan_test(comm, generator, kind, op, op_kind): + if comm.rank == 0: + print ("Prefix reduction to %s of %s..." % (op_kind, kind)), + my_value = generator(comm.rank) + result = mpi.scan(comm, my_value, op) + expected_result = generator(0); + for p in range(1, comm.rank+1): + expected_result = op(expected_result, generator(p)) + + assert result == expected_result + if comm.rank == 0: + print "OK." 
+ return + +scan_test(mpi.world, int_generator, "integers", lambda x,y:x + y, "sum") +scan_test(mpi.world, int_generator, "integers", lambda x,y:x * y, "product") +scan_test(mpi.world, string_generator, "strings", lambda x,y:x + y, "concatenation") +scan_test(mpi.world, string_list_generator, "list of strings", lambda x,y:x + y, "concatenation") diff --git a/src/boost/libs/mpi/test/python/scatter_test.py b/src/boost/libs/mpi/test/python/scatter_test.py new file mode 100644 index 000000000..e0bad9673 --- /dev/null +++ b/src/boost/libs/mpi/test/python/scatter_test.py @@ -0,0 +1,36 @@ +# Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>. + +# Use, modification and distribution is subject to the Boost Software +# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +# http://www.boost.org/LICENSE_1_0.txt) + +# Test scatter() collective. + +import boost.parallel.mpi as mpi +from generators import * + +def scatter_test(comm, generator, kind, root): + if comm.rank == root: + print ("Scattering %s from root %d..." % (kind, root)), + + if comm.rank == root: + values = list() + for p in range(0, comm.size): + values.append(generator(p)) + result = mpi.scatter(comm, values, root = root) + else: + result = mpi.scatter(comm, root = root); + + assert result == generator(comm.rank) + + if comm.rank == root: print "OK." + return + +scatter_test(mpi.world, int_generator, "integers", 0) +scatter_test(mpi.world, int_generator, "integers", 1) +scatter_test(mpi.world, gps_generator, "GPS positions", 0) +scatter_test(mpi.world, gps_generator, "GPS positions", 1) +scatter_test(mpi.world, string_generator, "strings", 0) +scatter_test(mpi.world, string_generator, "strings", 1) +scatter_test(mpi.world, string_list_generator, "list of strings", 0) +scatter_test(mpi.world, string_list_generator, "list of strings", 1) diff --git a/src/boost/libs/mpi/test/python/skeleton_content_test.cpp b/src/boost/libs/mpi/test/python/skeleton_content_test.cpp new file mode 100644 index 000000000..7ccae00ef --- /dev/null +++ b/src/boost/libs/mpi/test/python/skeleton_content_test.cpp @@ -0,0 +1,37 @@ +// (C) Copyright 2006 Douglas Gregor <doug.gregor -at- gmail.com> + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// Authors: Douglas Gregor + +#include <boost/parallel/mpi/python.hpp> +#include <boost/python.hpp> +#include <boost/serialization/list.hpp> +using namespace boost::python; + +template<typename T> +boost::python::list list_to_python(const std::list<T>& value) { + boost::python::list result; + for (typename std::list<T>::const_iterator i = value.begin(); + i != value.end(); ++i) + result.append(*i); + return result; +} + +BOOST_PYTHON_MODULE(skeleton_content) +{ + using boost::python::arg; + + class_<std::list<int> >("list_int") + .def("push_back", &std::list<int>::push_back, arg("value")) + .def("pop_back", &std::list<int>::pop_back) + .def("reverse", &std::list<int>::reverse) + .def(boost::python::self == boost::python::self) + .def(boost::python::self != boost::python::self) + .add_property("size", &std::list<int>::size) + .def("to_python", &list_to_python<int>); + + boost::parallel::mpi::python::register_skeleton_and_content<std::list<int> >(); +} diff --git a/src/boost/libs/mpi/test/python/skeleton_content_test.py b/src/boost/libs/mpi/test/python/skeleton_content_test.py new file mode 100644 index 000000000..1dfde3cf7 --- /dev/null +++ b/src/boost/libs/mpi/test/python/skeleton_content_test.py @@ -0,0 +1,75 @@ +# Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>. + +# Use, modification and distribution is subject to the Boost Software +# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +# http://www.boost.org/LICENSE_1_0.txt) + +# Test skeleton/content + +import boost.parallel.mpi as mpi +import skeleton_content + +def test_skeleton_and_content(comm, root, manual_broadcast = True): + assert manual_broadcast + + # Setup data + list_size = comm.size + 7 + original_list = skeleton_content.list_int() + for i in range(0,list_size): + original_list.push_back(i) + + if comm.rank == root: + # Broadcast skeleton + print ("Broadcasting integer list skeleton from root %d..." % (root)), + if manual_broadcast: + for p in range(0,comm.size): + if p != comm.rank: + comm.send(p, 0, value = mpi.skeleton(original_list)) + print "OK." + + # Broadcast content + print ("Broadcasting integer list content from root %d..." % (root)), + if manual_broadcast: + for p in range(0,comm.size): + if p != comm.rank: + comm.send(p, 0, value = mpi.get_content(original_list)) + + print "OK." + + # Broadcast reversed content + original_list.reverse() + print ("Broadcasting reversed integer list content from root %d..." % (root)), + if manual_broadcast: + for p in range(0,comm.size): + if p != comm.rank: + comm.send(p, 0, value = mpi.get_content(original_list)) + + print "OK." + else: + # Allocate some useless data, to try to get the addresses of + # the underlying lists used later to be different across + # processors. 
+ junk_list = skeleton_content.list_int() + for i in range(0,comm.rank * 3 + 1): + junk_list.push_back(i) + + # Receive the skeleton of the list + if manual_broadcast: + transferred_list_skeleton = comm.recv(root, 0) + assert transferred_list_skeleton.object.size == list_size + + # Receive the content and check it + transferred_list = transferred_list_skeleton.object + if manual_broadcast: + comm.recv(root, 0, mpi.get_content(transferred_list)) + assert transferred_list == original_list + + # Receive the content (again) and check it + original_list.reverse() + if manual_broadcast: + comm.recv(root, 0, mpi.get_content(transferred_list)) + assert transferred_list == original_list + + +test_skeleton_and_content(mpi.world, 0) +test_skeleton_and_content(mpi.world, 1) diff --git a/src/boost/libs/mpi/test/reduce_test.cpp b/src/boost/libs/mpi/test/reduce_test.cpp new file mode 100644 index 000000000..44532cd5b --- /dev/null +++ b/src/boost/libs/mpi/test/reduce_test.cpp @@ -0,0 +1,237 @@ +// Copyright (C) 2005, 2006 Douglas Gregor <doug.gregor -at- gmail.com>. + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// A test of the reduce() collective. +#include <boost/mpi/collectives/reduce.hpp> +#include <boost/mpi/communicator.hpp> +#include <boost/mpi/environment.hpp> +#include <algorithm> +#include <boost/serialization/string.hpp> +#include <boost/iterator/counting_iterator.hpp> +#include <boost/lexical_cast.hpp> +#include <numeric> + +#define BOOST_TEST_MODULE mpi_reduce_test +#include <boost/test/included/unit_test.hpp> + +using boost::mpi::communicator; + +// A simple point class that we can build, add, compare, and +// serialize. 
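// (Specializing is_mpi_datatype<point> a little further below lets Boost.MPI
// describe this struct to MPI as a native datatype derived from the same
// serialize() member, so reduce() can combine points with the user-defined
// operator+ without packing each value into a byte buffer first.)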
+struct point +{ + point() : x(0), y(0), z(0) { } + point(int x, int y, int z) : x(x), y(y), z(z) { } + + int x; + int y; + int z; + + private: + template<typename Archiver> + void serialize(Archiver& ar, unsigned int /*version*/) + { + ar & x & y & z; + } + + friend class boost::serialization::access; +}; + +std::ostream& operator<<(std::ostream& out, const point& p) +{ + return out << p.x << ' ' << p.y << ' ' << p.z; +} + +bool operator==(const point& p1, const point& p2) +{ + return p1.x == p2.x && p1.y == p2.y && p1.z == p2.z; +} + +bool operator!=(const point& p1, const point& p2) +{ + return !(p1 == p2); +} + +point operator+(const point& p1, const point& p2) +{ + return point(p1.x + p2.x, p1.y + p2.y, p1.z + p2.z); +} + +namespace boost { namespace mpi { + + template <> + struct is_mpi_datatype<point> : public mpl::true_ { }; + +} } // end namespace boost::mpi + +template<typename Generator, typename Op> +void +reduce_test(const communicator& comm, Generator generator, + const char* type_kind, Op op, const char* op_kind, + typename Generator::result_type init, + int root = -1) +{ + typedef typename Generator::result_type value_type; + value_type value = generator(comm.rank()); + + if (root == -1) { + for (root = 0; root < comm.size(); ++root) + reduce_test(comm, generator, type_kind, op, op_kind, init, root); + } else { + using boost::mpi::reduce; + + if (comm.rank() == root) { + std::cout << "Reducing to " << op_kind << " of " << type_kind + << " at root " << root << "..."; + std::cout.flush(); + + value_type result_value; + reduce(comm, value, result_value, op, root); + + // Compute expected result + std::vector<value_type> generated_values; + for (int p = 0; p < comm.size(); ++p) + generated_values.push_back(generator(p)); + value_type expected_result = std::accumulate(generated_values.begin(), + generated_values.end(), + init, op); + BOOST_CHECK(result_value == expected_result); + if (result_value == expected_result) + std::cout << "OK." 
<< std::endl; + } else { + reduce(comm, value, op, root); + } + } + + (comm.barrier)(); +} + +// Generates integers to test with reduce() +struct int_generator +{ + typedef int result_type; + + int_generator(int base = 1) : base(base) { } + + int operator()(int p) const { return base + p; } + + private: + int base; +}; + +// Generate points to test with reduce() +struct point_generator +{ + typedef point result_type; + + point_generator(point origin) : origin(origin) { } + + point operator()(int p) const + { + return point(origin.x + 1, origin.y + 1, origin.z + 1); + } + + private: + point origin; +}; + +struct string_generator +{ + typedef std::string result_type; + + std::string operator()(int p) const + { + std::string result = boost::lexical_cast<std::string>(p); + result += " rosebud"; + if (p != 1) result += 's'; + return result; + } +}; + +struct secret_int_bit_and +{ + int operator()(int x, int y) const { return x & y; } +}; + +struct wrapped_int +{ + wrapped_int() : value(0) { } + explicit wrapped_int(int value) : value(value) { } + + template<typename Archive> + void serialize(Archive& ar, unsigned int /* version */) + { + ar & value; + } + + int value; +}; + +wrapped_int operator+(const wrapped_int& x, const wrapped_int& y) +{ + return wrapped_int(x.value + y.value); +} + +bool operator==(const wrapped_int& x, const wrapped_int& y) +{ + return x.value == y.value; +} + +// Generates wrapped_its to test with reduce() +struct wrapped_int_generator +{ + typedef wrapped_int result_type; + + wrapped_int_generator(int base = 1) : base(base) { } + + wrapped_int operator()(int p) const { return wrapped_int(base + p); } + + private: + int base; +}; + +namespace boost { namespace mpi { + +// Make std::plus<wrapped_int> commutative. +template<> +struct is_commutative<std::plus<wrapped_int>, wrapped_int> + : mpl::true_ { }; + +} } // end namespace boost::mpi + +BOOST_AUTO_TEST_CASE(reduce_check) +{ + using namespace boost::mpi; + environment env; + + communicator comm; + + // Built-in MPI datatypes with built-in MPI operations + reduce_test(comm, int_generator(), "integers", std::plus<int>(), "sum", 0); + reduce_test(comm, int_generator(), "integers", std::multiplies<int>(), + "product", 1); + reduce_test(comm, int_generator(), "integers", maximum<int>(), + "maximum", 0); + reduce_test(comm, int_generator(), "integers", minimum<int>(), + "minimum", 2); + + // User-defined MPI datatypes with operations that have the + // same name as built-in operations. + reduce_test(comm, point_generator(point(0,0,0)), "points", + std::plus<point>(), "sum", point()); + + // Built-in MPI datatypes with user-defined operations + reduce_test(comm, int_generator(17), "integers", secret_int_bit_and(), + "bitwise and", -1); + + // Arbitrary types with user-defined, commutative operations. + reduce_test(comm, wrapped_int_generator(17), "wrapped integers", + std::plus<wrapped_int>(), "sum", wrapped_int(0)); + + // Arbitrary types with (non-commutative) user-defined operations + reduce_test(comm, string_generator(), "strings", + std::plus<std::string>(), "concatenation", std::string()); +} diff --git a/src/boost/libs/mpi/test/ring_test.cpp b/src/boost/libs/mpi/test/ring_test.cpp new file mode 100644 index 000000000..573d3d387 --- /dev/null +++ b/src/boost/libs/mpi/test/ring_test.cpp @@ -0,0 +1,124 @@ +// Copyright (C) 2005, 2006 Douglas Gregor. + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. 
(See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// A test of the communicator that passes data around a ring and +// verifies that the same data makes it all the way. Should test all +// of the various kinds of data that can be sent (primitive types, POD +// types, serializable objects, etc.) +#include <boost/mpi/communicator.hpp> +#include <boost/mpi/environment.hpp> +#include <algorithm> +#include "gps_position.hpp" +#include <boost/serialization/string.hpp> +#include <boost/serialization/list.hpp> +//#include "debugger.cpp" + +#define BOOST_TEST_MODULE mpi_reduce_ring +#include <boost/test/included/unit_test.hpp> + +using boost::mpi::communicator; +using boost::mpi::status; + +template<typename T> +void +ring_test(const communicator& comm, const T& pass_value, const char* kind, + int root = 0) +{ + T transferred_value; + + int rank = comm.rank(); + int size = comm.size(); + + if (rank == root) { + std::cout << "Passing " << kind << " around a ring from root " << root + << "..."; + comm.send((rank + 1) % size, 0, pass_value); + comm.recv((rank + size - 1) % size, 0, transferred_value); + BOOST_CHECK(transferred_value == pass_value); + if (transferred_value == pass_value) std::cout << " OK." << std::endl; + } else { + comm.recv((rank + size - 1) % size, 0, transferred_value); + BOOST_CHECK(transferred_value == pass_value); + comm.send((rank + 1) % size, 0, transferred_value); + } + + (comm.barrier)(); +} + + +template<typename T> +void +ring_array_test(const communicator& comm, const T* pass_values, + int n, const char* kind, int root = 0) +{ + T* transferred_values = new T[n]; + int rank = comm.rank(); + int size = comm.size(); + + if (rank == root) { + + std::cout << "Passing " << kind << " array around a ring from root " + << root << "..."; + comm.send((rank + 1) % size, 0, pass_values, n); + comm.recv((rank + size - 1) % size, 0, transferred_values, n); + bool okay = std::equal(pass_values, pass_values + n, + transferred_values); + BOOST_CHECK(okay); + if (okay) std::cout << " OK." 
<< std::endl; + } else { + status stat = comm.probe(boost::mpi::any_source, 0); + boost::optional<int> num_values = stat.template count<T>(); + if (boost::mpi::is_mpi_datatype<T>()) + BOOST_CHECK(num_values && *num_values == n); + else + BOOST_CHECK(!num_values || *num_values == n); + comm.recv(stat.source(), 0, transferred_values, n); + BOOST_CHECK(std::equal(pass_values, pass_values + n, + transferred_values)); + comm.send((rank + 1) % size, 0, transferred_values, n); + } + (comm.barrier)(); + delete [] transferred_values; +} + +enum color_t {red, green, blue}; +BOOST_IS_MPI_DATATYPE(color_t) + +BOOST_AUTO_TEST_CASE(ring) +{ + boost::mpi::environment env; + communicator comm; + + BOOST_TEST_REQUIRE(comm.size() > 1); + + // Check transfer of individual objects + ring_test(comm, 17, "integers", 0); + ring_test(comm, 17, "integers", 1); + ring_test(comm, red, "enums", 1); + ring_test(comm, red, "enums", 1); + ring_test(comm, gps_position(39,16,20.2799), "GPS positions", 0); + ring_test(comm, gps_position(26,25,30.0), "GPS positions", 1); + ring_test(comm, std::string("Rosie"), "string", 0); + + std::list<std::string> strings; + strings.push_back("Hello"); + strings.push_back("MPI"); + strings.push_back("World"); + ring_test(comm, strings, "list of strings", 1); + + // Check transfer of arrays + int int_array[2] = { 17, 42 }; + ring_array_test(comm, int_array, 2, "integer", 1); + gps_position gps_position_array[2] = { + gps_position(39,16,20.2799), + gps_position(26,25,30.0) + }; + ring_array_test(comm, gps_position_array, 2, "GPS position", 1); + + std::string string_array[3] = { "Hello", "MPI", "World" }; + ring_array_test(comm, string_array, 3, "string", 0); + ring_array_test(comm, string_array, 3, "string", 1); +} diff --git a/src/boost/libs/mpi/test/scan_test.cpp b/src/boost/libs/mpi/test/scan_test.cpp new file mode 100644 index 000000000..579ce8ded --- /dev/null +++ b/src/boost/libs/mpi/test/scan_test.cpp @@ -0,0 +1,228 @@ +// Copyright (C) 2005, 2006 Douglas Gregor <doug.gregor -at- gmail.com>. + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// A test of the scan() collective. +#include <boost/mpi/collectives/scan.hpp> +#include <boost/mpi/communicator.hpp> +#include <boost/mpi/environment.hpp> +#include <algorithm> +#include <boost/serialization/string.hpp> +#include <boost/iterator/counting_iterator.hpp> +#include <boost/lexical_cast.hpp> +#include <numeric> + +#define BOOST_TEST_MODULE mpi_scan_test +#include <boost/test/included/unit_test.hpp> + + +using boost::mpi::communicator; + +// A simple point class that we can build, add, compare, and +// serialize. 
+struct point +{ + point() : x(0), y(0), z(0) { } + point(int x, int y, int z) : x(x), y(y), z(z) { } + + int x; + int y; + int z; + + private: + template<typename Archiver> + void serialize(Archiver& ar, unsigned int /*version*/) + { + ar & x & y & z; + } + + friend class boost::serialization::access; +}; + +std::ostream& operator<<(std::ostream& out, const point& p) +{ + return out << p.x << ' ' << p.y << ' ' << p.z; +} + +bool operator==(const point& p1, const point& p2) +{ + return p1.x == p2.x && p1.y == p2.y && p1.z == p2.z; +} + +bool operator!=(const point& p1, const point& p2) +{ + return !(p1 == p2); +} + +point operator+(const point& p1, const point& p2) +{ + return point(p1.x + p2.x, p1.y + p2.y, p1.z + p2.z); +} + +namespace boost { namespace mpi { + + template <> + struct is_mpi_datatype<point> : public mpl::true_ { }; + +} } // end namespace boost::mpi + +template<typename Generator, typename Op> +void +scan_test(const communicator& comm, Generator generator, + const char* type_kind, Op op, const char* op_kind) +{ + typedef typename Generator::result_type value_type; + value_type value = generator(comm.rank()); + using boost::mpi::scan; + + if (comm.rank() == 0) { + std::cout << "Prefix reducing to " << op_kind << " of " << type_kind + << "..."; + std::cout.flush(); + } + + value_type result_value; + scan(comm, value, result_value, op); + value_type scan_result = scan(comm, value, op); + BOOST_CHECK(scan_result == result_value); + + // Compute expected result + std::vector<value_type> generated_values; + for (int p = 0; p < comm.size(); ++p) + generated_values.push_back(generator(p)); + std::vector<value_type> expected_results(comm.size()); + std::partial_sum(generated_values.begin(), generated_values.end(), + expected_results.begin(), op); + BOOST_CHECK(result_value == expected_results[comm.rank()]); + if (comm.rank() == 0) std::cout << "Done." 
<< std::endl; + + (comm.barrier)(); +} + +// Generates integers to test with scan() +struct int_generator +{ + typedef int result_type; + + int_generator(int base = 1) : base(base) { } + + int operator()(int p) const { return base + p; } + + private: + int base; +}; + +// Generate points to test with scan() +struct point_generator +{ + typedef point result_type; + + point_generator(point origin) : origin(origin) { } + + point operator()(int p) const + { + return point(origin.x + 1, origin.y + 1, origin.z + 1); + } + + private: + point origin; +}; + +struct string_generator +{ + typedef std::string result_type; + + std::string operator()(int p) const + { + std::string result = boost::lexical_cast<std::string>(p); + result += " rosebud"; + if (p != 1) result += 's'; + return result; + } +}; + +struct secret_int_bit_and +{ + int operator()(int x, int y) const { return x & y; } +}; + +struct wrapped_int +{ + wrapped_int() : value(0) { } + explicit wrapped_int(int value) : value(value) { } + + template<typename Archive> + void serialize(Archive& ar, unsigned int /* version */) + { + ar & value; + } + + int value; +}; + +wrapped_int operator+(const wrapped_int& x, const wrapped_int& y) +{ + return wrapped_int(x.value + y.value); +} + +bool operator==(const wrapped_int& x, const wrapped_int& y) +{ + return x.value == y.value; +} + +// Generates wrapped_its to test with scan() +struct wrapped_int_generator +{ + typedef wrapped_int result_type; + + wrapped_int_generator(int base = 1) : base(base) { } + + wrapped_int operator()(int p) const { return wrapped_int(base + p); } + + private: + int base; +}; + +namespace boost { namespace mpi { + +// Make std::plus<wrapped_int> commutative. +template<> +struct is_commutative<std::plus<wrapped_int>, wrapped_int> + : mpl::true_ { }; + +} } // end namespace boost::mpi + +BOOST_AUTO_TEST_CASE(scan_check) +{ + using namespace boost::mpi; + environment env; + communicator comm; + + // Built-in MPI datatypes with built-in MPI operations + scan_test(comm, int_generator(), "integers", std::plus<int>(), "sum"); + scan_test(comm, int_generator(), "integers", std::multiplies<int>(), + "product"); + scan_test(comm, int_generator(), "integers", maximum<int>(), + "maximum"); + scan_test(comm, int_generator(), "integers", minimum<int>(), + "minimum"); + + // User-defined MPI datatypes with operations that have the + // same name as built-in operations. + scan_test(comm, point_generator(point(0,0,0)), "points", + std::plus<point>(), "sum"); + + // Built-in MPI datatypes with user-defined operations + scan_test(comm, int_generator(17), "integers", secret_int_bit_and(), + "bitwise and"); + + // Arbitrary types with user-defined, commutative operations. + scan_test(comm, wrapped_int_generator(17), "wrapped integers", + std::plus<wrapped_int>(), "sum"); + + // Arbitrary types with (non-commutative) user-defined operations + scan_test(comm, string_generator(), "strings", + std::plus<std::string>(), "concatenation"); +} diff --git a/src/boost/libs/mpi/test/scatter_test.cpp b/src/boost/libs/mpi/test/scatter_test.cpp new file mode 100644 index 000000000..f116bb342 --- /dev/null +++ b/src/boost/libs/mpi/test/scatter_test.cpp @@ -0,0 +1,233 @@ +// Copyright (C) 2005, 2006 Douglas Gregor. + +// Use, modification and distribution is subject to the Boost Software +// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// A test of the scatter() and scatterv() collectives. 
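As a quick reference for the calling conventions exercised below: the root passes the full data set while the other ranks pass only their output slot, and scatterv additionally takes per-rank sizes. A minimal sketch using the same overloads as the test; the payload values and the choice of root are illustrative only.

#include <boost/mpi/collectives/scatter.hpp>
#include <boost/mpi/collectives/scatterv.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <vector>

int main(int argc, char* argv[])
{
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator comm;
  const int root = 0;

  // scatter(): exactly one value per rank.
  int mine = 0;
  if (comm.rank() == root) {
    std::vector<int> all;
    for (int p = 0; p < comm.size(); ++p)
      all.push_back(100 + p);                      // value destined for rank p
    boost::mpi::scatter(comm, all, mine, root);    // root supplies the vector
  } else {
    boost::mpi::scatter(comm, mine, root);         // others only receive
  }

  // scatterv(): rank p receives p+1 values.
  std::vector<int> chunk(comm.rank() + 1);
  if (comm.rank() == root) {
    std::vector<int> values;
    std::vector<int> sizes(comm.size());
    for (int p = 0; p < comm.size(); ++p) {
      sizes[p] = p + 1;
      values.insert(values.end(), p + 1, 100 + p); // p+1 copies for rank p
    }
    boost::mpi::scatterv(comm, values, sizes, &chunk[0], root);
  } else {
    boost::mpi::scatterv(comm, &chunk[0], comm.rank() + 1, root);
  }
  return 0;
}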
+#include <iterator> +#include <boost/mpi/collectives/scatter.hpp> +#include <boost/mpi/collectives/scatterv.hpp> +#include <boost/mpi/communicator.hpp> +#include <boost/mpi/environment.hpp> +#include "gps_position.hpp" +#include <boost/serialization/string.hpp> +#include <boost/serialization/list.hpp> +#include <boost/iterator/counting_iterator.hpp> +#include <boost/lexical_cast.hpp> + +#define BOOST_TEST_MODULE mpi_scatter +#include <boost/test/included/unit_test.hpp> + +using namespace boost::mpi; + +template<typename Generator> +void +scatter_test(const communicator& comm, Generator generator, + const char* kind, int root = -1) +{ + typedef typename Generator::result_type value_type; + + if (root == -1) { + for (root = 0; root < comm.size(); ++root) + scatter_test(comm, generator, kind, root); + } else { + using boost::mpi::scatter; + + value_type value; + + if (comm.rank() == root) { + std::vector<value_type> values; + + for (int p = 0; p < comm.size(); ++p) + values.push_back(generator(p)); + + std::cout << "Scattering " << kind << " from root " + << root << "..." << std::endl; + + scatter(comm, values, value, root); + } else { + scatter(comm, value, root); + } + + BOOST_CHECK(value == generator(comm.rank())); + } + + comm.barrier(); +} + + +// +// Generators to test with scatter/scatterv +// +struct int_generator +{ + typedef int result_type; + + int operator()(int p) const { return 17 + p; } +}; + +struct gps_generator +{ + typedef gps_position result_type; + + gps_position operator()(int p) const + { + return gps_position(39 + p, 16, 20.2799); + } +}; + +struct string_generator +{ + typedef std::string result_type; + + std::string operator()(int p) const + { + std::string result = boost::lexical_cast<std::string>(p); + result += " rosebud"; + if (p != 1) result += 's'; + return result; + } +}; + +struct string_list_generator +{ + typedef std::list<std::string> result_type; + + std::list<std::string> operator()(int p) const + { + std::list<std::string> result; + for (int i = 0; i <= p; ++i) { + std::string value = boost::lexical_cast<std::string>(i); + result.push_back(value); + } + return result; + } +}; + +std::ostream& +operator<<(std::ostream& out, std::list<std::string> const& l) { + out << '['; + std::copy(l.begin(), l.end(), std::ostream_iterator<std::string>(out, " ")); + out << ']'; + return out; +} + +template<typename Generator> +void +scatterv_test(const communicator& comm, Generator generator, + const char* kind, int root = -1) +{ + typedef typename Generator::result_type value_type; + + if (root == -1) { + for (root = 0; root < comm.size(); ++root) + scatterv_test(comm, generator, kind, root); + } else { + using boost::mpi::scatterv; + + int mysize = comm.rank() + 1; + std::vector<value_type> myvalues(mysize); + + if (comm.rank() == root) { + std::vector<value_type> values; + std::vector<int> sizes(comm.size()); + + // process p will receive p+1 identical generator(p) elements + for (int p = 0; p < comm.size(); ++p) { + for (int i = 0; i < p+1; ++i) + values.push_back(generator(p)); + sizes[p] = p + 1; + } + + std::cout << "Scatteringv " << kind << " from root " + << root << "..." 
<< std::endl; + assert(mysize == sizes[comm.rank()]); + scatterv(comm, values, sizes, &(myvalues[0]), root); + } else { + scatterv(comm, &(myvalues[0]), mysize, root); + } + + for (int i = 0; i < mysize; ++i) + BOOST_CHECK(myvalues[i] == generator(comm.rank())); + } + + comm.barrier(); +} + +template<typename Generator> +void +scatterd_test(const communicator& comm, Generator generator, + const char* kind, int root = -1) +{ + typedef typename Generator::result_type value_type; + + if (root == -1) { + for (root = 0; root < comm.size(); ++root) + scatterv_test(comm, generator, kind, root); + } else { + using boost::mpi::scatterv; + + int mysize = comm.rank() + 1; + std::vector<value_type> myvalues(mysize); + + if (comm.rank() == root) { + std::vector<value_type> values; + std::vector<int> sizes(comm.size()); + std::vector<int> displs(comm.size()); + value_type noise = generator(comm.size()+1); + // process p will receive a payload of p+1 identical generator(p) elements + // root will insert pseudo random pading between each payload. + int shift = 0; // the current position of next payload in source array + for (int p = 0; p < comm.size(); ++p) { + int size = p+1; + int pad = p % 3; + // padding + for (int i = 0; i < pad; ++i) { + values.push_back(noise); + } + // payload + for (int i = 0; i < size; ++i) + values.push_back(generator(p)); + shift += pad; + displs[p] = shift; + sizes[p] = size; + shift += size; + } + + std::cout << "Scatteringv " << kind << " from root " + << root << "..." << std::endl; + assert(mysize == sizes[comm.rank()]); + scatterv(comm, values, sizes, displs, &(myvalues[0]), mysize, root); + } else { + scatterv(comm, &(myvalues[0]), mysize, root); + } + + for (int i = 0; i < mysize; ++i) + BOOST_CHECK(myvalues[i] == generator(comm.rank())); + } + + comm.barrier(); +} + + +BOOST_AUTO_TEST_CASE(simple_scatter) +{ + environment env; + communicator comm; + + scatter_test(comm, int_generator(), "integers"); + scatter_test(comm, gps_generator(), "GPS positions"); + scatter_test(comm, string_generator(), "string"); + scatter_test(comm, string_list_generator(), "list of strings"); + + scatterv_test(comm, int_generator(), "integers"); + scatterv_test(comm, gps_generator(), "GPS positions"); + scatterv_test(comm, string_generator(), "string"); + scatterv_test(comm, string_list_generator(), "list of strings"); + + scatterd_test(comm, int_generator(), "integers"); + scatterd_test(comm, gps_generator(), "GPS positions"); + scatterd_test(comm, string_generator(), "string"); + scatterd_test(comm, string_list_generator(), "list of strings"); +} diff --git a/src/boost/libs/mpi/test/sendrecv_test.cpp b/src/boost/libs/mpi/test/sendrecv_test.cpp new file mode 100644 index 000000000..a7fe3a3ee --- /dev/null +++ b/src/boost/libs/mpi/test/sendrecv_test.cpp @@ -0,0 +1,62 @@ +// Copyright Alain Miniussi 2014. +// Distributed under the Boost Software License, Version 1.0. +// (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +// A test of the sendrecv() operation. 
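The combined call used below pairs one send and one receive in a single operation, which sidesteps the careful ordering a plain blocking send/recv exchange would otherwise need. A minimal sketch with an int payload; the ranks, tag, and values are illustrative only.

#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>

int main(int argc, char* argv[])
{
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator comm;

  // Shift one int around the ring: send to the right neighbour while
  // receiving from the left one, in a single sendrecv() call.
  int next = (comm.rank() + 1) % comm.size();
  int prev = (comm.rank() + comm.size() - 1) % comm.size();
  int received = -1;
  comm.sendrecv(/*dest*/ next, /*send tag*/ 0, /*value*/ comm.rank(),
                /*source*/ prev, /*recv tag*/ 0, /*out*/ received);
  // At this point 'received' holds the rank of the left neighbour.
  return 0;
}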
+#include <boost/mpi/communicator.hpp> +#include <boost/mpi/environment.hpp> +#include <vector> +#include <algorithm> +#include <boost/serialization/string.hpp> +#include <boost/iterator/counting_iterator.hpp> +#include <boost/lexical_cast.hpp> +#include <numeric> + +#define BOOST_TEST_MODULE mpi_sendrecv +#include <boost/test/included/unit_test.hpp> + +namespace mpi = boost::mpi; + +struct blob { + blob(int i) : value(i) {} + int value; + template<class Archive> + void serialize(Archive& s, const unsigned int version) { + s & value; + } +}; + +std::ostream& operator<<(std::ostream& out, blob const& b) { + out << "blob(" << b.value << ")"; + return out; +} + +bool operator==(blob const& b1, blob const& b2) { + return b1.value == b2.value; +} + +template<typename T> +void test_sendrecv(mpi::communicator& com) { + int const wrank = com.rank(); + int const wsize = com.size(); + int const wnext((wrank + 1) % wsize); + int const wprev((wrank + wsize - 1) % wsize); + T recv(-1); + com.sendrecv(wnext, 1, T(wrank), wprev, 1, recv); + for(int r = 0; r < wsize; ++r) { + com.barrier(); + if (r == wrank) { + std::cout << "rank " << wrank << " received " << recv << " from " << wprev << '\n'; + } + } + BOOST_CHECK(recv == T(wprev)); +} + +BOOST_AUTO_TEST_CASE(sendrecv) +{ + mpi::environment env; + mpi::communicator world; + test_sendrecv<int>(world); + test_sendrecv<blob>(world); +} diff --git a/src/boost/libs/mpi/test/sendrecv_vector.cpp b/src/boost/libs/mpi/test/sendrecv_vector.cpp new file mode 100644 index 000000000..dc1392858 --- /dev/null +++ b/src/boost/libs/mpi/test/sendrecv_vector.cpp @@ -0,0 +1,95 @@ +// Author: K. Noel Belcourt <kbelco -at- sandia.gov> + +// Distributed under the Boost Software License, Version 1.0. +// (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +#if defined(__cplusplus) && (201103L <= __cplusplus) + +#include <array> +#include <cassert> +#include <vector> + +#include "boost/mpi/environment.hpp" +#include "boost/mpi/communicator.hpp" + +using std::array; +using std::vector; + +namespace mpi = boost::mpi; + +struct blob : array<int, 9>, array<double, 3>, array<char, 8> { +}; + +namespace boost { + namespace mpi { + + template <> + struct is_mpi_datatype<blob> : mpl::true_ { + }; + + template <> + MPI_Datatype get_mpi_datatype<blob>(const blob& b) { + array<unsigned long, 3> block_lengths{ + { 9, 3, 8 } + }; + + array<MPI_Aint, 3> displacements{ + { 0, 40, 64 } + }; + + array<MPI_Datatype, 3> datatypes{ + { MPI_INT, MPI_DOUBLE, MPI_CHAR } + }; + + MPI_Datatype blob_type; + MPI_Type_create_struct( + block_lengths.size() + , reinterpret_cast<int*>(block_lengths.data()) + , displacements.data() + , datatypes.data() + , &blob_type); + + MPI_Type_commit(&blob_type); + return blob_type; + } + + } // namespace mpi +} // namespace boost + +#endif // defined(__cplusplus) + + +int main(int argc, char* argv[]) { +#if defined(__cplusplus) && (201103L <= __cplusplus) + + mpi::environment env(argc, argv); + mpi::communicator world; + + vector<blob> data; + + if (world.rank() == 0) { + int size = 10000000; + data.resize(size); + // initialize data at vector ends + blob& b1= data[0]; + array<int, 9>& i = b1; + i[0] = -1; + blob& b2= data[size-1]; + array<int, 9>& d = b2; + d[2] = -17; + world.send(1, 0, data); + } else { + world.recv(0, 0, data); + // check data at vector ends + blob& b1 = data[0]; + array<int, 9>& i = b1; + assert(i[0] == -1); + // blob& b2 = data[data.size()-1]; + // array<int, 9>& d = b2; + // assert(d[2] == -17); + } 
+#endif // defined(__cplusplus)
+
+  return 0;
+}
diff --git a/src/boost/libs/mpi/test/skeleton_content_test.cpp b/src/boost/libs/mpi/test/skeleton_content_test.cpp new file mode 100644 index 000000000..6a42fd5e0 --- /dev/null +++ b/src/boost/libs/mpi/test/skeleton_content_test.cpp @@ -0,0 +1,200 @@
+// Copyright 2005 Douglas Gregor.
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// A test of the communicator that transmits skeletons and
+// content for data types.
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <boost/serialization/list.hpp>
+#include <boost/mpi/skeleton_and_content.hpp>
+#include <boost/mpi/nonblocking.hpp>
+#include <algorithm>
+#include <boost/iterator/counting_iterator.hpp>
+#include <boost/mpi/collectives/broadcast.hpp>
+
+#define BOOST_TEST_MODULE mpi_skeleton_content
+#include <boost/test/included/unit_test.hpp>
+
+using boost::mpi::communicator;
+
+using boost::mpi::packed_skeleton_iarchive;
+using boost::mpi::packed_skeleton_oarchive;
+
+void
+test_skeleton_and_content(const communicator& comm, int root,
+                          bool manual_broadcast)
+{
+  using boost::mpi::skeleton;
+  using boost::mpi::content;
+  using boost::mpi::get_content;
+  using boost::make_counting_iterator;
+  using boost::mpi::broadcast;
+
+  int list_size = comm.size() + 7;
+  if (comm.rank() == root) {
+    // Fill in the seed data
+    std::list<int> original_list;
+    for (int i = 0; i < list_size; ++i)
+      original_list.push_back(i);
+
+    std::cout << "Broadcasting integer list skeleton from root " << root
+              << "...";
+    if (manual_broadcast) {
+      // Broadcast the skeleton (manually)
+      for (int p = 0; p < comm.size(); ++p)
+        if (p != root) comm.send(p, 0, skeleton(original_list));
+    } else {
+      broadcast(comm, skeleton(original_list), root);
+    }
+    std::cout << "OK." << std::endl;
+
+    // Broadcast the content (manually)
+    std::cout << "Broadcasting integer list content from root " << root
+              << "...";
+    {
+      content c = get_content(original_list);
+      for (int p = 0; p < comm.size(); ++p)
+        if (p != root) comm.send(p, 1, c);
+    }
+    std::cout << "OK." << std::endl;
+
+    // Reverse the list, broadcast the content again
+    std::reverse(original_list.begin(), original_list.end());
+    std::cout << "Broadcasting reversed integer list content from root "
+              << root << "...";
+    {
+      content c = get_content(original_list);
+      for (int p = 0; p < comm.size(); ++p)
+        if (p != root) comm.send(p, 2, c);
+    }
+    std::cout << "OK." << std::endl;
+  } else {
+    // Allocate some useless data, to try to get the addresses of the
+    // list<int>'s used later to be different across processes.
+    std::list<int> junk_list(comm.rank() * 3 + 1, 17);
+
+    // Receive the skeleton to build up the transferred list
+    std::list<int> transferred_list;
+    if (manual_broadcast) {
+      comm.recv(root, 0, skeleton(transferred_list));
+    } else {
+      broadcast(comm, skeleton(transferred_list), root);
+    }
+    BOOST_CHECK((int)transferred_list.size() == list_size);
+
+    // Receive the content and check it
+    comm.recv(root, 1, get_content(transferred_list));
+    BOOST_CHECK(std::equal(make_counting_iterator(0),
+                           make_counting_iterator(list_size),
+                           transferred_list.begin()));
+
+    // Receive the reversed content and check it
+    comm.recv(root, 2, get_content(transferred_list));
+    BOOST_CHECK(std::equal(make_counting_iterator(0),
+                           make_counting_iterator(list_size),
+                           transferred_list.rbegin()));
+  }
+
+  (comm.barrier)();
+}
+
+void
+test_skeleton_and_content_nonblocking(const communicator& comm, int root)
+{
+  using boost::mpi::skeleton;
+  using boost::mpi::content;
+  using boost::mpi::get_content;
+  using boost::make_counting_iterator;
+  using boost::mpi::broadcast;
+  using boost::mpi::request;
+  using boost::mpi::wait_all;
+
+  int list_size = comm.size() + 7;
+  if (comm.rank() == root) {
+    // Fill in the seed data
+    std::list<int> original_list;
+    for (int i = 0; i < list_size; ++i)
+      original_list.push_back(i);
+
+    std::cout << "Non-blocking broadcast of integer list skeleton from root " << root
+              << "...";
+
+    // Broadcast the skeleton (manually)
+    {
+      std::vector<request> reqs;
+      for (int p = 0; p < comm.size(); ++p)
+        if (p != root)
+          reqs.push_back(comm.isend(p, 0, skeleton(original_list)));
+      wait_all(reqs.begin(), reqs.end());
+    }
+    std::cout << "OK." << std::endl;
+
+    // Broadcast the content (manually)
+    std::cout << "Non-blocking broadcast of integer list content from root " << root
+              << "...";
+    {
+      content c = get_content(original_list);
+      std::vector<request> reqs;
+      for (int p = 0; p < comm.size(); ++p)
+        if (p != root) reqs.push_back(comm.isend(p, 1, c));
+      wait_all(reqs.begin(), reqs.end());
+    }
+    std::cout << "OK." << std::endl;
+
+    // Reverse the list, broadcast the content again
+    std::reverse(original_list.begin(), original_list.end());
+    std::cout << "Non-blocking broadcast of reversed integer list content from root "
+              << root << "...";
+    {
+      std::vector<request> reqs;
+      content c = get_content(original_list);
+      for (int p = 0; p < comm.size(); ++p)
+        if (p != root) reqs.push_back(comm.isend(p, 2, c));
+      wait_all(reqs.begin(), reqs.end());
+    }
+    std::cout << "OK." << std::endl;
+  } else {
+    // Allocate some useless data, to try to get the addresses of the
+    // list<int>'s used later to be different across processes.
+    std::list<int> junk_list(comm.rank() * 3 + 1, 17);
+
+    // Receive the skeleton to build up the transferred list
+    std::list<int> transferred_list;
+    request req = comm.irecv(root, 0, skeleton(transferred_list));
+    req.wait();
+    BOOST_CHECK((int)transferred_list.size() == list_size);
+
+    // Receive the content and check it
+    req = comm.irecv(root, 1, get_content(transferred_list));
+    req.wait();
+    BOOST_CHECK(std::equal(make_counting_iterator(0),
+                           make_counting_iterator(list_size),
+                           transferred_list.begin()));
+
+    // Receive the reversed content and check it
+    req = comm.irecv(root, 2, get_content(transferred_list));
+    req.wait();
+    BOOST_CHECK(std::equal(make_counting_iterator(0),
+                           make_counting_iterator(list_size),
+                           transferred_list.rbegin()));
+  }
+
+  (comm.barrier)();
+}
+
+BOOST_AUTO_TEST_CASE(sendrecv)
+{
+  boost::mpi::environment env;
+  communicator comm;
+  BOOST_TEST_REQUIRE(comm.size() > 1);
+
+  test_skeleton_and_content(comm, 0, true);
+  test_skeleton_and_content(comm, 0, false);
+  test_skeleton_and_content(comm, 1, true);
+  test_skeleton_and_content(comm, 1, false);
+  test_skeleton_and_content_nonblocking(comm, 0);
+  test_skeleton_and_content_nonblocking(comm, 1);
+}
diff --git a/src/boost/libs/mpi/test/version_test.cpp b/src/boost/libs/mpi/test/version_test.cpp new file mode 100644 index 000000000..0895b74f4 --- /dev/null +++ b/src/boost/libs/mpi/test/version_test.cpp @@ -0,0 +1,60 @@
+// Copyright (C) 2013 Alain Miniussi <alain.miniussi@oca.eu>
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+// test mpi version
+
+#include <boost/mpi/environment.hpp>
+#include <boost/mpi/communicator.hpp>
+#include <iostream>
+
+#define BOOST_TEST_MODULE mpi_version
+#include <boost/test/included/unit_test.hpp>
+
+namespace mpi = boost::mpi;
+
+void
+test_version(mpi::communicator const& comm) {
+#if defined(MPI_VERSION)
+  int mpi_version = MPI_VERSION;
+  int mpi_subversion = MPI_SUBVERSION;
+#else
+  int mpi_version = 0;
+  int mpi_subversion = 0;
+#endif
+
+  std::pair<int,int> version = mpi::environment::version();
+  if (comm.rank() == 0) {
+    std::cout << "MPI Version: " << version.first << ',' << version.second << '\n';
+  }
+  BOOST_CHECK(version.first == mpi_version);
+  BOOST_CHECK(version.second == mpi_subversion);
+}
+
+std::string
+yesno(bool b) {
+  return b ? std::string("yes") : std::string("no");
+}
+
+void
+report_features(mpi::communicator const& comm) {
+  if (comm.rank() == 0) {
+    std::cout << "Assuming working MPI_Improbe:" <<
+#if defined(BOOST_MPI_USE_IMPROBE)
+      "yes" << '\n';
+#else
+      "no" << '\n';
+#endif
+  }
+}
+
+BOOST_AUTO_TEST_CASE(version)
+{
+  mpi::environment env;
+  mpi::communicator world;
+
+  test_version(world);
+  report_features(world);
+}
diff --git a/src/boost/libs/mpi/test/wait_all_vector_test.cpp b/src/boost/libs/mpi/test/wait_all_vector_test.cpp new file mode 100644 index 000000000..0e4c800bb --- /dev/null +++ b/src/boost/libs/mpi/test/wait_all_vector_test.cpp @@ -0,0 +1,46 @@
+// Copyright (C) 2017 Alain Miniussi & Vincent Chabannes
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#include <string>
+#include <iostream>
+#include <sstream>
+#include <vector>
+
+#include <boost/mpi.hpp>
+#include <boost/mpi/nonblocking.hpp>
+#include <boost/serialization/string.hpp>
+
+#define BOOST_TEST_MODULE mpi_wait_any
+#include <boost/test/included/unit_test.hpp>
+
+namespace mpi = boost::mpi;
+
+BOOST_AUTO_TEST_CASE(wait_any)
+{
+  mpi::environment env;
+  mpi::communicator comm;
+
+  int rank = comm.rank();
+  int const sz = 10;
+  std::vector<int> data;
+  std::vector<mpi::request> reqs;
+  if ( rank == 0 ) {
+    for ( int i=0; i<sz; ++i ) {
+      data.push_back( i );
+    }
+    reqs.push_back( comm.isend(1, 0, data) );
+  } else if ( rank == 1 ) {
+    reqs.push_back( comm.irecv(0, 0, data) );
+  }
+  mpi::wait_all( reqs.begin(), reqs.end() );
+
+  if ( rank == 1 ) {
+    BOOST_CHECK(data.size() == sz);
+    for ( int i=0; i<sz; ++i ) {
+      BOOST_CHECK(data[i] == i);
+    }
+  }
+}
diff --git a/src/boost/libs/mpi/test/wait_any_test.cpp b/src/boost/libs/mpi/test/wait_any_test.cpp new file mode 100644 index 000000000..e1f4c556d --- /dev/null +++ b/src/boost/libs/mpi/test/wait_any_test.cpp @@ -0,0 +1,64 @@
+// Copyright (C) 2017 Alain Miniussi & Steffen Hirschmann
+
+// Use, modification and distribution is subject to the Boost Software
+// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#include <string>
+#include <iostream>
+#include <sstream>
+#include <vector>
+#include <set>
+
+#include <boost/mpi.hpp>
+#include <boost/mpi/nonblocking.hpp>
+#include <boost/serialization/string.hpp>
+
+#define BOOST_TEST_MODULE mpi_wait_any
+#include <boost/test/included/unit_test.hpp>
+
+namespace mpi = boost::mpi;
+
+BOOST_AUTO_TEST_CASE(wait_any)
+{
+  mpi::environment env;
+  mpi::communicator world;
+
+  std::vector<std::string> ss(world.size());
+  typedef std::vector<mpi::request> requests;
+  requests rreqs;
+
+  std::set<int> pending_senders;
+  for (int i = 0; i < world.size(); ++i) {
+    rreqs.push_back(world.irecv(i, i, ss[i]));
+    pending_senders.insert(i);
+  }
+
+  std::ostringstream fmt;
+  std::string msg = "Hello, World! this is ";
+  fmt << msg << world.rank();
+
+  requests sreqs;
+  for (int i = 0; i < world.size(); ++i) {
+    sreqs.push_back(world.isend(i, world.rank(), fmt.str()));
+  }
+
+  for (int i = 0; i < world.size(); ++i) {
+    std::pair<mpi::status, requests::iterator> completed = mpi::wait_any(rreqs.begin(), rreqs.end());
+    std::ostringstream out;
+    out << "Proc " << world.rank() << " got message from " << completed.first.source() << '\n';
+    std::cout << out.str();
+  }
+
+  for (int i = 0; i < world.size(); ++i) {
+    std::ostringstream fmt;
+    fmt << msg << i;
+    std::vector<std::string>::iterator found = std::find(ss.begin(), ss.end(), fmt.str());
+    BOOST_CHECK(found != ss.end());
+    fmt.str("");
+    fmt << "Proc " << world.rank() << " Got msg from " << i << '\n';
+    std::cout << fmt.str();
+  }
+
+  mpi::wait_all(sreqs.begin(), sreqs.end());
+}
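Editor's note: the last two files in this patch both follow the same nonblocking pattern (post irecv/isend requests, then complete them with wait_any or wait_all). The sketch below is not part of the patch; it only restates the Boost.MPI calls already shown in wait_all_vector_test.cpp and wait_any_test.cpp as a single standalone program, with illustrative variable names and message text. Like those tests, it is only interesting when launched with two or more processes (e.g. via mpirun).

// Minimal sketch (assumption: built against the same Boost.MPI used above).
// Each rank posts one receive and one send per peer, completes the receives
// with wait_any in completion order, and drains the sends with wait_all.
#include <iostream>
#include <string>
#include <utility>
#include <vector>

#include <boost/mpi.hpp>
#include <boost/mpi/nonblocking.hpp>
#include <boost/serialization/string.hpp>

namespace mpi = boost::mpi;

int main(int argc, char* argv[]) {
  mpi::environment env(argc, argv);
  mpi::communicator world;

  std::vector<std::string> inbox(world.size());
  std::vector<mpi::request> recvs, sends;

  // Post one receive per peer; tag the message with the sender's rank,
  // mirroring wait_any_test.cpp above.
  for (int p = 0; p < world.size(); ++p)
    recvs.push_back(world.irecv(p, p, inbox[p]));

  // Post one send per peer carrying this rank's greeting.
  std::string greeting = "hello from rank " + std::to_string(world.rank());
  for (int p = 0; p < world.size(); ++p)
    sends.push_back(world.isend(p, world.rank(), greeting));

  // Complete the receives in whatever order they finish.
  for (int i = 0; i < world.size(); ++i) {
    std::pair<mpi::status, std::vector<mpi::request>::iterator> done =
        mpi::wait_any(recvs.begin(), recvs.end());
    std::cout << "rank " << world.rank() << " received from "
              << done.first.source() << '\n';
  }

  // Make sure every send has completed before the communicator goes away.
  mpi::wait_all(sends.begin(), sends.end());
  return 0;
}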