dune-common 2.9.0

Dune::Communication< Communicator > Class Template Reference

Collective communication interface and sequential default implementation.

#include <dune/common/parallel/communication.hh>
Public Member Functions

Communication ()
    Construct default object.

Communication (const Communicator &)
    Constructor with a given communicator.

int rank () const
    Return the rank of the process; it is between 0 and size()-1.

operator No_Comm () const
    Cast to the underlying fake MPI communicator.

int size () const
    Number of processes in the set; it is greater than 0.

template<class T>
int send (const T &data, int dest_rank, int tag)
    Sends the data to dest_rank.

template<class T>
PseudoFuture< T > isend (const T &&data, int dest_rank, int tag)
    Sends the data to dest_rank, nonblocking.

template<class T>
T recv (T &&data, int source_rank, int tag, void *status=0)
    Receives the data from source_rank.

template<class T>
PseudoFuture< T > irecv (T &&data, int source_rank, int tag)
    Receives the data from source_rank, nonblocking.

template<class T>
T rrecv (T &&data, int source_rank, int tag, void *status=0) const

template<typename T>
T sum (const T &in) const
    Compute the sum of the argument over all processes and return the result in every process. Assumes that T has an operator+.

template<typename T>
int sum (T *inout, int len) const
    Compute the sum over all processes for each component of an array and return the result in every process. Assumes that T has an operator+.

template<typename T>
T prod (const T &in) const
    Compute the product of the argument over all processes and return the result in every process. Assumes that T has an operator*.

template<typename T>
int prod (T *inout, int len) const
    Compute the product over all processes for each component of an array and return the result in every process. Assumes that T has an operator*.

template<typename T>
T min (const T &in) const
    Compute the minimum of the argument over all processes and return the result in every process. Assumes that T has an operator<.

template<typename T>
int min (T *inout, int len) const
    Compute the minimum over all processes for each component of an array and return the result in every process. Assumes that T has an operator<.

template<typename T>
T max (const T &in) const
    Compute the maximum of the argument over all processes and return the result in every process. Assumes that T has an operator<.

template<typename T>
int max (T *inout, int len) const
    Compute the maximum over all processes for each component of an array and return the result in every process. Assumes that T has an operator<.

int barrier () const
    Wait until all processes have arrived at this point in the program.

PseudoFuture< void > ibarrier () const
    Nonblocking barrier.

template<typename T>
int broadcast (T *inout, int len, int root) const
    Distribute an array from the process with rank root to all other processes.

template<class T>
PseudoFuture< T > ibroadcast (T &&data, int root) const
    Distribute an array from the process with rank root to all other processes, nonblocking.

template<typename T>
int gather (const T *in, T *out, int len, int root) const
    Gather arrays on the root task.

template<class TIN, class TOUT = std::vector<TIN>>
PseudoFuture< TOUT > igather (TIN &&data_in, TOUT &&data_out, int root)
    Gather arrays on the root task, nonblocking.

template<typename T>
int gatherv (const T *in, int sendDataLen, T *out, int *recvDataLen, int *displ, int root) const
    Gather arrays of variable size on the root task.

template<typename T>
int scatter (const T *sendData, T *recvData, int len, int root) const
    Scatter an array from the root to all other tasks.

template<class TIN, class TOUT = TIN>
PseudoFuture< TOUT > iscatter (TIN &&data_in, TOUT &&data_out, int root)
    Scatter an array from the root to all other tasks, nonblocking.

template<typename T>
int scatterv (const T *sendData, int *sendDataLen, int *displ, T *recvData, int recvDataLen, int root) const
    Scatter arrays of variable length from the root to all other tasks.

template<typename T>
int allgather (const T *sbuf, int count, T *rbuf) const
    Gathers data from all tasks and distributes it to all.

template<class TIN, class TOUT = TIN>
PseudoFuture< TOUT > iallgather (TIN &&data_in, TOUT &&data_out)
    Gathers data from all tasks and distributes it to all, nonblocking.

template<typename T>
int allgatherv (const T *in, int sendDataLen, T *out, int *recvDataLen, int *displ) const
    Gathers data of variable length from all tasks and distributes it to all.

template<typename BinaryFunction, typename Type>
int allreduce (Type *inout, int len) const
    Compute something over all processes for each component of an array and return the result in every process.

template<class BinaryFunction, class TIN, class TOUT = TIN>
PseudoFuture< TOUT > iallreduce (TIN &&data_in, TOUT &&data_out)
    Compute something over all processes, nonblocking.

template<class BinaryFunction, class T>
PseudoFuture< T > iallreduce (T &&data)
    Compute something over all processes, nonblocking and in-place.

template<typename BinaryFunction, typename Type>
int allreduce (const Type *in, Type *out, int len) const
    Compute something over all processes for each component of an array and return the result in every process.
Detailed Description

Collective communication interface and sequential default implementation.

Communication offers an abstraction of the basic methods of parallel communication, following the message-passing paradigm. It allows one to switch parallel features on and off without changing the code. Currently only MPI and sequential code are supported.

A Communication object is returned by all grids (also the sequential ones) so that code can be written transparently for sequential and parallel grids.

This class provides the default implementation for sequential grids: the number of processes involved is 1, and any sum, maximum, etc. simply returns its input argument.

Specializations can implement the real thing using appropriate communication functions; for example, there is an implementation based on the Message Passing Interface (MPI), see Dune::Communication<MPI_Comm>.

Moreover, the communication subsystem used by an implementation is not visible in the interface, i.e. Dune grid implementations are not restricted to MPI.
Template Parameters
  Communicator: The communicator type used by your message-passing implementation. For MPI this will be MPI_Comm; for sequential code there is the dummy communicator No_Comm. It is assumed that if you specialize the Communication class for a message-passing system other than MPI, that system will have something equivalent to MPI communicators.
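The following minimal sketch illustrates this transparency; it assumes Dune::MPIHelper and its getCommunication() accessor from dune/common/parallel/mpihelper.hh. The same program compiles and runs with and without MPI: in the sequential case, size() is 1 and sum() simply returns its argument.

#include <iostream>
#include <dune/common/parallel/mpihelper.hh>

int main(int argc, char** argv)
{
  // Initializes MPI when available; otherwise the sequential
  // default implementation documented here is used.
  Dune::MPIHelper& helper = Dune::MPIHelper::instance(argc, argv);
  auto comm = helper.getCommunication();

  // Identical code for sequential and parallel runs.
  double local = comm.rank() + 1.0;
  double total = comm.sum(local);   // sequentially: total == local

  if (comm.rank() == 0)
    std::cout << "processes: " << comm.size()
              << ", sum over ranks: " << total << std::endl;
  return 0;
}

The snippets in the member documentation below reuse the comm object from this sketch.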
Constructor & Destructor Documentation

Communication () [inline]

Construct default object.
Communication (const Communicator &) [inline]

Constructor with a given communicator.

As this is the implementation for the sequential setting, the communicator is a dummy and is simply discarded.
Member Function Documentation

int allgather (const T *sbuf, int count, T *rbuf) const [inline]

Gathers data from all tasks and distributes it to all.

The block of data sent from the j-th process is received by every process and placed in the j-th block of the buffer rbuf.

Parameters
  [in]  sbuf   The buffer with the data to send. Has to be the same for each task.
  [in]  count  The number of elements to send from each process.
  [out] rbuf   The receive buffer for the data. Has to be of size notasks*count, with notasks being the number of tasks in the communicator.
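A minimal usage sketch, reusing the comm object from the introductory example:

// Each rank contributes one value; afterwards every rank holds all of them.
int myValue = comm.rank();
std::vector<int> all(comm.size());
comm.allgather(&myValue, 1, all.data());
// all[j] == j on every rank; sequentially, all == {0}.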
int allgatherv (const T *in, int sendDataLen, T *out, int *recvDataLen, int *displ) const [inline]

Gathers data of variable length from all tasks and distributes it to all.

The block of data sent from the j-th process is received by every process and placed in the j-th block of the buffer out.

Parameters
  [in]  in           The send buffer with the data to send.
  [in]  sendDataLen  The number of elements to send from each task.
  [out] out          The buffer to store the received data in.
  [in]  recvDataLen  An array with size equal to the number of processes, containing at position i the number of elements to receive from process i, i.e. the number that is passed as the sendDataLen argument to this function on process i.
  [in]  displ        An array with size equal to the number of processes. Data received from process i will be written starting at out+displ[i].
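A minimal usage sketch, reusing the comm object from the introductory example; every rank fills counts and displ identically:

// Rank i contributes i+1 entries; counts/displ describe the global layout.
int n = comm.rank() + 1;
std::vector<int> sendBuf(n, comm.rank());
std::vector<int> counts(comm.size()), displ(comm.size());
for (int i = 0; i < comm.size(); ++i) {
  counts[i] = i + 1;                                   // what rank i sends
  displ[i]  = (i == 0) ? 0 : displ[i-1] + counts[i-1]; // where it lands
}
std::vector<int> recvBuf(displ.back() + counts.back());
comm.allgatherv(sendBuf.data(), n, recvBuf.data(), counts.data(), displ.data());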
int allreduce (const Type *in, Type *out, int len) const [inline]

Compute something over all processes for each component of an array and return the result in every process.

The template parameter BinaryFunction is the type of the binary function to use for the computation.

Parameters
  in   The array to compute on.
  out  The array to store the results in.
  len  The number of components in the array.
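A minimal usage sketch for this overload, reusing the comm object from the introductory example; any functor with a suitable operator() works as BinaryFunction, e.g. std::plus from <functional>:

// Component-wise sum over all processes.
double in[2]  = {1.0 * comm.rank(), 1.0};
double out[2] = {0.0, 0.0};
comm.allreduce<std::plus<double>>(in, out, 2);
// out[0] == 0+1+...+(size()-1), out[1] == size(); sequentially out == in.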
int allreduce (Type *inout, int len) const [inline]

Compute something over all processes for each component of an array and return the result in every process.

The template parameter BinaryFunction is the type of the binary function to use for the computation.

Parameters
  inout  The array to compute on.
  len    The number of components in the array.
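The in-place variant of the sketch above; the result overwrites the input on every rank:

double vals[2] = {1.0 * comm.rank(), 1.0};
comm.allreduce<std::plus<double>>(vals, 2);   // vals now holds the global sums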
int barrier () const [inline]

Wait until all processes have arrived at this point in the program.
int broadcast (T *inout, int len, int root) const [inline]

Distribute an array from the process with rank root to all other processes.
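A minimal usage sketch, reusing the comm object from the introductory example:

// Rank root fills the array; broadcast makes it available everywhere.
std::array<double, 3> values{};
if (comm.rank() == 0)
  values = {1.0, 2.0, 3.0};
comm.broadcast(values.data(), 3, 0);   // root = 0
// Every rank now holds {1.0, 2.0, 3.0}; the sequential default is a no-op.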
int gather (const T *in, T *out, int len, int root) const [inline]

Gather arrays on the root task.

Each process sends its in array of length len to the root process (including the root itself). On the root process these arrays are stored in rank order in the out array, which must have size len * number of processes.

Parameters
  [in]  in    The send buffer with the data to send.
  [out] out   The buffer to store the received data in. May have length zero on non-root tasks.
  [in]  len   The number of elements to send from each task.
  [in]  root  The root task that gathers the data.
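A minimal usage sketch, reusing the comm object from the introductory example:

// Every rank sends one value; only the root needs a full-size out buffer.
int myValue = 10 * comm.rank();
std::vector<int> out;
if (comm.rank() == 0)
  out.resize(comm.size());
comm.gather(&myValue, out.data(), 1, 0);   // root = 0
// On rank 0: out[j] == 10*j; on other ranks out stays empty.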
int gatherv (const T *in, int sendDataLen, T *out, int *recvDataLen, int *displ, int root) const [inline]

Gather arrays of variable size on the root task.

Each process sends its in array of length sendDataLen to the root process (including the root itself). On the root process these arrays are stored in rank order in the out array.

Parameters
  [in]  in           The send buffer with the data to be sent.
  [in]  sendDataLen  The number of elements to send from each task.
  [out] out          The buffer to store the received data in. May have length zero on non-root tasks.
  [in]  recvDataLen  An array with size equal to the number of processes, containing at position i the number of elements to receive from process i, i.e. the number that is passed as the sendDataLen argument to this function on process i. May have length zero on non-root tasks.
  [in]  displ        An array with size equal to the number of processes. Data received from process i will be written starting at out+displ[i] on the root process. May have length zero on non-root tasks.
  [in]  root         The root task that gathers the data.
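A minimal usage sketch, reusing the comm object from the introductory example; only the root sets up the receive layout:

// Rank i sends i+1 entries; the root lays them out contiguously.
int n = comm.rank() + 1;
std::vector<double> sendBuf(n, 1.0 * comm.rank());
std::vector<int> counts, displ;
std::vector<double> out;
if (comm.rank() == 0) {
  counts.resize(comm.size());
  displ.resize(comm.size());
  for (int i = 0; i < comm.size(); ++i) {
    counts[i] = i + 1;
    displ[i]  = (i == 0) ? 0 : displ[i-1] + counts[i-1];
  }
  out.resize(displ.back() + counts.back());
}
comm.gatherv(sendBuf.data(), n, out.data(), counts.data(), displ.data(), 0);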
PseudoFuture< TOUT > iallgather (TIN &&data_in, TOUT &&data_out) [inline]

Gathers data from all tasks and distributes it to all, nonblocking.
PseudoFuture< T > iallreduce (T &&data) [inline]

Compute something over all processes, nonblocking and in-place.
PseudoFuture< TOUT > iallreduce (TIN &&data_in, TOUT &&data_out) [inline]

Compute something over all processes, nonblocking.
PseudoFuture< void > ibarrier () const [inline]

Nonblocking barrier.
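A sketch of the intended overlap pattern, reusing the comm object from the introductory example and assuming the wait() method of Dune's Future interface, which PseudoFuture implements:

// Start the barrier, overlap local work, then wait for completion.
auto done = comm.ibarrier();
// ... purely local computation can proceed here ...
done.wait();   // returns once all processes have reached the barrier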
PseudoFuture< T > ibroadcast (T &&data, int root) const [inline]

Distribute an array from the process with rank root to all other processes, nonblocking.
PseudoFuture< TOUT > igather (TIN &&data_in, TOUT &&data_out, int root) [inline]

Gather arrays on the root task, nonblocking.
PseudoFuture< T > irecv (T &&data, int source_rank, int tag) [inline]

Receives the data from source_rank, nonblocking.
PseudoFuture< TOUT > iscatter (TIN &&data_in, TOUT &&data_out, int root) [inline]

Scatter an array from the root to all other tasks, nonblocking.
PseudoFuture< T > isend (const T &&data, int dest_rank, int tag) [inline]

Sends the data to dest_rank, nonblocking.
T max (const T &in) const [inline]

Compute the maximum of the argument over all processes and return the result in every process. Assumes that T has an operator<.
int max (T *inout, int len) const [inline]

Compute the maximum over all processes for each component of an array and return the result in every process. Assumes that T has an operator<.
T min (const T &in) const [inline]

Compute the minimum of the argument over all processes and return the result in every process. Assumes that T has an operator<.
int min (T *inout, int len) const [inline]

Compute the minimum over all processes for each component of an array and return the result in every process. Assumes that T has an operator<.
operator No_Comm () const [inline]

Cast to the underlying fake MPI communicator.
T prod (const T &in) const [inline]

Compute the product of the argument over all processes and return the result in every process. Assumes that T has an operator*.
int prod (T *inout, int len) const [inline]

Compute the product over all processes for each component of an array and return the result in every process. Assumes that T has an operator*.
int rank () const [inline]

Return the rank of the process; it is between 0 and size()-1.
T recv (T &&data, int source_rank, int tag, void *status=0) [inline]

Receives the data from source_rank.
T rrecv (T &&data, int source_rank, int tag, void *status=0) const [inline]

int scatter (const T *sendData, T *recvData, int len, int root) const [inline]

Scatter an array from the root to all other tasks.

The root process sends the elements with index k*len to (k+1)*len-1 of its array to task k, which stores them at indices 0 to len-1.

Parameters
  [in]  sendData  The array to scatter. May have length zero on non-root tasks.
  [out] recvData  The buffer to store the received data in. Upon completion of the method, each task stores there the block of the root's send buffer that corresponds to its rank.
  [in]  len       The number of elements in the recv buffer.
  [in]  root      The root task that scatters the data.
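A minimal usage sketch, reusing the comm object from the introductory example:

// The root sends len consecutive elements to each task.
const int len = 2;
std::vector<int> sendBuf;
if (comm.rank() == 0)
  for (int r = 0; r < comm.size(); ++r)
    for (int k = 0; k < len; ++k)
      sendBuf.push_back(100 * r + k);   // block r goes to rank r
std::vector<int> recvBuf(len);
comm.scatter(sendBuf.data(), recvBuf.data(), len, 0);   // root = 0
// On rank r: recvBuf == {100*r, 100*r + 1}.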
int scatterv (const T *sendData, int *sendDataLen, int *displ, T *recvData, int recvDataLen, int root) const [inline]

Scatter arrays of variable length from the root to all other tasks.

The root process sends the elements with index displ[k] to displ[k]+sendDataLen[k]-1 of its sendData array to task k, which stores them at indices 0 to recvDataLen-1.

Parameters
  [in]  sendData     The array to scatter. May have length zero on non-root tasks.
  [in]  sendDataLen  An array with size equal to the number of processes, containing at position i the number of elements to scatter to process i, i.e. the number that is passed as the recvDataLen argument to this function on process i.
  [in]  displ        An array with size equal to the number of processes. Data scattered to process i will be read starting at sendData+displ[i] on the root process.
  [out] recvData     The buffer to store the received data in. Upon completion of the method, each task stores there the block of the root's send buffer that corresponds to its rank.
  [in]  recvDataLen  The number of elements in the recvData buffer.
  [in]  root         The root task that scatters the data.
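A minimal usage sketch, reusing the comm object from the introductory example; only the root sets up the send layout:

// Rank i receives i+1 entries from the root.
int myLen = comm.rank() + 1;
std::vector<int> sendBuf, counts, displ;
if (comm.rank() == 0) {
  counts.resize(comm.size());
  displ.resize(comm.size());
  for (int i = 0; i < comm.size(); ++i) {
    counts[i] = i + 1;
    displ[i]  = (i == 0) ? 0 : displ[i-1] + counts[i-1];
    for (int k = 0; k < counts[i]; ++k)
      sendBuf.push_back(i);             // rank i receives its own rank number
  }
}
std::vector<int> recvBuf(myLen);
comm.scatterv(sendBuf.data(), counts.data(), displ.data(),
              recvBuf.data(), myLen, 0);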
int send (const T &data, int dest_rank, int tag) [inline]

Sends the data to dest_rank.
int size () const [inline]

Number of processes in the set; it is greater than 0.
T sum (const T &in) const [inline]

Compute the sum of the argument over all processes and return the result in every process. Assumes that T has an operator+.
int sum (T *inout, int len) const [inline]

Compute the sum over all processes for each component of an array and return the result in every process. Assumes that T has an operator+.