5 #ifndef DUNE_COMMON_PARALLEL_MPICOMMUNICATION_HH
6 #define DUNE_COMMON_PARALLEL_MPICOMMUNICATION_HH
39 template<typename Type, typename BinaryFunction, typename Enable=void>
48 op = std::make_unique<MPI_Op>();
53 MPI_Op_create((void (*)(void*, void*, int*, MPI_Datatype*))&operation, true, op.get());
58 static void operation (Type *in, Type *inout, int *len, MPI_Datatype*)
62 for (int i=0; i< *len; ++i, ++in, ++inout) {
64 temp = func(*in, *inout);
69 Generic_MPI_Op (const Generic_MPI_Op& ) {}
70 static std::unique_ptr<MPI_Op> op;
74 template<typename Type, typename BinaryFunction, typename Enable>
75 std::unique_ptr<MPI_Op> Generic_MPI_Op<Type,BinaryFunction, Enable>::op;
77 #define ComposeMPIOp(func,op) \
78 template<class T, class S> \
79 class Generic_MPI_Op<T, func<S>, std::enable_if_t<MPITraits<S>::is_intrinsic> >{ \
81 static MPI_Op get(){ \
85 Generic_MPI_Op () {} \
86 Generic_MPI_Op (const Generic_MPI_Op & ) {} \
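The macro specializes Generic_MPI_Op so that, for intrinsic types, a standard functor is mapped directly to a built-in MPI_Op and MPI_Op_create is skipped. In the header the macro is followed by invocations along these lines (a sketch; the exact list in the file may differ):

ComposeMPIOp(std::plus, MPI_SUM);
ComposeMPIOp(std::multiplies, MPI_PROD);
ComposeMPIOp(Min, MPI_MIN);
ComposeMPIOp(Max, MPI_MAX);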
114 if(communicator!=MPI_COMM_NULL) {
116 MPI_Initialized(&initialized);
118 DUNE_THROW(ParallelError, "You must call MPIHelper::instance(argc,argv) in your main() function before using the MPI Communication!");
119 MPI_Comm_rank(communicator,&me);
120 MPI_Comm_size(communicator,&procs);
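A minimal usage sketch, assuming dune-common was configured with MPI support and the usual header layout: MPI must be initialized through MPIHelper before a Communication is constructed, exactly as the check above demands.

#include <iostream>
#include <dune/common/parallel/mpihelper.hh>
#include <dune/common/parallel/mpicommunication.hh>

int main(int argc, char** argv)
{
  // MPIHelper::instance() calls MPI_Init; constructing a Communication beforehand throws ParallelError
  Dune::MPIHelper::instance(argc, argv);
  Dune::Communication<MPI_Comm> comm(MPI_COMM_WORLD);
  if (comm.rank() == 0)
    std::cout << "communicator has " << comm.size() << " processes\n";
  return 0;
}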
146 int send(const T& data, int dest_rank, int tag) const
149 return MPI_Send(mpi_data.ptr(), mpi_data.size(), mpi_data.type(),
150 dest_rank, tag, communicator);
159 MPI_Isend(mpidata.ptr(), mpidata.size(), mpidata.type(),
160 dest_rank, tag, communicator, &future.req_);
166 T recv(T&& data, int source_rank, int tag, MPI_Status* status = MPI_STATUS_IGNORE) const
168 T lvalue_data(std::forward<T>(data));
170 MPI_Recv(mpi_data.ptr(), mpi_data.size(), mpi_data.type(),
171 source_rank, tag, communicator, status);
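A point-to-point sketch built on the two blocking calls above; the function name, message size and tag are illustrative. Note that recv does not resize the buffer, so the receiver must size it to match the incoming message.

#include <numeric>
#include <vector>
#include <dune/common/parallel/mpicommunication.hh>

// illustrative helper: rank 0 sends 100 doubles to rank 1 with tag 42
void exchange(const Dune::Communication<MPI_Comm>& comm)
{
  std::vector<double> payload(100);
  if (comm.rank() == 0) {
    std::iota(payload.begin(), payload.end(), 0.0);
    comm.send(payload, 1, 42);                       // blocking send
  } else if (comm.rank() == 1) {
    payload = comm.recv(std::move(payload), 0, 42);  // buffer already sized to 100
  }
}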
181 if (mpidata.size() == 0)
183 MPI_Irecv(mpidata.ptr(), mpidata.size(), mpidata.type(),
184 source_rank, tag, communicator, &future.req_);
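The nonblocking variants return an MPIFuture that owns the buffer; a sketch with an illustrative ping function, assuming the std::future-style wait()/get() interface that MPIFuture models (see mpifuture.hh).

#include <vector>
#include <dune/common/parallel/mpicommunication.hh>

void ping(const Dune::Communication<MPI_Comm>& comm)
{
  if (comm.rank() == 0) {
    auto sendFuture = comm.isend(std::vector<int>(10, 1), 1, 7);
    sendFuture.wait();                        // complete the send before the buffer is released
  } else if (comm.rank() == 1) {
    auto recvFuture = comm.irecv(std::vector<int>(10), 0, 7);
    std::vector<int> data = recvFuture.get(); // blocks until the message has arrived
  }
}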
189 T rrecv(T&& data, int source_rank, int tag, MPI_Status* status = MPI_STATUS_IGNORE) const
192 MPI_Message _message;
193 T lvalue_data(std::forward<T>(data));
195 static_assert(!mpi_data.static_size, "rrecv works only for non-static-sized types.");
196 if(status == MPI_STATUS_IGNORE)
198 MPI_Mprobe(source_rank, tag, communicator, &_message, status);
200 MPI_Get_count(status, mpi_data.type(), &size);
201 mpi_data.resize(size);
202 MPI_Mrecv(mpi_data.ptr(), mpi_data.size(), mpi_data.type(), &_message, status);
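Because rrecv probes the message first and resizes the buffer to the probed count, the receiver does not need to know the length in advance; a sketch with illustrative names.

#include <vector>
#include <dune/common/parallel/mpicommunication.hh>

void receiveUnknownLength(const Dune::Communication<MPI_Comm>& comm)
{
  if (comm.rank() == 1) {
    // the vector may start empty; rrecv resizes it to the size of the incoming message
    std::vector<double> data = comm.rrecv(std::vector<double>{}, 0, 99);
  }
}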
211 allreduce<std::plus<T> >(&in,&out,1);
217 int sum (T* inout, int len) const
219 return allreduce<std::plus<T> >(inout,len);
227 allreduce<std::multiplies<T> >(&in,&out,1);
233 int prod (T* inout, int len) const
235 return allreduce<std::multiplies<T> >(inout,len);
243 allreduce<Min<T> >(&in,&out,1);
249 int min (T* inout, int len) const
251 return allreduce<Min<T> >(inout,len);
260 allreduce<Max<T> >(&in,&out,1);
266 int max (T* inout, int len) const
268 return allreduce<Max<T> >(inout,len);
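The value overloads return the reduced result on every rank, the pointer overloads reduce an array element-wise in place; a short sketch (function name is illustrative).

#include <vector>
#include <dune/common/parallel/mpicommunication.hh>

void reduceExamples(const Dune::Communication<MPI_Comm>& comm)
{
  double local = comm.rank() + 1.0;
  double globalSum = comm.sum(local);   // same value on every rank afterwards
  double globalMax = comm.max(local);

  std::vector<int> counts(4, comm.rank());
  comm.min(counts.data(), static_cast<int>(counts.size()));  // element-wise minimum across ranks, in place
}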
274 return MPI_Barrier(communicator);
281 MPI_Ibarrier(communicator, &future.req_);
298 MPI_Ibcast(mpidata.ptr(), mpidata.size(), mpidata.type(), root, communicator, &future.req_);
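broadcast overwrites the buffer on every rank except root with root's data; a sketch (names and values illustrative).

#include <array>
#include <dune/common/parallel/mpicommunication.hh>

void shareParameters(const Dune::Communication<MPI_Comm>& comm)
{
  std::array<double, 3> params{};
  if (comm.rank() == 0)
    params = {1.0, 2.5, 0.125};   // only root has meaningful data before the call
  comm.broadcast(params.data(), static_cast<int>(params.size()), 0);  // afterwards every rank holds root's values
}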
310 int gather (const T* in, T* out, int len, int root) const
318 template<class TIN, class TOUT = std::vector<TIN>>
323 assert(root != me || mpidata_in.size()*procs <= mpidata_out.size());
324 int outlen = (me==root) * mpidata_in.size();
325 MPI_Igather(mpidata_in.ptr(), mpidata_in.size(), mpidata_in.type(),
326 mpidata_out.ptr(), outlen, mpidata_out.type(),
327 root, communicator, &future.req_);
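gather collects len items from every rank into a root-side buffer of size len*procs, ordered by rank; a sketch (function name illustrative).

#include <vector>
#include <dune/common/parallel/mpicommunication.hh>

void gatherRanks(const Dune::Communication<MPI_Comm>& comm, int root = 0)
{
  int mine = comm.rank();
  std::vector<int> all;
  if (comm.rank() == root)
    all.resize(comm.size());          // only root needs the receive buffer
  comm.gather(&mine, all.data(), 1, root);
}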
333 int gatherv (const T* in, int sendDataLen, T* out, int* recvDataLen, int* displ, int root) const
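For the variable-length variant, recvDataLen and displ describe, per rank, how many items arrive and where they start in the root-side output buffer; a gatherv sketch with illustrative sizes (every rank contributes rank()+1 values).

#include <numeric>
#include <vector>
#include <dune/common/parallel/mpicommunication.hh>

void gatherVariable(const Dune::Communication<MPI_Comm>& comm, int root = 0)
{
  std::vector<double> mine(comm.rank() + 1, double(comm.rank()));

  std::vector<int> counts, displ;
  std::vector<double> all;
  if (comm.rank() == root) {
    counts.resize(comm.size());
    for (int r = 0; r < comm.size(); ++r) counts[r] = r + 1;
    displ.assign(comm.size(), 0);
    std::partial_sum(counts.begin(), counts.end() - 1, displ.begin() + 1);  // offsets per rank
    all.resize(displ.back() + counts.back());
  }
  comm.gatherv(mine.data(), static_cast<int>(mine.size()), all.data(),
               counts.data(), displ.data(), root);
}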
343 int scatter (const T* sendData, T* recvData, int len, int root) const
351 template<class TIN, class TOUT = TIN>
357 int inlen = (me==root) * mpidata_in.size()/procs;
358 MPI_Iscatter(mpidata_in.ptr(), inlen, mpidata_in.type(),
359 mpidata_out.ptr(), mpidata_out.size(), mpidata_out.type(),
360 root, communicator, &future.req_);
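scatter is the inverse of gather: root holds len*procs items and every rank receives its len-sized slice; a sketch (function name and sizes illustrative).

#include <numeric>
#include <vector>
#include <dune/common/parallel/mpicommunication.hh>

void scatterChunks(const Dune::Communication<MPI_Comm>& comm, int root = 0)
{
  std::vector<double> sendBuf;
  if (comm.rank() == root) {
    sendBuf.resize(2 * comm.size());             // two entries per rank
    std::iota(sendBuf.begin(), sendBuf.end(), 0.0);
  }
  std::vector<double> myChunk(2);
  comm.scatter(sendBuf.data(), myChunk.data(), 2, root);
}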
366 int scatterv (const T* sendData, int* sendDataLen, int* displ, T* recvData, int recvDataLen, int root) const
374 operator MPI_Comm () const
380 template<typename T, typename T1>
389 template<class TIN, class TOUT = TIN>
395 assert(mpidata_in.size()*procs <= mpidata_out.size());
396 int outlen = mpidata_in.size();
397 MPI_Iallgather(mpidata_in.ptr(), mpidata_in.size(), mpidata_in.type(),
398 mpidata_out.ptr(), outlen, mpidata_out.type(),
399 communicator, &future.req_);
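allgather is a gather whose result ends up on every rank; a short sketch (function name illustrative).

#include <vector>
#include <dune/common/parallel/mpicommunication.hh>

void allGatherRanks(const Dune::Communication<MPI_Comm>& comm)
{
  int mine = comm.rank();
  std::vector<int> all(comm.size());
  comm.allgather(&mine, 1, all.data());   // afterwards all == {0, 1, ..., size()-1} on every rank
}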
405 int allgatherv (const T* in, int sendDataLen, T* out, int* recvDataLen, int* displ) const
413 template<typename BinaryFunction, typename Type>
416 Type* out = new Type[len];
417 int ret = allreduce<BinaryFunction>(inout,out,len);
418 std::copy(out, out+len, inout);
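The BinaryFunction template parameter selects the reduction; for intrinsic types with standard functors the ComposeMPIOp specializations above map it to a built-in MPI_Op, otherwise a user-defined MPI_Op is created via Generic_MPI_Op. A sketch of the in-place overload, assuming Dune::Max is the function object from binaryfunctions.hh referenced below (function name illustrative).

#include <vector>
#include <dune/common/parallel/binaryfunctions.hh>
#include <dune/common/parallel/mpicommunication.hh>

void elementwiseMax(const Dune::Communication<MPI_Comm>& comm)
{
  std::vector<double> values{1.0 * comm.rank(), 2.0, 3.0};
  // in-place variant: every rank ends up with the element-wise maximum across all ranks
  comm.allreduce<Dune::Max<double>>(values.data(), static_cast<int>(values.size()));
}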
423 template<typename BinaryFunction, typename Type>
425 Type lvalue_data = std::forward<Type>(in);
427 MPI_Allreduce(MPI_IN_PLACE, data.ptr(), data.size(), data.type(), Generic_MPI_Op<Type, BinaryFunction>::get(), communicator);
434 template<class BinaryFunction, class TIN, class TOUT = TIN>
439 assert(mpidata_out.size() == mpidata_in.size());
440 assert(mpidata_out.type() == mpidata_in.type());
441 MPI_Iallreduce(mpidata_in.ptr(), mpidata_out.ptr(),
442 mpidata_out.size(), mpidata_out.type(),
444 communicator, &future.req_);
449 template<class BinaryFunction, class T>
453 MPI_Iallreduce(MPI_IN_PLACE, mpidata.ptr(),
454 mpidata.size(), mpidata.type(),
456 communicator, &future.req_);
461 template<typename BinaryFunction, typename Type>
469 MPI_Comm communicator;
Cross-references for the headers and symbols used above:

exceptions.hh — a few common exception classes.
binaryfunctions.hh — helper classes that provide unique types for standard functions.
mpidata.hh — interface class to translate objects to the MPI_Datatype, void* and size used for MPI calls.
communication.hh — implements a utility class that provides collective communication methods for sequential programs.
mpitraits.hh — traits classes for mapping types onto MPI_Datatype.
ComposeMPIOp(func, op) — macro defined at mpicommunication.hh:77.
DUNE_THROW(E, m) — defined at exceptions.hh:218.
Dune — Dune namespace (alignedallocator.hh:13).
getMPIData(T &t) — defined at mpidata.hh:44.
MPITraits — a traits class describing the mapping of types onto MPI_Datatypes (mpitraits.hh:41).
Min, Max — the binary function objects used by the reductions above (binaryfunctions.hh:18 and binaryfunctions.hh:34).
ParallelError — default exception if an error occurred in the parallel communication of the program (exceptions.hh:287).
Communication — collective communication interface and sequential default implementation (communication.hh:100).
int size() const — number of processes in the set, always greater than 0 (communication.hh:126).
Generic_MPI_Op — mpicommunication.hh:41.
static MPI_Op get() — mpicommunication.hh:44.
int max(T *inout, int len) const — compute the maximum of the argument over all processes and return the result in every process (mpicommunication.hh:266).
int allgatherv(const T *in, int sendDataLen, T *out, int *recvDataLen, int *displ) const — gathers data of variable length from all tasks and distributes it to all (mpicommunication.hh:405).
T max(const T &in) const — compute the maximum of the argument over all processes and return the result in every process (mpicommunication.hh:257).
MPIFuture<T> ibroadcast(T &&data, int root) const — distribute an array from the process with rank root to all other processes, nonblocking (mpicommunication.hh:295).
MPIFuture<void> ibarrier() const — nonblocking barrier (mpicommunication.hh:278).
T recv(T &&data, int source_rank, int tag, MPI_Status *status=MPI_STATUS_IGNORE) const — receives the data from source_rank (mpicommunication.hh:166).
int barrier() const — wait until all processes have arrived at this point in the program (mpicommunication.hh:272).
int rank() const — return rank, is between 0 and size()-1 (mpicommunication.hh:133).
int scatterv(const T *sendData, int *sendDataLen, int *displ, T *recvData, int recvDataLen, int root) const — scatter arrays of variable length from a root to all other tasks (mpicommunication.hh:366).
MPIFuture<T> isend(T &&data, int dest_rank, int tag) const — sends the data to dest_rank, nonblocking (mpicommunication.hh:155).
MPIFuture<TOUT, TIN> iallgather(TIN &&data_in, TOUT &&data_out) const — gathers data from all tasks and distributes it to all, nonblocking (mpicommunication.hh:390).
Type allreduce(Type &&in) const — mpicommunication.hh:424.
int sum(T *inout, int len) const — compute the sum of the argument over all processes and return the result in every process (mpicommunication.hh:217).
int broadcast(T *inout, int len, int root) const — distribute an array from the process with rank root to all other processes (mpicommunication.hh:288).
MPIFuture<T> iallreduce(T &&data) const — compute something over all processes, nonblocking (mpicommunication.hh:450).
T sum(const T &in) const — compute the sum of the argument over all processes and return the result in every process (mpicommunication.hh:208).
int allreduce(const Type *in, Type *out, int len) const — mpicommunication.hh:462.
MPIFuture<TOUT, TIN> iallreduce(TIN &&data_in, TOUT &&data_out) const — compute something over all processes, nonblocking (mpicommunication.hh:435).
int size() const — number of processes in the set, always greater than 0 (mpicommunication.hh:139).
int gather(const T *in, T *out, int len, int root) const — gather arrays on the root task (mpicommunication.hh:310).
int allreduce(Type *inout, int len) const — compute something over all processes for each component of an array and return the result in every process (mpicommunication.hh:414).
T rrecv(T &&data, int source_rank, int tag, MPI_Status *status=MPI_STATUS_IGNORE) const — mpicommunication.hh:189.
int scatter(const T *sendData, T *recvData, int len, int root) const — scatter an array from a root to all other tasks (mpicommunication.hh:343).
MPIFuture<T> irecv(T &&data, int source_rank, int tag) const — receives the data from source_rank, nonblocking (mpicommunication.hh:177).
int prod(T *inout, int len) const — compute the product of the argument over all processes and return the result in every process (mpicommunication.hh:233).
MPIFuture<TOUT, TIN> igather(TIN &&data_in, TOUT &&data_out, int root) const — gather arrays on the root task, nonblocking (mpicommunication.hh:319).
T min(const T &in) const — compute the minimum of the argument over all processes and return the result in every process (mpicommunication.hh:240).
Communication(const MPI_Comm &c=MPI_COMM_WORLD) — instantiation using an MPI communicator (mpicommunication.hh:111).
MPIFuture<TOUT, TIN> iscatter(TIN &&data_in, TOUT &&data_out, int root) const — scatter an array from a root to all other tasks, nonblocking (mpicommunication.hh:352).
int gatherv(const T *in, int sendDataLen, T *out, int *recvDataLen, int *displ, int root) const — gather arrays of variable size on the root task (mpicommunication.hh:333).
int min(T *inout, int len) const — compute the minimum of the argument over all processes and return the result in every process (mpicommunication.hh:249).
int allgather(const T *sbuf, int count, T1 *rbuf) const — gathers data from all tasks and distributes it to all (mpicommunication.hh:381).
int send(const T &data, int dest_rank, int tag) const — sends the data to dest_rank (mpicommunication.hh:146).
Communication(const Communication<No_Comm> &) — converting constructor for no-communication, interpreted as MPI_COMM_SELF (mpicommunication.hh:128).
T prod(const T &in) const — compute the product of the argument over all processes and return the result in every process (mpicommunication.hh:224).
MPIFuture — provides a future-like object for MPI communication; it contains the object that will be received and … (mpifuture.hh:93).
MPIFuture::get_send_mpidata() — mpifuture.hh:177.
MPIFuture::get_mpidata() — mpifuture.hh:173.