3 #ifndef DUNE_MPICOLLECTIVECOMMUNICATION_HH
4 #define DUNE_MPICOLLECTIVECOMMUNICATION_HH
23 #include "collectivecommunication.hh"
24 #include "mpitraits.hh"
// Generic_MPI_Op<Type,BinaryFunction>: wraps a user-defined MPI reduction
// operation that applies the BinaryFunction functor element-wise.
// NOTE(review): this chunk is a partial extraction -- the class head, the
// null-check in get(), the declarations of `temp` and `func`, and the closing
// braces are not visible here; the code lines below are kept byte-identical.
38 template<
typename Type,
typename BinaryFunction>
// Registers `operation` as a new MPI_Op.  The C-style function-pointer cast
// adapts `operation` to the generic signature MPI_Op_create expects; `true`
// declares the operation commutative; the handle is written into *op.
48 MPI_Op_create((
void (*)(
void*,
void*,
int*, MPI_Datatype*))&operation,
true,op.
get());
// Callback invoked by MPI during a reduction: combines each of the `*len`
// elements of `in` into `inout` via the functor.  `dptr` (datatype) unused.
53 static void operation (Type *in, Type *inout,
int *len, MPI_Datatype *dptr)
// Element-wise loop advancing both buffer pointers in lockstep.
57 for (
int i=0; i< *len; ++i, ++in, ++inout) {
// `temp` and `func` are declared on lines not visible in this chunk.
59 temp = func(*in, *inout);
// Copy constructor is empty (and presumably private): instances of this
// helper are never meant to be created -- it only carries statics.
64 Generic_MPI_Op (
const Generic_MPI_Op& ) {}
// Lazily allocated handle for the registered MPI_Op (created by get()).
65 static shared_ptr<MPI_Op> op;
69 template<
typename Type,
typename BinaryFunction>
70 shared_ptr<MPI_Op> Generic_MPI_Op<Type,BinaryFunction>::op = shared_ptr<MPI_Op>(
static_cast<MPI_Op*
>(0));
// ComposeMPIOp(type,func,op): specializes Generic_MPI_Op so that the standard
// functor `func<type>` maps directly onto the builtin MPI operation `op`,
// avoiding a custom MPI_Op_create for types/operations MPI already supports.
// NOTE(review): extraction dropped several macro continuation lines; the
// remaining lines are kept byte-identical (no comments inserted inside the
// backslash-continued macro body, which would terminate it).
72 #define ComposeMPIOp(type,func,op) \
74 class Generic_MPI_Op<type, func<type> >{ \
76 static MPI_Op get(){ \
81 Generic_MPI_Op (const Generic_MPI_Op & ) {}\
100 ComposeMPIOp(
unsigned short, std::multiplies, MPI_PROD);
// Fragment of the constructor (head not in this chunk): for a non-null
// communicator, query this process's rank (`me`) and the total number of
// processes (`procs`).  The closing brace / else-branch is not visible here.
152 if(communicator!=MPI_COMM_NULL) {
153 MPI_Comm_rank(communicator,&me);
154 MPI_Comm_size(communicator,&procs);
// Fragment of the scalar sum(T&) overload (signature not visible in this
// chunk): reduces the single value via allreduce with std::plus.
178 allreduce<std::plus<T> >(&in,&out,1);
184 int sum (T* inout,
int len)
const
186 return allreduce<std::plus<T> >(inout,len);
// Fragment of the scalar prod(T&) overload (signature not visible in this
// chunk): reduces the single value via allreduce with std::multiplies.
194 allreduce<std::multiplies<T> >(&in,&out,1);
200 int prod (T* inout,
int len)
const
202 return allreduce<std::multiplies<T> >(inout,len);
// Fragment of the scalar min(T&) overload (signature not visible in this
// chunk): reduces the single value via allreduce with the Min<T> functor.
210 allreduce<Min<T> >(&in,&out,1);
216 int min (T* inout,
int len)
const
218 return allreduce<Min<T> >(inout,len);
// Fragment of the scalar max(T&) overload (signature not visible in this
// chunk): reduces the single value via allreduce with the Max<T> functor.
227 allreduce<Max<T> >(&in,&out,1);
233 int max (T* inout,
int len)
const
235 return allreduce<Max<T> >(inout,len);
// Body of barrier() (signature not visible in this chunk): block until every
// process in the communicator has reached this point.
241 return MPI_Barrier(communicator);
// gather: collect `len` values from each process's `in` buffer into `out`
// on the process with rank `root`.  NOTE(review): the body is not visible
// in this chunk -- presumably delegates to MPI_Gather; verify in full file.
254 int gather (T* in, T* out,
int len,
int root)
const
// scatter: distribute `len` values from `send` on the `root` process to the
// `recv` buffer of every process.  NOTE(review): the body is not visible in
// this chunk -- presumably delegates to MPI_Scatter; verify in full file.
264 int scatter (T* send, T* recv,
int len,
int root)
const
// Implicit conversion to the raw MPI communicator handle (body not visible
// in this chunk; presumably returns `communicator`).
271 operator MPI_Comm ()
const
// Orphaned template head of a two-parameter member; the function it belongs
// to is not visible in this chunk.
277 template<
typename T,
typename T1>
286 template<
typename BinaryFunction,
typename Type>
289 Type* out =
new Type[len];
290 int ret = allreduce<BinaryFunction>(inout,out,len);
291 std::copy(out, out+len, inout);
// Orphaned template head; presumably belongs to the two-buffer
// allreduce<BinaryFunction,Type>(in,out,len) overload called above, whose
// definition is not visible in this chunk.
297 template<
typename BinaryFunction,
typename Type>
// The MPI communicator all collective operations of this object run on
// (used by the rank/size queries and MPI_Barrier above).
305 MPI_Comm communicator;