#define CheckMPIStatus(A,B) {}
#include <mpi.h>

#include <array>
#include <cassert>
#include <iostream>
#include <tuple>
#include <utility>
#include <vector>

#include <dune/common/fvector.hh>
#include <dune/common/hybridutilities.hh>
#include <dune/common/parallel/mpitraits.hh>
#include <dune/common/unused.hh>

#include <dune/geometry/type.hh>
namespace Impl {

  /** \brief Maps a C++ type to its MPI_Datatype and to the number of MPI entries per value */
  template<typename T>
  struct MPITypeInfo {};

  template<>
  struct MPITypeInfo< int >
  {
    static const unsigned int size = 1;
    static inline MPI_Datatype getType()
    {
      return MPI_INT;
    }
  };
  template<typename K, int N>
  struct MPITypeInfo< Dune::FieldVector<K,N> >
  {
    static const unsigned int size = N;
    static inline MPI_Datatype getType()
    {
      return Dune::MPITraits<K>::getType();
    }
  };
  template<>
  struct MPITypeInfo< unsigned int >
  {
    static const unsigned int size = 1;
    static inline MPI_Datatype getType()
    {
      return MPI_UNSIGNED;
    }
  };
  template<>
  struct MPITypeInfo< Dune::GeometryType >
  {
    static const unsigned int size = 1;
    static inline MPI_Datatype getType()
    {
      return Dune::MPITraits< Dune::GeometryType >::getType();
    }
  };
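  /* For illustration only (not part of the original header): a further
     specialization, e.g. for double, would follow the same pattern, with
     `size` giving the number of MPI entries per value and `getType()`
     returning the matching MPI_Datatype:

       template<>
       struct MPITypeInfo< double >
       {
         static const unsigned int size = 1;
         static inline MPI_Datatype getType()
         {
           return MPI_DOUBLE;
         }
       };
   */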
  /** \brief Resize a received vector to the element count reported in the MPI_Status */
  template<typename T>
  void MPI_SetVectorSize(
    std::vector<T> & data,
    MPI_Status & status)
  {
    typedef MPITypeInfo<T> Info;
    int sz;
    MPI_Get_count(&status, Info::getType(), &sz);
    assert(sz%Info::size == 0);
    data.resize(sz/Info::size);
  }
  /** \brief Send a std::vector<T> to the right neighbor in the ring and
   *         post the matching non-blocking receive from the left neighbor
   */
  template<typename T>
  void MPI_SendVectorInRing(
    std::vector<T> & data,
    std::vector<T> & next,
    int tag, int rightrank, int leftrank, MPI_Comm comm,
    MPI_Request & r_send, MPI_Request & r_recv)
  {
    int result DUNE_UNUSED;
    typedef MPITypeInfo<T> Info;
    // the receive buffer must be able to hold the maximum possible size
    next.resize(next.capacity());
    // send our data to the right neighbor ...
    result = MPI_Isend(
      &(data[0]), Info::size*data.size(), Info::getType(), rightrank, tag,
      comm, &r_send);
    // ... and receive (up to capacity) from the left neighbor; the actual
    // size is read from the MPI_Status once the receive has completed
    result = MPI_Irecv(
      &(next[0]), Info::size*next.size(), Info::getType(), leftrank, tag,
      comm, &r_recv);
  }
  /** \brief Functor for Dune::Hybrid::forEach: sends/receives the i-th tuple entry */
  template<typename... Args>
  struct call_MPI_SendVectorInRing
  {
    std::tuple<Args...> & remotedata;
    std::tuple<Args...> & nextdata;
    int & tag;
    int & rightrank;
    int & leftrank;
    MPI_Comm & mpicomm;
    std::array<MPI_Request, sizeof...(Args)> & requests_send;
    std::array<MPI_Request, sizeof...(Args)> & requests_recv;

    template<typename I>
    void operator()(I i)
    {
      MPI_SendVectorInRing(
        std::get<i>(remotedata),
        std::get<i>(nextdata),
        tag,
        rightrank, leftrank, mpicomm,
        requests_send[i],
        requests_recv[i]);
    }
  };
  /** \brief Functor for Dune::Hybrid::forEach: resizes the i-th received vector */
  template<typename... Args>
  struct call_MPI_SetVectorSize
  {
    std::tuple<Args...> & nextdata;
    std::array<MPI_Status, sizeof...(Args)> & status_recv;

    template<typename I>
    void operator()(I i)
    {
      MPI_SetVectorSize(std::get<i>(nextdata), status_recv[i]);
    }
  };
  template<typename OP, std::size_t... Indices, typename... Args>
  void MPI_AllApply_impl(MPI_Comm mpicomm,
    OP && op,
    std::index_sequence<Indices...> indices,
    const Args& ... data)
  {
    constexpr std::size_t N = sizeof...(Args);

    int myrank = 0;
    int commsize = 1;
    MPI_Comm_rank(mpicomm, &myrank);
    MPI_Comm_size(mpicomm, &commsize);
#ifdef DEBUG_GRIDGLUE_PARALLELMERGE
    std::cout << myrank << " Start Communication, size " << commsize << std::endl;
#endif
    // collect the local size of every data argument
    std::array<unsigned int, N> size({ ((unsigned int)data.size())... });

    // communicate the maximum size per argument, so that the receive
    // buffers on every rank are large enough
    std::array<unsigned int, N> maxSize;
    MPI_Allreduce(size.data(), maxSize.data(),
      size.size(), MPI_UNSIGNED, MPI_MAX, mpicomm);
#ifdef DEBUG_GRIDGLUE_PARALLELMERGE
    std::cout << myrank << " maxSize done..." << std::endl;
#endif
    // allocate the buffer holding the currently visible remote data and
    // initialize it with our own data
    std::tuple<Args...> remotedata { Args(maxSize[Indices])... };
    remotedata = std::tie(data...);

    // second buffer, needed for the asynchronous communication
    std::tuple<Args...> nextdata { Args(maxSize[Indices])... };

    // neighbor ranks in the ring
    int rightrank = (myrank + 1 + commsize) % commsize;
    int leftrank  = (myrank - 1 + commsize) % commsize;
    std::cout << myrank << ": size = " << commsize << std::endl;
    std::cout << myrank << ": left = " << leftrank
              << " right = " << rightrank << std::endl;
    // at the start the "remote" data is simply our own data
    int remoterank = myrank;

    // pass the data around the ring; in iteration i we receive the data
    // originating from rank (myrank - i)
    for (int i=1; i<commsize; i++)
    {
      int nextrank = (myrank - i + commsize) % commsize;

      std::cout << myrank << ": next = " << nextrank << std::endl;
      // one send and one receive request per data argument
      std::array<MPI_Request,N> requests_send;
      std::array<MPI_Request,N> requests_recv;

      // pass the current remote data on to the right neighbor and post the
      // receives from the left neighbor; any fixed tag works, as long as
      // all ranks use the same value
      int tag = 0;
      Dune::Hybrid::forEach(indices,
        call_MPI_SendVectorInRing<Args...>({
            remotedata,
            nextdata,
            tag,
            rightrank, leftrank, mpicomm,
            requests_send,
            requests_recv
          }));
      // apply the operator to the data we currently hold, while the
      // communication proceeds in the background
      op(remoterank, std::get<Indices>(remotedata)...);

      // wait for the receives to complete
      std::array<MPI_Status,N> status_send;
      std::array<MPI_Status,N> status_recv;
      MPI_Waitall(N, &requests_recv[0], &status_recv[0]);

      // the data received in this iteration originates from nextrank
      remoterank = nextrank;
      // shrink the received vectors to the actual element counts
      Dune::Hybrid::forEach(indices,
        call_MPI_SetVectorSize<Args...>({
            nextdata, status_recv
          }));

      // the sends must have completed before the buffers may be reused
      MPI_Waitall(N, &requests_send[0], &status_send[0]);

      // the freshly received data becomes the remote data of the next iteration
      std::swap(remotedata, nextdata);
    }
    // final application: the data received in the last iteration
    op(remoterank, std::get<Indices>(remotedata)...);
  }

} // end namespace Impl
/** \brief apply an operator locally to a distributed data set
 *
 *  The operator is called once for every rank, with the rank number and the
 *  data owned by that rank, as the data is passed around all ranks in a ring.
 */
template<typename OP, typename... Args>
void MPI_AllApply(MPI_Comm mpicomm,
  OP && op,
  const Args& ... data)
{
  Impl::MPI_AllApply_impl(
    mpicomm,
    std::forward<OP>(op),
    std::make_index_sequence<sizeof...(Args)>(),
    data...
    );
}
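/* Usage sketch (illustrative only; coords, ids and the lambda are hypothetical
   and not part of this header). Assuming MPI has been initialized, the callable
   receives the originating rank followed by one reference per data argument,
   in the order the arguments were passed:

     std::vector<Dune::FieldVector<double,3> > coords(10);   // local data
     std::vector<unsigned int> ids(10);                      // local data

     MPI_AllApply(MPI_COMM_WORLD,
       [&](int rank,
           const std::vector<Dune::FieldVector<double,3> >& remoteCoords,
           const std::vector<unsigned int>& remoteIds) {
         std::cout << "data of rank " << rank << ": "
                   << remoteCoords.size() << " coordinates, "
                   << remoteIds.size() << " ids" << std::endl;
       },
       coords, ids);
 */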