#define CheckMPIStatus(A,B) {}  // placeholder: MPI return codes are not checked

#include <mpi.h>

#include <array>
#include <cassert>
#include <iostream>
#include <tuple>
#include <utility>
#include <vector>

#include <dune/common/fvector.hh>
#include <dune/common/shared_ptr.hh>
#include <dune/common/hybridutilities.hh>
#include <dune/common/std/utility.hh>
#include <dune/common/std/apply.hh>
#include <dune/common/parallel/mpitraits.hh>
#include <dune/common/unused.hh>

#include <dune/geometry/type.hh>

namespace Impl {

/** \brief Traits class mapping a C++ type to its MPI datatype and element count */
template<typename T>
struct MPITypeInfo {};
template<>
struct MPITypeInfo< int >
{
  static const unsigned int size = 1;
  static inline MPI_Datatype getType()
  {
    return MPI_INT;
  }
};
template<typename K, int N>
struct MPITypeInfo< Dune::FieldVector<K,N> >
{
  static const unsigned int size = N;
  static inline MPI_Datatype getType()
  {
    return Dune::MPITraits<K>::getType();
  }
};
template<>
struct MPITypeInfo< unsigned int >
{
  static const unsigned int size = 1;
  static inline MPI_Datatype getType()
  {
    return MPI_UNSIGNED;
  }
};
template<>
struct MPITypeInfo< Dune::GeometryType >
{
  static const unsigned int size = 1;
  static inline MPI_Datatype getType()
  {
    return Dune::MPITraits< Dune::GeometryType >::getType();
  }
};
/** \brief Resize a vector to the number of elements actually received */
template<typename T>
void MPI_SetVectorSize(
  std::vector<T> & data,
  MPI_Status & status)
{
  typedef MPITypeInfo<T> Info;
  int sz;
  MPI_Get_count(&status, Info::getType(), &sz);
  assert(sz%Info::size == 0);
  data.resize(sz/Info::size);
}
/** \brief Send a vector to the right neighbor and post a receive from the left neighbor
 *
 *  Both operations are non-blocking; the caller waits on the returned requests.
 */
template<typename T>
void MPI_SendVectorInRing(
  std::vector<T> & data,
  std::vector<T> & next,
  int tag,
  int rightrank,
  int leftrank,
  MPI_Comm comm,
  MPI_Request & r_send,
  MPI_Request & r_recv)
{
  int result DUNE_UNUSED;
  typedef MPITypeInfo<T> Info;
  // make sure the receive buffer can hold the largest possible message
  next.resize(next.capacity());
  // send exactly data.size() entries to the right neighbor
  result =
    MPI_Isend(
      &(data[0]), Info::size*data.size(), Info::getType(), rightrank, tag,
      comm, &r_send);
  // receive up to next.size() entries from the left neighbor;
  // the actual count is read from the MPI_Status later on
  result =
    MPI_Irecv(
      &(next[0]), Info::size*next.size(), Info::getType(), leftrank, tag,
      comm, &r_recv);
}
/** \brief Helper functor: send/receive the i-th tuple entry in the ring
 *  (used with Dune::Hybrid::forEach instead of a variadic lambda)
 */
template<typename... Args>
struct call_MPI_SendVectorInRing
{
  std::tuple<Args...> & remotedata;
  std::tuple<Args...> & nextdata;
  int & tag;
  int & rightrank;
  int & leftrank;
  MPI_Comm & mpicomm;
  std::array<MPI_Request, sizeof...(Args)> & requests_send;
  std::array<MPI_Request, sizeof...(Args)> & requests_recv;

  template<typename I>
  void operator()(I i)
  {
    MPI_SendVectorInRing(
      std::get<i>(remotedata),
      std::get<i>(nextdata),
      tag,
      rightrank, leftrank, mpicomm,
      requests_send[i],
      requests_recv[i]);
  }
};
/** \brief Helper functor: resize the i-th receive buffer to the size actually received */
template<typename... Args>
struct call_MPI_SetVectorSize
{
  std::tuple<Args...> & nextdata;
  std::array<MPI_Status, sizeof...(Args)> & status_recv;

  template<typename I>
  void operator()(I i)
  {
    MPI_SetVectorSize(std::get<i>(nextdata), status_recv[i]);
  }
};
/** \brief Ring-communication kernel: rotate every rank's data around the ring
 *         and apply the operator once per rank to the data currently held.
 */
template<typename OP, std::size_t... Indices, typename... Args>
void MPI_AllApply_impl(MPI_Comm mpicomm,
  OP && op,
  std::index_sequence<Indices...> indices,
  const Args& ... data)
{
  constexpr std::size_t N = sizeof...(Args);
  int myrank = 0;
  int commsize = 1;
  MPI_Comm_rank(mpicomm, &myrank);
  MPI_Comm_size(mpicomm, &commsize);

  if (commsize > 1)
  {
#ifdef DEBUG_GRIDGLUE_PARALLELMERGE
    std::cout << myrank << " Start Communication, size " << commsize << std::endl;
#endif
    // local data sizes
    std::array<unsigned int, N> size({ ((unsigned int)data.size())... });

    // communicate the maximum data size of each entry
    std::array<unsigned int, N> maxSize;
    MPI_Allreduce(&size, &maxSize,
      size.size(), MPI_UNSIGNED, MPI_MAX, mpicomm);
#ifdef DEBUG_GRIDGLUE_PARALLELMERGE
    std::cout << myrank << " maxSize " << "done... " << std::endl;
#endif
    // allocate a receive buffer large enough for any rank's data
    std::tuple<Args...> remotedata { Args(maxSize[Indices])... };

    // copy the local data into the communication buffer
    remotedata = std::tie(data...);

    // second buffer, needed for the asynchronous communication
    std::tuple<Args...> nextdata { Args(maxSize[Indices])... };

    // communicate the data in a ring
    int rightrank = (myrank + 1 + commsize) % commsize;
    int leftrank  = (myrank - 1 + commsize) % commsize;
#ifdef DEBUG_GRIDGLUE_PARALLELMERGE
    std::cout << myrank << ": size = " << commsize << std::endl;
    std::cout << myrank << ": left = " << leftrank
              << " right = " << rightrank << std::endl;
#endif
    // initially the "remote" data is our own data
    int remoterank = myrank;

    for (int i=1; i<commsize; i++)
    {
      // in this iteration we receive the data owned by nextrank
      int nextrank = (myrank - i + commsize) % commsize;
#ifdef DEBUG_GRIDGLUE_PARALLELMERGE
      std::cout << myrank << ": next = " << nextrank << std::endl;
#endif
      // send the currently held data to the right neighbor,
      // receive the next data set from the left neighbor
      std::array<MPI_Request,N> requests_send;
      std::array<MPI_Request,N> requests_recv;
      int tag = 0;
      Dune::Hybrid::forEach(indices,
        call_MPI_SendVectorInRing<Args...>({
            remotedata,
            nextdata,
            tag,
            rightrank, leftrank, mpicomm,
            requests_send,
            requests_recv
          }));

      // apply the operator to the data set we currently hold
      op(remoterank,std::get<Indices>(remotedata)...);

      // wait for the receives to finish
      std::array<MPI_Status,N> status_send;
      std::array<MPI_Status,N> status_recv;
      MPI_Waitall(N,&requests_recv[0],&status_recv[0]);

      // we have received from nextrank, so the data now held belongs to nextrank
      remoterank = nextrank;

      // shrink the receive buffers to the number of entries actually received
      Dune::Hybrid::forEach(indices,
        call_MPI_SetVectorSize<Args...>({
            nextdata, status_recv
          }));

      MPI_Waitall(N,&requests_send[0],&status_send[0]);

      // swap the communication buffers
      std::swap(remotedata,nextdata);
    }

    // last apply, using the data received in the final iteration
    op(remoterank,std::get<Indices>(remotedata)...);
  }
  else // single rank: apply the operator to the local data only
  {
    op(myrank,data...);
  }
}
} // end namespace Impl

/** \brief Apply an operator locally to a distributed data set
 *
 *  All ranks' data is rotated around the MPI ring; on every rank the operator
 *  is invoked once per rank with that rank's number and its data vectors.
 */
template<typename OP, typename... Args>
void MPI_AllApply(MPI_Comm mpicomm,
  OP && op,
  const Args& ... data)
{
  Impl::MPI_AllApply_impl(
    mpicomm,
    std::forward<OP>(op),
    std::make_index_sequence<sizeof...(Args)>(),
    data...
    );
}
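
For illustration, a minimal usage sketch (not part of the header): it assumes an MPI program in which every rank contributes one std::vector<int>; the variable names and the lambda are made up for this example, and the namespace qualification of MPI_AllApply is omitted, matching the listing above.

#include <mpi.h>
#include <iostream>
#include <vector>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  // every rank contributes three copies of its own rank number
  std::vector<int> ids(3, rank);

  // the operator is called once per rank, receiving that rank's data
  MPI_AllApply(MPI_COMM_WORLD,
    [rank](int remoterank, const std::vector<int>& remoteIds) {
      std::cout << rank << " got " << remoteIds.size()
                << " entries from rank " << remoterank << std::endl;
    },
    ids);

  MPI_Finalize();
  return 0;
}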