dune-grid-glue 2.9
gridglue.cc
// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
// vi: set et ts=4 sw=2 sts=2:
// SPDX-FileCopyrightText: Copyright © DUNE Project contributors, see file LICENSE.md in module root
// SPDX-License-Identifier: LGPL-3.0-or-later OR LicenseRef-GPL-2.0-only-with-dune-grid-glue-exception
/* IMPLEMENTATION OF CLASS   G R I D  G L U E */

#include "intersection.hh"
#include <vector>
#include <iterator>
#include "../gridglue.hh"
#if HAVE_MPI
#include "../common/ringcomm.hh"
#endif

#include <dune/common/unused.hh>

namespace Dune {
namespace GridGlue {

template<typename P0, typename P1>
GridGlue<P0, P1>::GridGlue(const std::shared_ptr< const GridPatch<0> >& gp0, const std::shared_ptr< const GridPatch<1> >& gp1, const std::shared_ptr<Merger>& merger)
  : patches_{gp0, gp1}, merger_(merger)
{
#if HAVE_MPI
  // if we have only sequential meshes, don't use parallel gluing
  if (gp0->gridView().comm().size() == 1
      && gp1->gridView().comm().size() == 1)
    mpicomm_ = MPI_COMM_SELF;
  else
    mpicomm_ = MPI_COMM_WORLD;
#endif // HAVE_MPI
  std::cout << "GridGlue: Constructor succeeded!" << std::endl;
}

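// Assemble the coupling: extract both surface patches, exchange them among
// all ranks (in the parallel case), and merge overlapping patch pairs into
// the list of remote intersections.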
template<typename P0, typename P1>
void GridGlue<P0, P1>::build()
{
  int myrank = 0;
#if HAVE_MPI
  int commsize = 1;
  MPI_Comm_rank(mpicomm_, &myrank);
  MPI_Comm_size(mpicomm_, &commsize);
#endif // HAVE_MPI

  // clear the contents from the current intersections array
  {
    std::vector<IntersectionData> dummy(1); // we need size 1, as we always store data for the end-intersection
    intersections_.swap(dummy);
  }

  std::vector<Dune::FieldVector<ctype, dimworld> > patch0coords;
  std::vector<unsigned int> patch0entities;
  std::vector<Dune::GeometryType> patch0types;
  std::vector<Dune::FieldVector<ctype,dimworld> > patch1coords;
  std::vector<unsigned int> patch1entities;
  std::vector<Dune::GeometryType> patch1types;

  /*
   * extract the global surface patches
   */

  // retrieve the coordinate and topology information from the extractors
  // and apply transformations if necessary
  extractGrid(patch<0>(), patch0coords, patch0entities, patch0types);
  extractGrid(patch<1>(), patch1coords, patch1entities, patch1types);

  std::cout << ">>>> rank " << myrank << " coords: "
            << patch0coords.size() << " and " << patch1coords.size() << std::endl;
  std::cout << ">>>> rank " << myrank << " entities: "
            << patch0entities.size() << " and " << patch1entities.size() << std::endl;
  std::cout << ">>>> rank " << myrank << " types: "
            << patch0types.size() << " and " << patch1types.size() << std::endl;

#ifdef WRITE_TO_VTK
  const char prefix[] = "GridGlue::Builder::build() : ";
  char patch0surf[256];
  snprintf(patch0surf, sizeof(patch0surf), "/tmp/vtk-patch0-test-%i", myrank);
  char patch1surf[256];
  snprintf(patch1surf, sizeof(patch1surf), "/tmp/vtk-patch1-test-%i", myrank);

  // std::cout << prefix << "Writing patch0 surface to '" << patch0surf << ".vtk'...\n";
  // VtkSurfaceWriter vtksw(patch0surf);
  // vtksw.writeSurface(patch0coords, patch0entities, grid0dim, dimworld);
  // std::cout << prefix << "Done writing patch0 surface!\n";

  // std::cout << prefix << "Writing patch1 surface to '" << patch1surf << ".vtk'...\n";
  // vtksw.setFilename(patch1surf);
  // vtksw.writeSurface(patch1coords, patch1entities, grid1dim, dimworld);
  // std::cout << prefix << "Done writing patch1 surface!\n";
#endif // WRITE_TO_VTK

  // we start with an empty set
  index__sz = 0;

#if HAVE_MPI
  if (commsize > 1)
  {
    // setup parallel indexset
    patch0_is_.beginResize();
    patch1_is_.beginResize();
  }

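  // The merge operator below is applied once per rank via a ring
  // communication: it merges the local grid0 patch with the received grid1
  // patch, and the received grid0 patch with the local grid1 patch.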
  auto op =
    [&](
      const int mergingrank,
      const std::vector<Dune::FieldVector<ctype,dimworld> >& remotePatch0coords,
      const std::vector<unsigned int>& remotePatch0entities,
      const std::vector<Dune::GeometryType>& remotePatch0types,
      const std::vector<Dune::FieldVector<ctype,dimworld> >& remotePatch1coords,
      const std::vector<unsigned int>& remotePatch1entities,
      const std::vector<Dune::GeometryType>& remotePatch1types
      )
    {
      if (remotePatch1entities.size() > 0 && patch0entities.size() > 0)
        mergePatches(patch0coords, patch0entities, patch0types, myrank,
                     remotePatch1coords, remotePatch1entities, remotePatch1types, mergingrank);
      if (mergingrank != myrank &&
          remotePatch0entities.size() > 0 && patch1entities.size() > 0)
        mergePatches(remotePatch0coords, remotePatch0entities, remotePatch0types, mergingrank,
                     patch1coords, patch1entities, patch1types, myrank);
    };
  Parallel::MPI_AllApply(mpicomm_, op,
                         patch0coords, patch0entities, patch0types,
                         patch1coords, patch1entities, patch1types
                         );

  if (commsize > 1)
  {
    // finalize ParallelIndexSet & RemoteIndices
    patch0_is_.endResize();
    patch1_is_.endResize();

    // setup remote index information
    remoteIndices_.setIncludeSelf(true);
    // #warning add list of neighbors ...
    remoteIndices_.setIndexSets(patch0_is_, patch1_is_, mpicomm_);
    remoteIndices_.rebuild<true/* all indices are public */>();

    // DEBUG Print all remote indices
#ifdef DEBUG_GRIDGLUE_PARALLELMERGE
    for (auto it = remoteIndices_.begin(); it != remoteIndices_.end(); ++it)
    {
      std::cout << myrank << "\tri-list\t" << it->first << std::endl;
      for (auto xit = it->second.first->begin(); xit != it->second.first->end(); ++xit)
        std::cout << myrank << "\tri-list 1 \t" << it->first << "\t" << *xit << std::endl;
      for (auto xit = it->second.second->begin(); xit != it->second.second->end(); ++xit)
        std::cout << myrank << "\tri-list 2 \t" << it->first << "\t" << *xit << std::endl;
    }
#endif
  }
#else // HAVE_MPI

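  // sequential version: merge the two local patches directly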
  if (patch1entities.size() > 0 && patch0entities.size() > 0)
  {
    mergePatches(patch0coords, patch0entities, patch0types, myrank,
                 patch1coords, patch1entities, patch1types, myrank);
#ifdef CALL_MERGER_TWICE
    mergePatches(patch0coords, patch0entities, patch0types, myrank,
                 patch1coords, patch1entities, patch1types, myrank);
#endif
  }

#endif // HAVE_MPI

}

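// debug helper: print all entries of a vector on one line, prefixed by the rank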
template<typename T>
void printVector(const std::vector<T> & v, std::string name, int rank)
{
  std::cout << rank << ": " << name << std::endl;
  for (size_t i=0; i<v.size(); i++)
  {
    std::cout << v[i] << " ";
  }
  std::cout << std::endl;
}

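// After the merger has built the merged grid, this method updates the list of
// remote intersections (for internal use). Either patch may be local or have
// been received from ranks patch0rank / patch1rank.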
template<typename P0, typename P1>
void GridGlue<P0, P1>::mergePatches(
  const std::vector<Dune::FieldVector<ctype,dimworld> >& patch0coords,
  const std::vector<unsigned int>& patch0entities,
  const std::vector<Dune::GeometryType>& patch0types,
  const int patch0rank,
  const std::vector<Dune::FieldVector<ctype,dimworld> >& patch1coords,
  const std::vector<unsigned int>& patch1entities,
  const std::vector<Dune::GeometryType>& patch1types,
  const int patch1rank)
{

  // how to handle overlap etc.?

  int myrank = 0;
#if HAVE_MPI
  int commsize = 1;
  MPI_Comm_rank(mpicomm_, &myrank);
  MPI_Comm_size(mpicomm_, &commsize);
#endif // HAVE_MPI

  // which patches are local?
  const bool patch0local = (myrank == patch0rank);
  const bool patch1local = (myrank == patch1rank);

  // remember the number of previous remote intersections
  const unsigned int offset = intersections_.size()-1;

  std::cout << myrank
            << " GridGlue::mergePatches : rank " << patch0rank << " / " << patch1rank << std::endl;

  // start the actual build process
  merger_->build(patch0coords, patch0entities, patch0types,
                 patch1coords, patch1entities, patch1types);

  // append to intersections list
  intersections_.resize(merger_->nSimplices() + offset + 1);
  for (unsigned int i = 0; i < merger_->nSimplices(); ++i)
    intersections_[offset + i] = IntersectionData(*this, i, offset, patch0local, patch1local);

  index__sz = intersections_.size() - 1;

  std::cout << myrank
            << " GridGlue::mergePatches : "
            << "The number of remote intersections is " << intersections_.size()-1 << std::endl;

#if 0
  printVector(patch0coords,"patch0coords",myrank);
  printVector(patch0entities,"patch0entities",myrank);
  printVector(patch0types,"patch0types",myrank);
  printVector(patch1coords,"patch1coords",myrank);
  printVector(patch1entities,"patch1entities",myrank);
  printVector(patch1types,"patch1types",myrank);
#endif

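  // In the parallel case, register the new intersections in the parallel
  // index sets; an intersection is identified globally by the triple
  // (patch0rank, patch1rank, intersection number).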
#if HAVE_MPI
  if (commsize > 1)
  {
    // update remote index sets
    assert(Dune::RESIZE == patch0_is_.state());
    assert(Dune::RESIZE == patch1_is_.state());

    for (unsigned int i = 0; i < merger_->nSimplices(); i++)
    {
      // #warning only handle the newest intersections / merger info
      const IntersectionData & it = intersections_[i];
      GlobalId gid(patch0rank, patch1rank, i);
      if (it.template local<0>())
      {
        Dune::PartitionType ptype = patch<0>().element(it.template index<0>()).partitionType();
        patch0_is_.add (gid, LocalIndex(offset+i, ptype) );
      }
      if (it.template local<1>())
      {
        Dune::PartitionType ptype = patch<1>().element(it.template index<1>()).partitionType();
        patch1_is_.add (gid, LocalIndex(offset+i, ptype) );
      }
    }
  }
#endif // HAVE_MPI

  // cleanup the merger
  merger_->clear();
}

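// Retrieve the coordinate, connectivity, and geometry-type arrays of a patch
// from the given extractor, flattening the per-face vertex lists into a
// single index array.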
template<typename P0, typename P1>
template<typename Extractor>
void GridGlue<P0, P1>::extractGrid (const Extractor & extractor,
                                    std::vector<Dune::FieldVector<ctype, dimworld> > & coords,
                                    std::vector<unsigned int> & entities,
                                    std::vector<Dune::GeometryType>& geometryTypes) const
{
  std::vector<typename Extractor::Coords> tempcoords;
  std::vector<typename Extractor::VertexVector> tempentities;

  extractor.getCoords(tempcoords);
  coords.clear();
  coords.reserve(tempcoords.size());

  for (unsigned int i = 0; i < tempcoords.size(); ++i)
  {
    assert(int(dimworld) == int(Extractor::dimworld));
    coords.push_back(Dune::FieldVector<ctype, dimworld>());
    for (size_t j = 0; j < dimworld; ++j)
      coords.back()[j] = tempcoords[i][j];
  }

  extractor.getFaces(tempentities);
  entities.clear();

  for (unsigned int i = 0; i < tempentities.size(); ++i) {
    for (unsigned int j = 0; j < tempentities[i].size(); ++j)
      entities.push_back(tempentities[i][j]);
  }

  // get the list of geometry types from the extractor
  extractor.getGeometryTypes(geometryTypes);

}

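// Communicate data attached to the remote intersections. In parallel runs the
// data is exchanged through a Dune::BufferedCommunicator; in sequential runs
// it is copied between the two sides through a local buffer.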
template<typename P0, typename P1>
template<class DataHandleImp, class DataTypeImp>
void GridGlue<P0, P1>::communicate(
  Dune::GridGlue::CommDataHandle<DataHandleImp, DataTypeImp> & data,
  Dune::InterfaceType iftype, Dune::CommunicationDirection dir) const
{
  typedef Dune::GridGlue::CommDataHandle<DataHandleImp, DataTypeImp> DataHandle;
  typedef typename DataHandle::DataType DataType;

#if HAVE_MPI

  if (mpicomm_ != MPI_COMM_SELF)
  {
    /*
     * P A R A L L E L   V E R S I O N
     */
    // setup communication interfaces
    Dune::dinfo << "GridGlue: parallel communication" << std::endl;
    typedef Dune::EnumItem <Dune::PartitionType, Dune::InteriorEntity> InteriorFlags;
    typedef Dune::EnumItem <Dune::PartitionType, Dune::OverlapEntity> OverlapFlags;
    typedef Dune::EnumRange <Dune::PartitionType, Dune::InteriorEntity, Dune::GhostEntity> AllFlags;
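    // the flag types select which partition types participate on the sending
    // and receiving side of the communication interface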
    Dune::Interface interface;
    assert(remoteIndices_.isSynced());
    switch (iftype)
    {
    case Dune::InteriorBorder_InteriorBorder_Interface :
      interface.build (remoteIndices_, InteriorFlags(), InteriorFlags() );
      break;
    case Dune::InteriorBorder_All_Interface :
      if (dir == Dune::ForwardCommunication)
        interface.build (remoteIndices_, InteriorFlags(), AllFlags() );
      else
        interface.build (remoteIndices_, AllFlags(), InteriorFlags() );
      break;
    case Dune::Overlap_OverlapFront_Interface :
      interface.build (remoteIndices_, OverlapFlags(), OverlapFlags() );
      break;
    case Dune::Overlap_All_Interface :
      if (dir == Dune::ForwardCommunication)
        interface.build (remoteIndices_, OverlapFlags(), AllFlags() );
      else
        interface.build (remoteIndices_, AllFlags(), OverlapFlags() );
      break;
    case Dune::All_All_Interface :
      interface.build (remoteIndices_, AllFlags(), AllFlags() );
      break;
    default :
      DUNE_THROW(Dune::NotImplemented, "GridGlue::communicate for interface " << iftype << " not implemented");
    }

    // setup communication info (class needed to tunnel all info to the operator)
    typedef Dune::GridGlue::CommInfo<GridGlue,DataHandleImp,DataTypeImp> CommInfo;
    CommInfo commInfo;
    commInfo.dir = dir;
    commInfo.gridglue = this;
    commInfo.data = &data;

    // create communicator
    Dune::BufferedCommunicator bComm;
    bComm.template build< CommInfo >(commInfo, commInfo, interface);

    // do communication
    // choose communication direction.
    if (dir == Dune::ForwardCommunication)
      bComm.forward< Dune::GridGlue::ForwardOperator >(commInfo, commInfo);
    else
      bComm.backward< Dune::GridGlue::BackwardOperator >(commInfo, commInfo);
  }
  else
#endif // HAVE_MPI
  {
    /*
     * S E Q U E N T I A L   V E R S I O N
     */
    Dune::dinfo << "GridGlue: sequential fallback communication" << std::endl;

    // get comm buffer size
    int ssz = size() * 10; // times data per intersection
    int rsz = size() * 10;

    // allocate send/receive buffer
    auto sendbuffer = std::make_unique<DataType[]>(ssz);
    auto receivebuffer = std::make_unique<DataType[]>(rsz);

    // gather
    Dune::GridGlue::StreamingMessageBuffer<DataType> gatherbuffer(sendbuffer.get());
    for (const auto& in : intersections(*this))
    {
      /*
         we need two variants depending on the communication direction.
       */
      if (dir == Dune::ForwardCommunication)
      {
        /*
           dir : Forward (grid0 -> grid1)
         */
        if (in.self())
        {
          data.gather(gatherbuffer, in.inside(), in);
        }
      }
      else // (dir == Dune::BackwardCommunication)
      {
        /*
           dir : Backward (grid1 -> grid0)
         */
        if (in.neighbor())
        {
          data.gather(gatherbuffer, in.outside(), in.flip());
        }
      }
    }

    assert(ssz == rsz);
    for (int i=0; i<ssz; i++)
      receivebuffer[i] = sendbuffer[i];

    // scatter
    Dune::GridGlue::StreamingMessageBuffer<DataType> scatterbuffer(receivebuffer.get());
    for (const auto& in : intersections(*this))
    {
      /*
         we need two variants depending on the communication direction.
       */
      if (dir == Dune::ForwardCommunication)
      {
        /*
           dir : Forward (grid0 -> grid1)
         */
        if (in.neighbor())
          data.scatter(scatterbuffer, in.outside(), in.flip(),
                       data.size(in));
      }
      else // (dir == Dune::BackwardCommunication)
      {
        /*
           dir : Backward (grid1 -> grid0)
         */
        if (in.self())
          data.scatter(scatterbuffer, in.inside(), in,
                       data.size(in));
      }
    }
  }
}

} // end namespace GridGlue
} // end namespace Dune