dune-pdelab  2.4.1
parallelhelper.hh
Go to the documentation of this file.
1 // -*- tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 // vi: set et ts=8 sw=2 sts=2:
3 #ifndef DUNE_PDELAB_BACKEND_ISTL_PARALLELHELPER_HH
4 #define DUNE_PDELAB_BACKEND_ISTL_PARALLELHELPER_HH
5 
6 #include <limits>
7 
8 #include <dune/common/deprecated.hh>
9 #include <dune/common/parallel/mpihelper.hh>
10 #include <dune/common/stdstreams.hh>
11 #include <dune/common/typetraits.hh>
12 
13 #if HAVE_UG && PDELAB_SEQUENTIAL_UG
14 // We need the UGGrid declaration for the assertion
15 #include <dune/grid/uggrid.hh>
16 #endif
17 
18 #include <dune/istl/owneroverlapcopy.hh>
19 #include <dune/istl/solvercategory.hh>
20 #include <dune/istl/operators.hh>
21 #include <dune/istl/solvers.hh>
22 #include <dune/istl/preconditioners.hh>
23 #include <dune/istl/scalarproducts.hh>
24 #include <dune/istl/paamg/amg.hh>
25 #include <dune/istl/paamg/pinfo.hh>
26 #include <dune/istl/io.hh>
27 #include <dune/istl/superlu.hh>
28 
35 
36 namespace Dune {
37  namespace PDELab {
38  namespace istl {
39 
43 
44 
45  //========================================================
46  // A parallel helper class providing a nonoverlapping
47  // decomposition of all degrees of freedom
48  //========================================================
49 
// NOTE(review): this block is a Doxygen text rendering of the header; the
// embedded numbers are original source line numbers, and a few lines (the
// "class ParallelHelper" declaration on line 51 and the RankVector typedef
// around lines 57-59) were dropped by the extraction, so the code below is
// incomplete as shown.
50  template<typename GFS>
52  {
53 
// Integral type used to store the MPI rank owning a DOF.
55  typedef int RankIndex;
56 
// One bool per DOF of the GFS, marking DOFs attached to ghost entities.
60  using GhostVector = Dune::PDELab::Backend::Vector<GFS,bool>;
61 
// Multi-index type with which the GFS ordering addresses a single DOF.
63  typedef typename GFS::Ordering::Traits::ContainerIndex ContainerIndex;
64 
65  public:
66 
// Builds a disjoint DOF partitioning for gfs: records an owner rank for every
// DOF in _ranks and flags ghost DOFs in _ghosts.
67  ParallelHelper (const GFS& gfs, int verbose = 1)
68  : _gfs(gfs)
69  , _rank(gfs.gridView().comm().rank())
70  , _ranks(gfs,_rank)
71  , _ghosts(gfs,false)
72  , _verbose(verbose)
73  {
74 
75  // Let's try to be clever and reduce the communication overhead by picking the smallest
76  // possible communication interface depending on the overlap structure of the GFS.
77  // FIXME: Switch to simple comparison as soon as dune-grid:1b3e83ec0 is reliably available!
78  if (gfs.entitySet().partitions().value == Partitions::interiorBorder.value)
79  {
80  // The GFS only spans the interior and border partitions, so we can skip sending or
81  // receiving anything else.
82  _interiorBorder_all_interface = InteriorBorder_InteriorBorder_Interface;
83  _all_all_interface = InteriorBorder_InteriorBorder_Interface;
84  }
85  else
86  {
87  // In general, we have to transmit more.
88  _interiorBorder_all_interface = InteriorBorder_All_Interface;
89  _all_all_interface = All_All_Interface;
90  }
91 
// Communication is only required when there is more than one rank.
92  if (_gfs.gridView().comm().size()>1)
93  {
94 
95  // find out about ghosts
96  //GFSDataHandle<GFS,GhostVector,GhostGatherScatter>
// NOTE(review): the declaration line for 'gdh' (line 97; a GhostDataHandle
// per the cross-reference index) was lost in the extraction.
98  gdh(_gfs,_ghosts,false);
99  _gfs.gridView().communicate(gdh,_interiorBorder_all_interface,Dune::ForwardCommunication);
100 
101  // create disjoint DOF partitioning
102  // GFSDataHandle<GFS,RankVector,DisjointPartitioningGatherScatter<RankIndex> >
103  // ibdh(_gfs,_ranks,DisjointPartitioningGatherScatter<RankIndex>(_rank));
// NOTE(review): the declaration line for 'pdh' (line 104) was likewise lost.
105  _gfs.gridView().communicate(pdh,_interiorBorder_all_interface,Dune::ForwardCommunication);
106 
107  }
108 
109  }
110 
// Masks out (zeroes) all entries of x not owned by this rank. The tag-based
// dispatch call (line 117) was lost in the extraction; the private overloads
// below implement the recursion.
112  template<typename X>
113  void maskForeignDOFs(X& x) const
114  {
115  using Backend::native;
116  // dispatch to implementation.
118  }
119 
120  private:
121 
122  // Implementation for block vector; recursively masks blocks.
123  template<typename X, typename Mask>
124  void maskForeignDOFs(istl::tags::block_vector, X& x, const Mask& mask) const
125  {
126  typename Mask::const_iterator mask_it = mask.begin();
127  for (typename X::iterator it = x.begin(),
128  end_it = x.end();
129  it != end_it;
130  ++it, ++mask_it)
131  maskForeignDOFs(istl::container_tag(*it),*it,*mask_it);
132  }
133 
134  // Implementation for field vector, iterates over entries and masks them individually.
135  template<typename X, typename Mask>
136  void maskForeignDOFs(istl::tags::field_vector, X& x, const Mask& mask) const
137  {
138  typename Mask::const_iterator mask_it = mask.begin();
139  for (typename X::iterator it = x.begin(),
140  end_it = x.end();
141  it != end_it;
142  ++it, ++mask_it)
143  *it = (*mask_it == _rank ? *it : typename X::field_type(0));
144  }
145 
146  public:
147 
149  bool owned(const ContainerIndex& i) const
150  {
151  return _ranks[i] == _rank;
152  }
153 
155  bool isGhost(const ContainerIndex& i) const
156  {
157  return _ghosts[i];
158  }
159 
// Rank-local contribution to a global dot product: only DOFs owned by this
// rank (according to _ranks) contribute, so summing the returned value over
// all ranks yields the global dot product without double counting.
161  template<typename X, typename Y>
162  typename PromotionTraits<
163  typename X::field_type,
164  typename Y::field_type
165  >::PromotedType
166  disjointDot(const X& x, const Y& y) const
167  {
168  using Backend::native;
// NOTE(review): the tag-dispatch line (line 169, presumably
// "return disjointDot(istl::container_tag(native(x)),") was lost in the
// extraction; the three arguments below belong to that call.
170  native(x),
171  native(y),
172  native(_ranks)
173  );
174  }
175 
176  private:
177 
178  // Implementation for BlockVector, collects the result of recursively
179  // invoking the algorithm on the vector blocks.
180  template<typename X, typename Y, typename Mask>
181  typename PromotionTraits<
182  typename X::field_type,
183  typename Y::field_type
184  >::PromotedType
185  disjointDot(istl::tags::block_vector, const X& x, const Y& y, const Mask& mask) const
186  {
187  typedef typename PromotionTraits<
188  typename X::field_type,
189  typename Y::field_type
190  >::PromotedType result_type;
191 
192  result_type r(0);
193 
194  typename Y::const_iterator y_it = y.begin();
195  typename Mask::const_iterator mask_it = mask.begin();
196  for (typename X::const_iterator x_it = x.begin(),
197  end_it = x.end();
198  x_it != end_it;
199  ++x_it, ++y_it, ++mask_it)
200  r += disjointDot(istl::container_tag(*x_it),*x_it,*y_it,*mask_it);
201 
202  return r;
203  }
204 
205  // Implementation for FieldVector, iterates over the entries and calls Dune::dot() for DOFs
206  // associated with the current rank.
207  template<typename X, typename Y, typename Mask>
208  typename PromotionTraits<
209  typename X::field_type,
210  typename Y::field_type
211  >::PromotedType
212  disjointDot(istl::tags::field_vector, const X& x, const Y& y, const Mask& mask) const
213  {
214  typedef typename PromotionTraits<
215  typename X::field_type,
216  typename Y::field_type
217  >::PromotedType result_type;
218 
219  result_type r(0);
220 
221  typename Y::const_iterator y_it = y.begin();
222  typename Mask::const_iterator mask_it = mask.begin();
223  for (typename X::const_iterator x_it = x.begin(),
224  end_it = x.end();
225  x_it != end_it;
226  ++x_it, ++y_it, ++mask_it)
227  r += (*mask_it == _rank ? Dune::dot(*x_it,*y_it) : result_type(0));
228 
229  return r;
230  }
231 
232  public:
233 
235  RankIndex rank() const
236  {
237  return _rank;
238  }
239 
240 #if HAVE_MPI
241 
243 
// Makes the matrix consistent and builds the parallel index set and remote
// index information required to run AMG on it; the definition is given
// out-of-class below, guarded by the same HAVE_MPI condition.
259  template<typename MatrixType, typename Comm>
260  void createIndexSetAndProjectForAMG(MatrixType& m, Comm& c);
261 
262  private:
263 
264  // Checks whether a matrix block is owned by the current process. Used for the AMG
265  // construction and thus assumes a single level of blocking and blocks with ownership
266  // restricted to a single DOF.
267  bool owned_for_amg(std::size_t i) const
268  {
269  return Backend::native(_ranks)[i][0] == _rank;
270  }
271 
272 #endif // HAVE_MPI
273 
274  private:
275 
// Function space this helper was built for (reference, not owned).
276  const GFS& _gfs;
// MPI rank of this process, cached from the grid view's communicator.
277  const RankIndex _rank;
// NOTE(review): the RankVector typedef was lost in the extraction; from the
// usage above it is presumably a Backend::Vector<GFS,RankIndex> — confirm
// against the original header.
278  RankVector _ranks; // vector to identify unique decomposition
279  GhostVector _ghosts; //vector to identify ghost dofs
280  int _verbose; //verbosity
281 
// Interface used when only interior/border data must be exchanged.
283  InterfaceType _interiorBorder_all_interface;
284 
// Interface used when all partitions must exchange data.
286  InterfaceType _all_all_interface;
287  };
288 
289 #if HAVE_MPI
290 
// Out-of-class definition. NOTE(review): the signature line (line 293,
// "void ParallelHelper<GFS>::createIndexSetAndProjectForAMG(M& m, C& c)"
// per the cross-reference index) was lost in the extraction.
291  template<typename GFS>
292  template<typename M, typename C>
294  {
295 
296  using Backend::native;
297 
// NOTE(review): the template-argument lines of both is_same checks (lines
// 301/303 and 309/311) were lost; per the cross-reference index they compare
// the container tags of M and of M's block type against tags::bcrs_matrix
// and tags::field_matrix respectively.
298  const bool is_bcrs_matrix =
299  is_same<
300  typename istl::tags::container<
302  >::type::base_tag,
304  >::value;
305 
306  const bool block_type_is_field_matrix =
307  is_same<
308  typename istl::tags::container<
310  >::type::base_tag,
312  >::value;
313 
314  // We assume M to be a BCRSMatrix in the following, so better check for that
315  static_assert(is_bcrs_matrix && block_type_is_field_matrix, "matrix structure not compatible with AMG");
316 
317  // ********************************************************************************
318  // In the following, the code will always assume that all DOFs stored in a single
319  // block of the BCRSMatrix are attached to the same entity and can be handled
320  // identically. For that reason, the code often restricts itself to inspecting the
321  // first entry of the blocks in the diverse BlockVectors.
322  // ********************************************************************************
323 
324  typedef typename GFS::Traits::GridViewType GV;
325  typedef typename RankVector::size_type size_type;
326  const GV& gv = _gfs.gridView();
327 
328  // Do we need to communicate at all?
329  const bool need_communication = _gfs.gridView().comm().size() > 1;
330 
331  // First find out which dofs we share with other processors
332  using BoolVector = Backend::Vector<GFS,bool>;
333  BoolVector sharedDOF(_gfs, false);
334 
335  if (need_communication)
336  {
337  SharedDOFDataHandle<GFS,BoolVector> data_handle(_gfs,sharedDOF,false);
338  _gfs.gridView().communicate(data_handle,_all_all_interface,Dune::ForwardCommunication);
339  }
340 
341  // Count shared dofs that we own
342  typedef typename C::ParallelIndexSet::GlobalIndex GlobalIndex;
343  GlobalIndex count = 0;
344 
345  for (size_type i = 0; i < sharedDOF.N(); ++i)
346  if (owned_for_amg(i) && native(sharedDOF)[i][0])
347  ++count;
348 
349  dverb << gv.comm().rank() << ": shared block count is " << count.touint() << std::endl;
350 
351  // Communicate per-rank count of owned and shared DOFs to all processes.
352  std::vector<GlobalIndex> counts(_gfs.gridView().comm().size());
353  _gfs.gridView().comm().allgather(&count, 1, &(counts[0]));
354 
355  // Compute start index start_p = \sum_{i=0}^{i<p} counts_i
356  GlobalIndex start = std::accumulate(counts.begin(),counts.begin() + _rank,GlobalIndex(0));
357 
// Assign globally unique, consecutive indices to the owned shared DOFs,
// starting at this rank's offset. NOTE(review): the GIVector typedef (line
// 358) was lost in the extraction; from the initialization below it is
// presumably a Backend vector of GlobalIndex entries — confirm against the
// original header.
359  GIVector scalarIndices(_gfs, std::numeric_limits<GlobalIndex>::max());
360 
361  for (size_type i = 0; i < sharedDOF.N(); ++i)
362  if (owned_for_amg(i) && native(sharedDOF)[i][0])
363  {
364  native(scalarIndices)[i][0] = start;
365  ++start;
366  }
367 
368  // Publish global indices for the shared DOFS to other processors.
369  if (need_communication)
370  {
371  MinDataHandle<GFS,GIVector> data_handle(_gfs,scalarIndices);
372  _gfs.gridView().communicate(data_handle,_interiorBorder_all_interface,Dune::ForwardCommunication);
373  }
374 
375  // Setup the index set
376  c.indexSet().beginResize();
377  for (size_type i=0; i<scalarIndices.N(); ++i)
378  {
379  Dune::OwnerOverlapCopyAttributeSet::AttributeSet attr;
380  if(native(scalarIndices)[i][0] != std::numeric_limits<GlobalIndex>::max())
381  {
382  // global index exist in index set
383  if (owned_for_amg(i))
384  {
385  // This dof is managed by us.
386  attr = Dune::OwnerOverlapCopyAttributeSet::owner;
387  }
388  else
389  {
390  attr = Dune::OwnerOverlapCopyAttributeSet::copy;
391  }
392  c.indexSet().add(native(scalarIndices)[i][0], typename C::ParallelIndexSet::LocalIndex(i,attr));
393  }
394  }
395  c.indexSet().endResize();
396 
397  // Compute neighbors using communication
398  std::set<int> neighbors;
399 
400  if (need_communication)
401  {
402  GFSNeighborDataHandle<GFS,int> data_handle(_gfs,_rank,neighbors);
403  _gfs.gridView().communicate(data_handle,_all_all_interface,Dune::ForwardCommunication);
404  }
405 
// Hand the neighbor set to the communication object and rebuild its remote
// index information (without including attribute data, hence <false>).
406  c.remoteIndices().setNeighbours(neighbors);
407  c.remoteIndices().template rebuild<false>();
408  }
409 
410 #endif // HAVE_MPI
411 
// Selects the parallelization information type for AMG: with a fake
// (sequential) MPI helper, fall back to sequential AMG information.
// NOTE(review): the "struct CommSelector" declaration line (line 413) was
// lost in the extraction.
412  template<int s, bool isFakeMPIHelper>
414  {
415  typedef Dune::Amg::SequentialInformation type;
416  };
417 
418 
419 #if HAVE_MPI
420 
421  // Need MPI for OwnerOverlapCopyCommunication
// Real MPI available: use ISTL's owner/overlap/copy communication with an
// s-bit big unsigned integer as global index type and int as local index.
422  template<int s>
423  struct CommSelector<s,false>
424  {
425  typedef OwnerOverlapCopyCommunication<bigunsignedint<s>,int> type;
426  };
427 
428 #endif // HAVE_MPI
429 
// Compile-time guard against using a sequential UG build in a parallel run.
// General overload: any other communication type is acceptable — do nothing.
430  template<typename T>
431  void assertParallelUG(T comm)
432  {}
433 
434 #if HAVE_UG && PDELAB_SEQUENTIAL_UG
// Overload selected for a sequential UGGrid's collective communication:
// always fails to compile with an explanatory diagnostic.
435  template<int dim>
436  void assertParallelUG(Dune::CollectiveCommunication<Dune::UGGrid<dim> > comm)
437  {
438  static_assert(Dune::AlwaysFalse<Dune::UGGrid<dim> >::value, "Using sequential UG in parallel environment");
439  };
440 #endif
441 
443  } // namespace istl
444  } // namespace PDELab
445 } // namespace Dune
446 
447 #endif // DUNE_PDELAB_BACKEND_ISTL_PARALLELHELPER_HH
std::enable_if< std::is_base_of< impl::WrapperBase, T >::value, Native< T > & >::type native(T &t)
Definition: backend/interface.hh:199
GatherScatter data handle for creating a disjoint DOF partitioning.
Definition: genericdatahandle.hh:930
Definition: genericdatahandle.hh:716
Tag describing a BCRSMatrix.
Definition: backend/istl/tags.hh:60
Data handle for marking ghost DOFs.
Definition: genericdatahandle.hh:812
Data handle for marking shared DOFs.
Definition: genericdatahandle.hh:1014
Definition: parallelhelper.hh:51
static const unsigned int value
Definition: gridfunctionspace/tags.hh:177
void createIndexSetAndProjectForAMG(MatrixType &m, Comm &c)
Makes the matrix consistent and creates the parallel information for AMG.
Definition: parallelhelper.hh:413
typename native_type< T >::type Native
Alias of the native container type associated with T or T itself if it is not a backend wrapper...
Definition: backend/interface.hh:183
Tag describing an arbitrary FieldVector.
Definition: backend/istl/tags.hh:43
tags::container< T >::type container_tag(const T &)
Gets instance of container tag associated with T.
Definition: backend/istl/tags.hh:246
bool owned(const ContainerIndex &i) const
Tests whether the given index is owned by this process.
Definition: parallelhelper.hh:149
OwnerOverlapCopyCommunication< bigunsignedint< s >, int > type
Definition: parallelhelper.hh:425
Definition: adaptivity.hh:27
Tag describing an arbitrary FieldMatrix.
Definition: backend/istl/tags.hh:80
Dune::Amg::SequentialInformation type
Definition: parallelhelper.hh:415
Extracts the container tag from T.
Definition: backend/istl/tags.hh:142
RankIndex rank() const
Returns the MPI rank of this process.
Definition: parallelhelper.hh:235
bool isGhost(const ContainerIndex &i) const
Tests whether the given index belongs to a ghost DOF.
Definition: parallelhelper.hh:155
ParallelHelper(const GFS &gfs, int verbose=1)
Definition: parallelhelper.hh:67
typename impl::BackendVectorSelector< GridFunctionSpace, FieldType >::Type Vector
alias of the return type of BackendVectorSelector
Definition: backend/interface.hh:113
const std::string s
Definition: function.hh:1102
void assertParallelUG(T comm)
Definition: parallelhelper.hh:431
PromotionTraits< typename X::field_type, typename Y::field_type >::PromotedType disjointDot(const X &x, const Y &y) const
Calculates the (rank-local) dot product of x and y on the disjoint partition defined by the helper...
Definition: parallelhelper.hh:166
Data handle for collecting set of neighboring MPI ranks.
Definition: genericdatahandle.hh:1060
void maskForeignDOFs(X &x) const
Mask out all DOFs not owned by the current process with 0.
Definition: parallelhelper.hh:113
Tag describing a BlockVector.
Definition: backend/istl/tags.hh:23