dune-istl 2.9.0
matrixhierarchy.hh
1// SPDX-FileCopyrightText: Copyright (C) DUNE Project contributors, see file LICENSE.md in module root
2// SPDX-License-Identifier: LicenseRef-GPL-2.0-only-with-DUNE-exception
3// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
4// vi: set et ts=4 sw=2 sts=2:
5#ifndef DUNE_AMG_MATRIXHIERARCHY_HH
6#define DUNE_AMG_MATRIXHIERARCHY_HH
7
8#include <algorithm>
9#include <tuple>
10#include "aggregates.hh"
11#include "graph.hh"
12#include "galerkin.hh"
13#include "renumberer.hh"
14#include "graphcreator.hh"
15#include "hierarchy.hh"
16#include <dune/istl/bvector.hh>
17#include <dune/common/parallel/indexset.hh>
27
28namespace Dune
29{
30 namespace Amg
31 {
42 enum {
50 MAX_PROCESSES = 72000
51 };
52
59 template<class M, class PI, class A=std::allocator<M> >
60 class MatrixHierarchy
61 {
62 public:
64 typedef M MatrixOperator;
65
67 typedef typename MatrixOperator::matrix_type Matrix;
68
70 typedef PI ParallelInformation;
71
73 typedef A Allocator;
74
76 typedef Dune::Amg::AggregatesMap<typename MatrixGraph<Matrix>::VertexDescriptor> AggregatesMap;
77
79 typedef Dune::Amg::Hierarchy<MatrixOperator,Allocator> ParallelMatrixHierarchy;
80
82 typedef Dune::Amg::Hierarchy<ParallelInformation,Allocator> ParallelInformationHierarchy;
83
85 using AAllocator = typename std::allocator_traits<Allocator>::template rebind_alloc<AggregatesMap*>;
86
88 typedef std::list<AggregatesMap*,AAllocator> AggregatesMapList;
89
91 typedef RedistributeInformation<ParallelInformation> RedistributeInfoType;
92
94 using RILAllocator = typename std::allocator_traits<Allocator>::template rebind_alloc<RedistributeInfoType>;
95
97 typedef std::list<RedistributeInfoType,RILAllocator> RedistributeInfoList;
98
104 MatrixHierarchy(std::shared_ptr<MatrixOperator> fineMatrix,
105 std::shared_ptr<ParallelInformation> pinfo = std::make_shared<ParallelInformation>());
106
107 ~MatrixHierarchy();
108
114 template<typename O, typename T>
115 void build(const T& criterion);
116
124 template<class F>
125 void recalculateGalerkin(const F& copyFlags);
126
131 template<class V, class BA, class TA>
132 void coarsenVector(Hierarchy<BlockVector<V,BA>, TA>& hierarchy) const;
133
139 template<class S, class TA>
140 void coarsenSmoother(Hierarchy<S,TA>& smoothers,
141 const typename SmootherTraits<S>::Arguments& args) const;
142
147 std::size_t levels() const;
148
153 std::size_t maxlevels() const;
154
155 bool hasCoarsest() const;
156
161 bool isBuilt() const;
162
167 const ParallelMatrixHierarchy& matrices() const;
168
173 const ParallelInformationHierarchy& parallelInformation() const;
174
179 const AggregatesMapList& aggregatesMaps() const;
180
186 const RedistributeInfoList& redistributeInformation() const;
187
188 double getProlongationDampingFactor() const
189 {
190 return prolongDamp_;
191 }
192
203 void getCoarsestAggregatesOnFinest(std::vector<std::size_t>& data) const;
204
205 private:
206 typedef typename ConstructionTraits<MatrixOperator>::Arguments MatrixArgs;
207 typedef typename ConstructionTraits<ParallelInformation>::Arguments CommunicationArgs;
209 AggregatesMapList aggregatesMaps_;
211 RedistributeInfoList redistributes_;
213 ParallelMatrixHierarchy matrices_;
215 ParallelInformationHierarchy parallelInformation_;
216
218 bool built_;
219
221 int maxlevels_;
222
223 double prolongDamp_;
224
228 template<class Matrix, bool print>
229 struct MatrixStats
230 {
231
235 static void stats([[maybe_unused]] const Matrix& matrix)
236 {}
237 };
238
239 template<class Matrix>
240 struct MatrixStats<Matrix,true>
241 {
242 struct calc
243 {
244 typedef typename Matrix::size_type size_type;
245 typedef typename Matrix::row_type matrix_row;
246
247 calc()
248 {
249 min=std::numeric_limits<size_type>::max();
250 max=0;
251 sum=0;
252 }
253
254 void operator()(const matrix_row& row)
255 {
256 min=std::min(min, row.size());
257 max=std::max(max, row.size());
258 sum += row.size();
259 }
260
261 size_type min;
262 size_type max;
263 size_type sum;
264 };
268 static void stats(const Matrix& matrix)
269 {
270 calc c= for_each(matrix.begin(), matrix.end(), calc());
271 dinfo<<"Matrix row: min="<<c.min<<" max="<<c.max
272 <<" average="<<static_cast<double>(c.sum)/matrix.N()
273 <<std::endl;
274 }
275 };
276 };
277
281 template<class T>
282 class CoarsenCriterion : public T
283 {
284 public:
289 typedef T AggregationCriterion;
290
301 CoarsenCriterion(int maxLevel=100, int coarsenTarget=1000, double minCoarsenRate=1.2,
302 double prolongDamp=1.6, AccumulationMode accumulate=successiveAccu)
303 : AggregationCriterion(Dune::Amg::Parameters(maxLevel, coarsenTarget, minCoarsenRate, prolongDamp, accumulate))
304 {}
305
306 CoarsenCriterion(const Dune::Amg::Parameters& parms)
307 : AggregationCriterion(parms)
308 {}
309
310 };
311
312 template<typename M, typename C1>
313 bool repartitionAndDistributeMatrix([[maybe_unused]] const M& origMatrix,
314 [[maybe_unused]] std::shared_ptr<M> newMatrix,
315 [[maybe_unused]] SequentialInformation& origComm,
316 [[maybe_unused]] std::shared_ptr<SequentialInformation>& newComm,
317 [[maybe_unused]] RedistributeInformation<SequentialInformation>& ri,
318 [[maybe_unused]] int nparts,
319 [[maybe_unused]] C1& criterion)
320 {
321 DUNE_THROW(NotImplemented, "Redistribution does not make sense in sequential code!");
322 }
323
324
325 template<typename M, typename C, typename C1>
326 bool repartitionAndDistributeMatrix(const M& origMatrix,
327 std::shared_ptr<M> newMatrix,
328 C& origComm,
329 std::shared_ptr<C>& newComm,
330 RedistributeInformation<C>& ri,
331 int nparts, C1& criterion)
332 {
333 Timer time;
334#ifdef AMG_REPART_ON_COMM_GRAPH
335 // Do not repartition the matrix graph, but a graph of the communication scheme.
336 bool existentOnRedist=Dune::commGraphRepartition(origMatrix, origComm, nparts, newComm,
337 ri.getInterface(),
338 criterion.debugLevel()>1);
339
340#else
341 typedef Dune::Amg::MatrixGraph<const M> MatrixGraph;
342 typedef Dune::Amg::PropertiesGraph<MatrixGraph,
343 VertexProperties,
344 EdgeProperties,
345 IdentityMap,
346 IdentityMap> PropertiesGraph;
347 MatrixGraph graph(origMatrix);
348 PropertiesGraph pgraph(graph);
349 buildDependency(pgraph, origMatrix, criterion, false);
350
351#ifdef DEBUG_REPART
352 if(origComm.communicator().rank()==0)
353 std::cout<<"Original matrix"<<std::endl;
354 origComm.communicator().barrier();
355 printGlobalSparseMatrix(origMatrix, origComm, std::cout);
356#endif
357 bool existentOnRedist=Dune::graphRepartition(pgraph, origComm, nparts,
358 newComm, ri.getInterface(),
359 criterion.debugLevel()>1);
360#endif // if else AMG_REPART
361
362 if(origComm.communicator().rank()==0 && criterion.debugLevel()>1)
363 std::cout<<"Repartitioning took "<<time.elapsed()<<" seconds."<<std::endl;
364
365 ri.setSetup();
366
367#ifdef DEBUG_REPART
368 ri.checkInterface(origComm.indexSet(), newComm->indexSet(), origComm.communicator());
369#endif
370
371 redistributeMatrix(const_cast<M&>(origMatrix), *newMatrix, origComm, *newComm, ri);
372
373#ifdef DEBUG_REPART
374 if(origComm.communicator().rank()==0)
375 std::cout<<"Original matrix"<<std::endl;
376 origComm.communicator().barrier();
377 if(newComm->communicator().size()>0)
378 printGlobalSparseMatrix(*newMatrix, *newComm, std::cout);
379 origComm.communicator().barrier();
380#endif
381
382 if(origComm.communicator().rank()==0 && criterion.debugLevel()>1)
383 std::cout<<"Redistributing matrix took "<<time.elapsed()<<" seconds."<<std::endl;
384 return existentOnRedist;
385
386 }
387
388 template<class M, class IS, class A>
389 MatrixHierarchy<M,IS,A>::MatrixHierarchy(std::shared_ptr<MatrixOperator> fineMatrix,
390 std::shared_ptr<ParallelInformation> pinfo)
391 : matrices_(fineMatrix),
392 parallelInformation_(pinfo)
393 {
394 if (SolverCategory::category(*fineMatrix) != SolverCategory::category(*pinfo))
395 DUNE_THROW(ISTLError, "MatrixOperator and ParallelInformation must belong to the same category!");
396 }
397
398 template<class M, class IS, class A>
399 template<typename O, typename T>
400 void MatrixHierarchy<M,IS,A>::build(const T& criterion)
401 {
402 prolongDamp_ = criterion.getProlongationDampingFactor();
403 typedef O OverlapFlags;
404 typedef typename ParallelMatrixHierarchy::Iterator MatIterator;
405 typedef typename ParallelInformationHierarchy::Iterator PInfoIterator;
406
407 static const int noints=(Dune::Amg::MAX_PROCESSES/4096>0) ? (Dune::Amg::MAX_PROCESSES/4096) : 1;
408
409 typedef bigunsignedint<sizeof(int)*8*noints> BIGINT;
410 GalerkinProduct<ParallelInformation> productBuilder;
411 MatIterator mlevel = matrices_.finest();
412 MatrixStats<typename M::matrix_type,MINIMAL_DEBUG_LEVEL<=INFO_DEBUG_LEVEL>::stats(mlevel->getmat());
413
414 PInfoIterator infoLevel = parallelInformation_.finest();
415 BIGINT finenonzeros=countNonZeros(mlevel->getmat());
416 finenonzeros = infoLevel->communicator().sum(finenonzeros);
417 BIGINT allnonzeros = finenonzeros;
418
419
420 int level = 0;
421 int rank = 0;
422
423 BIGINT unknowns = mlevel->getmat().N();
424
425 unknowns = infoLevel->communicator().sum(unknowns);
426 double dunknowns=unknowns.todouble();
427 infoLevel->buildGlobalLookup(mlevel->getmat().N());
428 redistributes_.push_back(RedistributeInfoType());
429
430 for(; level < criterion.maxLevel(); ++level, ++mlevel) {
431 assert(matrices_.levels()==redistributes_.size());
432 rank = infoLevel->communicator().rank();
433 if(rank==0 && criterion.debugLevel()>1)
434 std::cout<<"Level "<<level<<" has "<<dunknowns<<" unknowns, "<<dunknowns/infoLevel->communicator().size()
435 <<" unknowns per proc (procs="<<infoLevel->communicator().size()<<")"<<std::endl;
436
437 MatrixOperator* matrix=&(*mlevel);
438 ParallelInformation* info =&(*infoLevel);
439
440 if((
441#if HAVE_PARMETIS
442 criterion.accumulate()==successiveAccu
443#else
444 false
445#endif
446 || (criterion.accumulate()==atOnceAccu
447 && dunknowns < 30*infoLevel->communicator().size()))
448 && infoLevel->communicator().size()>1 &&
449 dunknowns/infoLevel->communicator().size() <= criterion.coarsenTarget())
450 {
451 // accumulate to fewer processors
452 std::shared_ptr<Matrix> redistMat = std::make_shared<Matrix>();
453 std::shared_ptr<ParallelInformation> redistComm;
454 std::size_t nodomains = (std::size_t)std::ceil(dunknowns/(criterion.minAggregateSize()
455 *criterion.coarsenTarget()));
456 if( nodomains<=criterion.minAggregateSize()/2 ||
457 dunknowns <= criterion.coarsenTarget() )
458 nodomains=1;
459
460 bool existentOnNextLevel =
461 repartitionAndDistributeMatrix(mlevel->getmat(), redistMat, *infoLevel,
462 redistComm, redistributes_.back(), nodomains,
463 criterion);
464 BIGINT unknownsRedist = redistMat->N();
465 unknownsRedist = infoLevel->communicator().sum(unknownsRedist);
466 dunknowns= unknownsRedist.todouble();
467 if(redistComm->communicator().rank()==0 && criterion.debugLevel()>1)
468 std::cout<<"Level "<<level<<" (redistributed) has "<<dunknowns<<" unknowns, "<<dunknowns/redistComm->communicator().size()
469 <<" unknowns per proc (procs="<<redistComm->communicator().size()<<")"<<std::endl;
470 MatrixArgs args(redistMat, *redistComm);
471 mlevel.addRedistributed(ConstructionTraits<MatrixOperator>::construct(args));
472 assert(mlevel.isRedistributed());
473 infoLevel.addRedistributed(redistComm);
474 infoLevel->freeGlobalLookup();
475
476 if(!existentOnNextLevel)
477 // We do not hold any data on the redistributed partitioning
478 break;
479
480 // Work on the redistributed Matrix from now on
481 matrix = &(mlevel.getRedistributed());
482 info = &(infoLevel.getRedistributed());
483 info->buildGlobalLookup(matrix->getmat().N());
484 }
485
486 rank = info->communicator().rank();
487 if(dunknowns <= criterion.coarsenTarget())
488 // No further coarsening needed
489 break;
490
491 typedef PropertiesGraphCreator<MatrixOperator,ParallelInformation> GraphCreator;
492 typedef typename GraphCreator::PropertiesGraph PropertiesGraph;
493 typedef typename GraphCreator::GraphTuple GraphTuple;
494
495 typedef typename PropertiesGraph::VertexDescriptor Vertex;
496
497 std::vector<bool> excluded(matrix->getmat().N(), false);
498
499 GraphTuple graphs = GraphCreator::create(*matrix, excluded, *info, OverlapFlags());
500
501 AggregatesMap* aggregatesMap=new AggregatesMap(std::get<1>(graphs)->maxVertex()+1);
502
503 aggregatesMaps_.push_back(aggregatesMap);
504
505 Timer watch;
506 watch.reset();
507 auto [noAggregates, isoAggregates, oneAggregates, skippedAggregates] =
508 aggregatesMap->buildAggregates(matrix->getmat(), *(std::get<1>(graphs)), criterion, level==0);
509
510 if(rank==0 && criterion.debugLevel()>2)
511 std::cout<<" Have built "<<noAggregates<<" aggregates totally ("<<isoAggregates<<" isolated aggregates, "<<
512 oneAggregates<<" aggregates of one vertex, and skipped "<<
513 skippedAggregates<<" aggregates)."<<std::endl;
514#ifdef TEST_AGGLO
515 {
516 // calculate size of local matrix in the distributed direction
517 int start, end, overlapStart, overlapEnd;
518 int procs=info->communicator().rank();
519 int n = UNKNOWNS/procs; // number of unknowns per process
520 int bigger = UNKNOWNS%procs; // number of processes with n+1 unknowns
521
522 // Compute owner region
523 if(rank<bigger) {
524 start = rank*(n+1);
525 end = (rank+1)*(n+1);
526 }else{
527 start = bigger + rank * n;
528 end = bigger + (rank + 1) * n;
529 }
530
531 // Compute overlap region
532 if(start>0)
533 overlapStart = start - 1;
534 else
535 overlapStart = start;
536
537 if(end<UNKNOWNS)
538 overlapEnd = end + 1;
539 else
540 overlapEnd = end;
541
542 assert((UNKNOWNS)*(overlapEnd-overlapStart)==aggregatesMap->noVertices());
543 for(int j=0; j< UNKNOWNS; ++j)
544 for(int i=0; i < UNKNOWNS; ++i)
545 {
546 if(i>=overlapStart && i<overlapEnd)
547 {
548 int no = (j/2)*((UNKNOWNS)/2)+i/2;
549 (*aggregatesMap)[j*(overlapEnd-overlapStart)+i-overlapStart]=no;
550 }
551 }
552 }
553#endif
554 if(criterion.debugLevel()>1 && info->communicator().rank()==0)
555 std::cout<<"aggregating finished."<<std::endl;
556
557 BIGINT gnoAggregates=noAggregates;
558 gnoAggregates = info->communicator().sum(gnoAggregates);
559 double dgnoAggregates = gnoAggregates.todouble();
560#ifdef TEST_AGGLO
561 BIGINT gnoAggregates=((UNKNOWNS)/2)*((UNKNOWNS)/2);
562#endif
563
564 if(criterion.debugLevel()>2 && rank==0)
565 std::cout << "Building "<<dgnoAggregates<<" aggregates took "<<watch.elapsed()<<" seconds."<<std::endl;
566
567 if(dgnoAggregates==0 || dunknowns/dgnoAggregates<criterion.minCoarsenRate())
568 {
569 if(rank==0)
570 {
571 if(dgnoAggregates>0)
572 std::cerr << "Stopped coarsening because of rate breakdown "<<dunknowns<<"/"<<dgnoAggregates
573 <<"="<<dunknowns/dgnoAggregates<<"<"
574 <<criterion.minCoarsenRate()<<std::endl;
575 else
576 std::cerr<< "Could not build any aggregates. Probably no connected nodes."<<std::endl;
577 }
578 aggregatesMap->free();
579 delete aggregatesMap;
580 aggregatesMaps_.pop_back();
581
582 if(criterion.accumulate() && mlevel.isRedistributed() && info->communicator().size()>1) {
583 // coarse level matrix was already redistributed, but to more than 1 process
584 // Therefore need to delete the redistribution. Further down it will
585 // then be redistributed to 1 process
586 delete &(mlevel.getRedistributed().getmat());
587 mlevel.deleteRedistributed();
588 delete &(infoLevel.getRedistributed());
589 infoLevel.deleteRedistributed();
590 redistributes_.back().resetSetup();
591 }
592
593 break;
594 }
595 unknowns = noAggregates;
596 dunknowns = dgnoAggregates;
597
598 CommunicationArgs commargs(info->communicator(),info->category());
599 parallelInformation_.addCoarser(commargs);
600
601 ++infoLevel; // parallel information on coarse level
602
603 typename PropertyMapTypeSelector<VertexVisitedTag,PropertiesGraph>::Type visitedMap =
604 get(VertexVisitedTag(), *(std::get<1>(graphs)));
605
606 watch.reset();
607 int aggregates = IndicesCoarsener<ParallelInformation,OverlapFlags>
608 ::coarsen(*info,
609 *(std::get<1>(graphs)),
610 visitedMap,
611 *aggregatesMap,
612 *infoLevel,
613 noAggregates);
614 GraphCreator::free(graphs);
615
616 if(criterion.debugLevel()>2) {
617 if(rank==0)
618 std::cout<<"Coarsening of index sets took "<<watch.elapsed()<<" seconds."<<std::endl;
619 }
620
621 watch.reset();
622
623 infoLevel->buildGlobalLookup(aggregates);
624 AggregatesPublisher<Vertex,OverlapFlags,ParallelInformation>::publish(*aggregatesMap,
625 *info,
626 infoLevel->globalLookup());
627
628
629 if(criterion.debugLevel()>2) {
630 if(rank==0)
631 std::cout<<"Communicating global aggregate numbers took "<<watch.elapsed()<<" seconds."<<std::endl;
632 }
633
634 watch.reset();
635 std::vector<bool>& visited=excluded;
636
637 typedef std::vector<bool>::iterator Iterator;
638 typedef IteratorPropertyMap<Iterator, IdentityMap> VisitedMap2;
639 Iterator end = visited.end();
640 for(Iterator iter= visited.begin(); iter != end; ++iter)
641 *iter=false;
642
643 VisitedMap2 visitedMap2(visited.begin(), Dune::IdentityMap());
644
645 std::shared_ptr<typename MatrixOperator::matrix_type>
646 coarseMatrix(productBuilder.build(*(std::get<0>(graphs)), visitedMap2,
647 *info,
648 *aggregatesMap,
649 aggregates,
650 OverlapFlags()));
651 dverb<<"Building of sparsity pattern took "<<watch.elapsed()<<std::endl;
652 watch.reset();
653 info->freeGlobalLookup();
654
655 delete std::get<0>(graphs);
656 productBuilder.calculate(matrix->getmat(), *aggregatesMap, *coarseMatrix, *infoLevel, OverlapFlags());
657
658 if(criterion.debugLevel()>2) {
659 if(rank==0)
660 std::cout<<"Calculation entries of Galerkin product took "<<watch.elapsed()<<" seconds."<<std::endl;
661 }
662
663 BIGINT nonzeros = countNonZeros(*coarseMatrix);
664 allnonzeros = allnonzeros + infoLevel->communicator().sum(nonzeros);
665 MatrixArgs args(coarseMatrix, *infoLevel);
666
667 matrices_.addCoarser(args);
668 redistributes_.push_back(RedistributeInfoType());
669 } // end level loop
670
671
672 infoLevel->freeGlobalLookup();
673
674 built_=true;
675 AggregatesMap* aggregatesMap=new AggregatesMap(0);
676 aggregatesMaps_.push_back(aggregatesMap);
677
678 if(criterion.debugLevel()>0) {
679 if(level==criterion.maxLevel()) {
680 BIGINT unknownsLevel = mlevel->getmat().N();
681 unknownsLevel = infoLevel->communicator().sum(unknownsLevel);
682 double dunknownsLevel = unknownsLevel.todouble();
683 if(rank==0 && criterion.debugLevel()>1) {
684 std::cout<<"Level "<<level<<" has "<<dunknownsLevel<<" unknowns, "<<dunknownsLevel/infoLevel->communicator().size()
685 <<" unknowns per proc (procs="<<infoLevel->communicator().size()<<")"<<std::endl;
686 }
687 }
688 }
689
690 if(criterion.accumulate() && !redistributes_.back().isSetup() &&
691 infoLevel->communicator().size()>1) {
692#if HAVE_MPI && !HAVE_PARMETIS
693 if(criterion.accumulate()==successiveAccu &&
694 infoLevel->communicator().rank()==0)
695 std::cerr<<"Successive accumulation of data on coarse levels only works with ParMETIS installed."
696 <<" Fell back to accumulation to one domain on coarsest level"<<std::endl;
697#endif
698
699 // accumulate to fewer processors
700 std::shared_ptr<Matrix> redistMat = std::make_shared<Matrix>();
701 std::shared_ptr<ParallelInformation> redistComm;
702 int nodomains = 1;
703
704 repartitionAndDistributeMatrix(mlevel->getmat(), redistMat, *infoLevel,
705 redistComm, redistributes_.back(), nodomains,criterion);
706 MatrixArgs args(redistMat, *redistComm);
707 BIGINT unknownsRedist = redistMat->N();
708 unknownsRedist = infoLevel->communicator().sum(unknownsRedist);
709
710 if(redistComm->communicator().rank()==0 && criterion.debugLevel()>1) {
711 double dunknownsRedist = unknownsRedist.todouble();
712 std::cout<<"Level "<<level<<" redistributed has "<<dunknownsRedist<<" unknowns, "<<dunknownsRedist/redistComm->communicator().size()
713 <<" unknowns per proc (procs="<<redistComm->communicator().size()<<")"<<std::endl;
714 }
715 mlevel.addRedistributed(ConstructionTraits<MatrixOperator>::construct(args));
716 infoLevel.addRedistributed(redistComm);
717 infoLevel->freeGlobalLookup();
718 }
719
720 int levels = matrices_.levels();
721 maxlevels_ = parallelInformation_.finest()->communicator().max(levels);
722 assert(matrices_.levels()==redistributes_.size());
723 if(hasCoarsest() && rank==0 && criterion.debugLevel()>1)
724 std::cout<<"operator complexity: "<<allnonzeros.todouble()/finenonzeros.todouble()<<std::endl;
725
726 }
727
728 template<class M, class IS, class A>
729 const typename MatrixHierarchy<M,IS,A>::ParallelMatrixHierarchy&
730 MatrixHierarchy<M,IS,A>::matrices() const
731 {
732 return matrices_;
733 }
734
735 template<class M, class IS, class A>
736 const typename MatrixHierarchy<M,IS,A>::ParallelInformationHierarchy&
737 MatrixHierarchy<M,IS,A>::parallelInformation() const
738 {
739 return parallelInformation_;
740 }
741
742 template<class M, class IS, class A>
743 void MatrixHierarchy<M,IS,A>::getCoarsestAggregatesOnFinest(std::vector<std::size_t>& data) const
744 {
745 int levels=aggregatesMaps().size();
746 int maxlevels=parallelInformation_.finest()->communicator().max(levels);
747 std::size_t size=(*(aggregatesMaps().begin()))->noVertices();
748 // We need an auxiliary vector for the consecutive prolongation.
749 std::vector<std::size_t> tmp;
750 std::vector<std::size_t> *coarse, *fine;
751
752 // make sure the allocated space suffices.
753 tmp.reserve(size);
754 data.reserve(size);
755
756 // Correctly assign coarse and fine for the first prolongation such that
757 // we end up in data in the end.
758 if(levels%2==0) {
759 coarse=&tmp;
760 fine=&data;
761 }else{
762 coarse=&data;
763 fine=&tmp;
764 }
765
766 // Number the unknowns on the coarsest level consecutively for each process.
767 if(levels==maxlevels) {
768 const AggregatesMap& map = *(*(++aggregatesMaps().rbegin()));
769 std::size_t m=0;
770
771 for(typename AggregatesMap::const_iterator iter = map.begin(); iter != map.end(); ++iter)
772 if(*iter< AggregatesMap::ISOLATED)
773 m=std::max(*iter,m);
774
775 coarse->resize(m+1);
776 std::size_t i=0;
777 srand((unsigned)std::clock());
778 std::set<size_t> used;
779 for(typename std::vector<std::size_t>::iterator iter=coarse->begin(); iter != coarse->end();
780 ++iter, ++i)
781 {
782 std::pair<std::set<std::size_t>::iterator,bool> ibpair
783 = used.insert(static_cast<std::size_t>((((double)rand())/(RAND_MAX+1.0)))*coarse->size());
784
785 while(!ibpair.second)
786 ibpair = used.insert(static_cast<std::size_t>((((double)rand())/(RAND_MAX+1.0))*coarse->size()));
787 *iter=*(ibpair.first);
788 }
789 }
790
791 typename ParallelInformationHierarchy::Iterator pinfo = parallelInformation().coarsest();
792 --pinfo;
793
794 // Now consecutively project the numbers to the finest level.
795 for(typename AggregatesMapList::const_reverse_iterator aggregates=++aggregatesMaps().rbegin();
796 aggregates != aggregatesMaps().rend(); ++aggregates,--levels) {
797
798 fine->resize((*aggregates)->noVertices());
799 fine->assign(fine->size(), 0);
800 Transfer<std::size_t,std::size_t,ParallelInformation>
801 ::prolongateVector(*(*aggregates), *coarse, *fine, static_cast<std::size_t>(1), *pinfo);
802 --pinfo;
803 std::swap(coarse, fine);
804 }
805
806 // Assertion to check that we really projected to data on the last step.
807 assert(coarse==&data);
808 }
809
810 template<class M, class IS, class A>
811 const typename MatrixHierarchy<M,IS,A>::AggregatesMapList&
812 MatrixHierarchy<M,IS,A>::aggregatesMaps() const
813 {
814 return aggregatesMaps_;
815 }
816 template<class M, class IS, class A>
817 const typename MatrixHierarchy<M,IS,A>::RedistributeInfoList&
818 MatrixHierarchy<M,IS,A>::redistributeInformation() const
819 {
820 return redistributes_;
821 }
822
823 template<class M, class IS, class A>
824 MatrixHierarchy<M,IS,A>::~MatrixHierarchy()
825 {
826 typedef typename AggregatesMapList::reverse_iterator AggregatesMapIterator;
827 typedef typename ParallelMatrixHierarchy::Iterator Iterator;
828 typedef typename ParallelInformationHierarchy::Iterator InfoIterator;
829
830 AggregatesMapIterator amap = aggregatesMaps_.rbegin();
831 InfoIterator info = parallelInformation_.coarsest();
832 for(Iterator level=matrices_.coarsest(), finest=matrices_.finest(); level != finest; --level, --info, ++amap) {
833 (*amap)->free();
834 delete *amap;
835 }
836 delete *amap;
837 }
838
839 template<class M, class IS, class A>
840 template<class V, class BA, class TA>
841 void MatrixHierarchy<M,IS,A>::coarsenVector(Hierarchy<BlockVector<V,BA>, TA>& hierarchy) const
842 {
843 assert(hierarchy.levels()==1);
844 typedef typename ParallelMatrixHierarchy::ConstIterator Iterator;
845 typedef typename RedistributeInfoList::const_iterator RIter;
846 RIter redist = redistributes_.begin();
847
848 Iterator matrix = matrices_.finest(), coarsest = matrices_.coarsest();
849 int level=0;
850 if(redist->isSetup())
851 hierarchy.addRedistributedOnCoarsest(matrix.getRedistributed().getmat().N());
852 Dune::dvverb<<"Level "<<level<<" has "<<matrices_.finest()->getmat().N()<<" unknowns!"<<std::endl;
853
854 while(matrix != coarsest) {
855 ++matrix; ++level; ++redist;
856 Dune::dvverb<<"Level "<<level<<" has "<<matrix->getmat().N()<<" unknowns!"<<std::endl;
857
858 hierarchy.addCoarser(matrix->getmat().N());
859 if(redist->isSetup())
860 hierarchy.addRedistributedOnCoarsest(matrix.getRedistributed().getmat().N());
861
862 }
863
864 }
865
866 template<class M, class IS, class A>
867 template<class S, class TA>
868 void MatrixHierarchy<M,IS,A>::coarsenSmoother(Hierarchy<S,TA>& smoothers,
869 const typename SmootherTraits<S>::Arguments& sargs) const
870 {
871 assert(smoothers.levels()==0);
872 typedef typename ParallelMatrixHierarchy::ConstIterator MatrixIterator;
873 typedef typename ParallelInformationHierarchy::ConstIterator PinfoIterator;
874 typedef typename AggregatesMapList::const_iterator AggregatesIterator;
875
876 typename ConstructionTraits<S>::Arguments cargs;
877 cargs.setArgs(sargs);
878 PinfoIterator pinfo = parallelInformation_.finest();
879 AggregatesIterator aggregates = aggregatesMaps_.begin();
880 int level=0;
881 for(MatrixIterator matrix = matrices_.finest(), coarsest = matrices_.coarsest();
882 matrix != coarsest; ++matrix, ++pinfo, ++aggregates, ++level) {
883 cargs.setMatrix(matrix->getmat(), **aggregates);
884 cargs.setComm(*pinfo);
885 smoothers.addCoarser(cargs);
886 }
887 if(maxlevels()>levels()) {
888 // This is not the globally coarsest level and therefore smoothing is needed
889 cargs.setMatrix(matrices_.coarsest()->getmat(), **aggregates);
890 cargs.setComm(*pinfo);
891 smoothers.addCoarser(cargs);
892 ++level;
893 }
894 }
895
896 template<class M, class IS, class A>
897 template<class F>
898 void MatrixHierarchy<M,IS,A>::recalculateGalerkin(const F& copyFlags)
899 {
900 typedef typename AggregatesMapList::iterator AggregatesMapIterator;
901 typedef typename ParallelMatrixHierarchy::Iterator Iterator;
902 typedef typename ParallelInformationHierarchy::Iterator InfoIterator;
903
904 AggregatesMapIterator amap = aggregatesMaps_.begin();
905 BaseGalerkinProduct productBuilder;
906 InfoIterator info = parallelInformation_.finest();
907 typename RedistributeInfoList::iterator riIter = redistributes_.begin();
908 Iterator level = matrices_.finest(), coarsest=matrices_.coarsest();
909 if(level.isRedistributed()) {
910 info->buildGlobalLookup(level->getmat().N());
911 redistributeMatrixEntries(const_cast<Matrix&>(level->getmat()),
912 const_cast<Matrix&>(level.getRedistributed().getmat()),
913 *info,info.getRedistributed(), *riIter);
914 info->freeGlobalLookup();
915 }
916
917 for(; level!=coarsest; ++amap) {
918 const Matrix& fine = (level.isRedistributed() ? level.getRedistributed() : *level).getmat();
919 ++level;
920 ++info;
921 ++riIter;
922 productBuilder.calculate(fine, *(*amap), const_cast<Matrix&>(level->getmat()), *info, copyFlags);
923 if(level.isRedistributed()) {
924 info->buildGlobalLookup(level->getmat().N());
925 redistributeMatrixEntries(const_cast<Matrix&>(level->getmat()),
926 const_cast<Matrix&>(level.getRedistributed().getmat()), *info,
927 info.getRedistributed(), *riIter);
928 info->freeGlobalLookup();
929 }
930 }
931 }
932
933 template<class M, class IS, class A>
934 std::size_t MatrixHierarchy<M,IS,A>::levels() const
935 {
936 return matrices_.levels();
937 }
938
939 template<class M, class IS, class A>
940 std::size_t MatrixHierarchy<M,IS,A>::maxlevels() const
941 {
942 return maxlevels_;
943 }
944
945 template<class M, class IS, class A>
946 bool MatrixHierarchy<M,IS,A>::hasCoarsest() const
947 {
948 return levels()==maxlevels() &&
949 (!matrices_.coarsest().isRedistributed() ||matrices_.coarsest()->getmat().N()>0);
950 }
951
952 template<class M, class IS, class A>
953 bool MatrixHierarchy<M,IS,A>::isBuilt() const
954 {
955 return built_;
956 }
957
959 } // namespace Amg
960} // namespace Dune
961
962#endif // end DUNE_AMG_MATRIXHIERARCHY_HH
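A minimal usage sketch, not part of this header: the fragment below shows how the MatrixHierarchy declared above is typically driven in a purely sequential setting, first building the matrix hierarchy with a CoarsenCriterion and then coarsening matching vector and smoother hierarchies. The matrix A, the 1x1 scalar blocks, the SeqSSOR smoother and the numeric parameters are illustrative assumptions only; error handling is omitted.

// Minimal sequential sketch; A, setupAMGLevels and all parameters are assumed names/values.
#include <iostream>
#include <memory>

#include <dune/common/enumset.hh>
#include <dune/common/fmatrix.hh>
#include <dune/common/fvector.hh>
#include <dune/istl/bcrsmatrix.hh>
#include <dune/istl/bvector.hh>
#include <dune/istl/operators.hh>
#include <dune/istl/preconditioners.hh>
#include <dune/istl/paamg/matrixhierarchy.hh>
#include <dune/istl/paamg/aggregates.hh>
#include <dune/istl/paamg/smoother.hh>
#include <dune/istl/paamg/pinfo.hh>

using Mat       = Dune::BCRSMatrix<Dune::FieldMatrix<double,1,1>>;
using Vec       = Dune::BlockVector<Dune::FieldVector<double,1>>;
using Operator  = Dune::MatrixAdapter<Mat,Vec,Vec>;
using Comm      = Dune::Amg::SequentialInformation;
using Hierarchy = Dune::Amg::MatrixHierarchy<Operator,Comm>;
using Smoother  = Dune::SeqSSOR<Mat,Vec,Vec>;
using Criterion = Dune::Amg::CoarsenCriterion<
    Dune::Amg::SymmetricCriterion<Mat,Dune::Amg::FirstDiagonal> >;

void setupAMGLevels(const Mat& A)
{
  // Wrap the fine-level matrix and the (sequential) parallel information.
  auto fineOperator = std::make_shared<Operator>(A);
  auto comm         = std::make_shared<Comm>();
  Hierarchy hierarchy(fineOperator, comm);

  // Stop after at most 15 levels or once a level has at most 2000 unknowns.
  Criterion criterion(/*maxLevel=*/15, /*coarsenTarget=*/2000);

  // The template argument selects the attribute set treated as overlap;
  // in sequential code nothing overlaps, hence the negated owner set.
  hierarchy.build<Dune::NegateSet<Comm::OwnerSet> >(criterion);

  std::cout << "built " << hierarchy.levels() << " levels (max over processes: "
            << hierarchy.maxlevels() << ")" << std::endl;

  // Auxiliary vectors: one per level, sized to match the level matrices.
  Dune::Amg::Hierarchy<Vec> vectors(
      std::make_shared<Vec>(hierarchy.matrices().finest()->getmat().N()));
  hierarchy.coarsenVector(vectors);

  // One smoother per level, constructed from shared smoother arguments.
  Dune::Amg::SmootherTraits<Smoother>::Arguments smootherArgs;
  smootherArgs.iterations = 1;
  smootherArgs.relaxationFactor = 1.0;
  Dune::Amg::Hierarchy<Smoother> smoothers;
  hierarchy.coarsenSmoother(smoothers, smootherArgs);
}

This is essentially the level setup that the Dune::Amg::AMG preconditioner performs internally before it starts cycling.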
This file implements a vector space as a tensor product of a given vector space. The number of compon...
Functionality for redistributing a sparse matrix.
Some handy generic functions for ISTL matrices.
Provides classes for the Coloring process of AMG.
Helper classes for the construction of classes without empty constructor.
Provides classes for initializing the link attributes of a matrix graph.
Provides a class for building the Galerkin product based on an aggregation scheme.
Provides a class for identifying aggregates globally.
Provides classes for building the matrix graph.
Provides classes representing the hierarchies in AMG.
Provides a class for building the index set and remote indices on the coarse level.
Classes for the generic construction and application of the smoothers.
Prolongation and restriction for amg.
auto countNonZeros(const M &, typename std::enable_if_t< Dune::IsNumber< M >::value > *sfinae=nullptr)
Get the number of nonzero fields in the matrix.
Definition matrixutils.hh:119
const AggregatesMapList & aggregatesMaps() const
Get the hierarchy of the mappings of the nodes onto aggregates.
Definition matrixhierarchy.hh:812
bool isBuilt() const
Whether the hierarchy was built.
Definition matrixhierarchy.hh:953
bool hasCoarsest() const
Definition matrixhierarchy.hh:946
std::size_t levels() const
Get the number of levels in the hierarchy.
Definition hierarchy.hh:322
std::size_t levels() const
Get the number of levels in the hierarchy.
Definition matrixhierarchy.hh:934
void addCoarser(Arguments &args)
Add an element on a coarser level.
Definition hierarchy.hh:334
const RedistributeInfoList & redistributeInformation() const
Get the hierarchy of the information about redistributions.
Definition matrixhierarchy.hh:818
const ParallelInformationHierarchy & parallelInformation() const
Get the hierarchy of the parallel data distribution information.
Definition matrixhierarchy.hh:737
bool repartitionAndDistributeMatrix(const M &origMatrix, std::shared_ptr< M > newMatrix, SequentialInformation &origComm, std::shared_ptr< SequentialInformation > &newComm, RedistributeInformation< SequentialInformation > &ri, int nparts, C1 &criterion)
Definition matrixhierarchy.hh:313
const_iterator begin() const
Definition aggregates.hh:725
const ParallelMatrixHierarchy & matrices() const
Get the matrix hierarchy.
Definition matrixhierarchy.hh:730
std::size_t maxlevels() const
Get the max number of levels in the hierarchy of processors.
Definition matrixhierarchy.hh:940
const_iterator end() const
Definition aggregates.hh:730
static const V ISOLATED
Identifier of isolated vertices.
Definition aggregates.hh:571
void recalculateGalerkin(const F &copyFlags)
Recalculate the galerkin products.
Definition matrixhierarchy.hh:898
const void * Arguments
A type holding all the arguments needed to call the constructor.
Definition construction.hh:44
std::size_t noVertices() const
Get the number of vertices.
static std::shared_ptr< T > construct(Arguments &args)
Construct an object with the specified arguments.
Definition construction.hh:52
void coarsenVector(Hierarchy< BlockVector< V, BA >, TA > &hierarchy) const
Coarsen the vector hierarchy according to the matrix hierarchy.
Definition matrixhierarchy.hh:841
const AggregateDescriptor * const_iterator
Definition aggregates.hh:723
MatrixHierarchy(std::shared_ptr< MatrixOperator > fineMatrix, std::shared_ptr< ParallelInformation > pinfo=std::make_shared< ParallelInformation >())
Constructor.
Definition matrixhierarchy.hh:389
AccumulationMode
Identifiers for the different accumulation modes.
Definition parameters.hh:232
Iterator finest()
Get an iterator positioned at the finest level.
Definition hierarchy.hh:377
void build(const T &criterion)
Build the matrix hierarchy using aggregation.
Definition matrixhierarchy.hh:400
void free()
Free the allocated memory.
void coarsenSmoother(Hierarchy< S, TA > &smoothers, const typename SmootherTraits< S >::Arguments &args) const
Coarsen the smoother hierarchy according to the matrix hierarchy.
Definition matrixhierarchy.hh:868
void buildDependency(G &graph, const typename C::Matrix &matrix, C criterion, bool finestLevel)
Build the dependency of the matrix graph.
std::tuple< int, int, int, int > buildAggregates(const M &matrix, G &graph, const C &criterion, bool finestLevel)
Build the aggregates.
void calculate(const M &fine, const AggregatesMap< V > &aggregates, M &coarse, const I &pinfo, const O &copy)
Calculate the galerkin product.
void getCoarsestAggregatesOnFinest(std::vector< std::size_t > &data) const
Get the mapping of fine level unknowns to coarse level aggregates.
Definition matrixhierarchy.hh:743
~MatrixHierarchy()
Definition matrixhierarchy.hh:824
@ MAX_PROCESSES
Hard limit for the number of processes allowed.
Definition matrixhierarchy.hh:50
@ atOnceAccu
Accumulate data to one process at once.
Definition parameters.hh:244
@ successiveAccu
Successively accumulate to fewer processes.
Definition parameters.hh:248
Definition allocator.hh:11
void printGlobalSparseMatrix(const M &mat, C &ooc, std::ostream &os)
Definition matrixutils.hh:154
PropertyMapTypeSelector< Amg::VertexVisitedTag, Amg::PropertiesGraph< G, Amg::VertexProperties, EP, VM, EM > >::Type get(const Amg::VertexVisitedTag &tag, Amg::PropertiesGraph< G, Amg::VertexProperties, EP, VM, EM > &graph)
Definition dependency.hh:293
void redistributeMatrixEntries(M &origMatrix, M &newMatrix, C &origComm, C &newComm, RedistributeInformation< C > &ri)
Definition matrixredistribute.hh:757
bool commGraphRepartition(const M &mat, Dune::OwnerOverlapCopyCommunication< T1, T2 > &oocomm, Metis::idx_t nparts, std::shared_ptr< Dune::OwnerOverlapCopyCommunication< T1, T2 > > &outcomm, RedistributeInterface &redistInf, bool verbose=false)
Definition repartition.hh:829
void redistributeMatrix(M &origMatrix, M &newMatrix, C &origComm, C &newComm, RedistributeInformation< C > &ri)
Redistribute a matrix according to given domain decompositions.
Definition matrixredistribute.hh:820
bool graphRepartition(const G &graph, Dune::OwnerOverlapCopyCommunication< T1, T2 > &oocomm, Metis::idx_t nparts, std::shared_ptr< Dune::OwnerOverlapCopyCommunication< T1, T2 > > &outcomm, RedistributeInterface &redistInf, bool verbose=false)
Execute a graph repartition for a given graph and index set.
Definition repartition.hh:1235
A vector of blocks with memory management.
Definition bvector.hh:395
Derived error class from the base class in dune-common.
Definition istlexception.hh:19
A generic dynamic dense matrix.
Definition matrix.hh:561
A::size_type size_type
Type for indices and sizes.
Definition matrix.hh:577
MatrixImp::DenseMatrixBase< T, A >::window_type row_type
The type implementing a matrix row.
Definition matrix.hh:574
Definition matrixredistribute.hh:22
Class providing information about the mapping of the vertices onto aggregates.
Definition aggregates.hh:560
Class representing the properties of an edge in the matrix graph.
Definition dependency.hh:39
Class representing a node in the matrix graph.
Definition dependency.hh:126
Definition galerkin.hh:99
Definition galerkin.hh:118
Definition globalaggregates.hh:131
The (undirected) graph of a matrix.
Definition graph.hh:51
Attaches properties to the edges and vertices of a graph.
Definition graph.hh:978
Graph::VertexDescriptor VertexDescriptor
The vertex descriptor.
Definition graph.hh:988
Definition graphcreator.hh:22
A hierarchy of containers (e.g. matrices or vectors)
Definition hierarchy.hh:40
LevelIterator< Hierarchy< MatrixOperator, Allocator >, MatrixOperator > Iterator
Type of the mutable iterator.
Definition hierarchy.hh:216
LevelIterator< const Hierarchy< MatrixOperator, Allocator >, const MatrixOperator > ConstIterator
Type of the const iterator.
Definition hierarchy.hh:219
Definition indicescoarsener.hh:36
The hierarchies built by the coarsening process.
Definition matrixhierarchy.hh:61
typename std::allocator_traits< Allocator >::template rebind_alloc< AggregatesMap * > AAllocator
Allocator for pointers.
Definition matrixhierarchy.hh:85
Dune::Amg::Hierarchy< ParallelInformation, Allocator > ParallelInformationHierarchy
The type of the parallel information hierarchy.
Definition matrixhierarchy.hh:82
std::list< AggregatesMap *, AAllocator > AggregatesMapList
The type of the aggregates maps list.
Definition matrixhierarchy.hh:88
PI ParallelInformation
The type of the index set.
Definition matrixhierarchy.hh:70
Dune::Amg::Hierarchy< MatrixOperator, Allocator > ParallelMatrixHierarchy
The type of the parallel matrix hierarchy.
Definition matrixhierarchy.hh:79
A Allocator
The allocator to use.
Definition matrixhierarchy.hh:73
RedistributeInformation< ParallelInformation > RedistributeInfoType
The type of the redistribute information.
Definition matrixhierarchy.hh:91
double getProlongationDampingFactor() const
Definition matrixhierarchy.hh:188
typename std::allocator_traits< Allocator >::template rebind_alloc< RedistributeInfoType > RILAllocator
Allocator for RedistributeInfoType.
Definition matrixhierarchy.hh:94
std::list< RedistributeInfoType, RILAllocator > RedistributeInfoList
The type of the list of redistribute information.
Definition matrixhierarchy.hh:97
Dune::Amg::AggregatesMap< typename MatrixGraph< Matrix >::VertexDescriptor > AggregatesMap
The type of the aggregates map we use.
Definition matrixhierarchy.hh:76
MatrixOperator::matrix_type Matrix
The type of the matrix.
Definition matrixhierarchy.hh:67
M MatrixOperator
The type of the matrix operator.
Definition matrixhierarchy.hh:64
void operator()(const matrix_row &row)
Definition matrixhierarchy.hh:254
Matrix::row_type matrix_row
Definition matrixhierarchy.hh:245
size_type min
Definition matrixhierarchy.hh:261
size_type max
Definition matrixhierarchy.hh:262
size_type sum
Definition matrixhierarchy.hh:263
Matrix::size_type size_type
Definition matrixhierarchy.hh:244
The criterion describing the stop criteria for the coarsening process.
Definition matrixhierarchy.hh:283
CoarsenCriterion(const Dune::Amg::Parameters &parms)
Definition matrixhierarchy.hh:306
T AggregationCriterion
The criterion for tagging connections as strong and nodes as isolated. This might be e....
Definition matrixhierarchy.hh:289
CoarsenCriterion(int maxLevel=100, int coarsenTarget=1000, double minCoarsenRate=1.2, double prolongDamp=1.6, AccumulationMode accumulate=successiveAccu)
Constructor.
Definition matrixhierarchy.hh:301
All parameters for AMG.
Definition parameters.hh:393
Definition pinfo.hh:28
Tag identifying the visited property of a vertex.
Definition properties.hh:29
The default class for the smoother arguments.
Definition smoother.hh:38
Definition transfer.hh:32
static Category category(const OP &op, decltype(op.category()) *=nullptr)
Helper function to extract the solver category either from an enum, or from the newly introduced virtu...
Definition solvercategory.hh:34