// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
// vi: set et ts=4 sw=2 sts=2:
#ifndef DUNE_AMG_AMG_CPR_HH
#define DUNE_AMG_AMG_CPR_HH

// NOTE: This file is a modified version of dune/istl/paamg/amg.hh from
// dune-istl release 2.6.0. Modifications have been kept as minimal as possible.

#include <opm/simulators/linalg/PreconditionerWithUpdate.hpp>

#include <dune/common/exceptions.hh>
#include <dune/common/typetraits.hh>
#include <dune/common/version.hh>
#include <dune/istl/paamg/amg.hh>
#include <dune/istl/paamg/smoother.hh>
#include <dune/istl/paamg/transfer.hh>
#include <dune/istl/paamg/hierarchy.hh>
#include <dune/istl/solvers.hh>
#include <dune/istl/scalarproducts.hh>
#include <dune/istl/superlu.hh>
#include <dune/istl/umfpack.hh>
#include <dune/istl/solvertype.hh>

#include <memory>

namespace Dune
{
  namespace Amg
  {

#if HAVE_MPI
    template<class M, class T>
    void redistributeMatrixAmg(M&, M&, SequentialInformation&, SequentialInformation&, T&)
    {
      // noop
    }

    template<class M, class PI>
    typename std::enable_if<!std::is_same<PI,SequentialInformation>::value,void>::type
    redistributeMatrixAmg(M& mat, M& matRedist, PI& info, PI& infoRedist,
                          Dune::RedistributeInformation<PI>& redistInfo)
    {
      info.buildGlobalLookup(mat.N());
      redistributeMatrixEntries(mat, matRedist, info, infoRedist, redistInfo);
      info.freeGlobalLookup();
    }
#endif
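
    // Overload dispatch in redistributeMatrixAmg: enable_if removes the
    // communicating template from overload resolution when the parallel
    // information type is SequentialInformation, so sequential setups fall
    // through to the no-op overload above. The same idiom in miniature
    // (hypothetical names, a sketch rather than part of this interface):
    //
    //   void exchange(SequentialInformation&) {}          // nothing to do
    //
    //   template<class PI>
    //   typename std::enable_if<!std::is_same<PI,SequentialInformation>::value>::type
    //   exchange(PI& info) { /* communicate via info */ }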

    template<class M, class X, class S, class P, class K, class A>
    class KAMG;

    template<class T>
    class KAmgTwoGrid;

    /**
     * \brief Parallel algebraic multigrid based on agglomeration.
     */
    template<class M, class X, class S, class PI=SequentialInformation,
             class A=std::allocator<X> >
    class AMGCPR : public PreconditionerWithUpdate<X,X>
    {
      template<class M1, class X1, class S1, class P1, class K1, class A1>
      friend class KAMG;

      friend class KAmgTwoGrid<AMGCPR>;

    public:
      /** \brief The matrix operator type. */
      typedef M Operator;
      /** \brief The type of the parallel information. */
      typedef PI ParallelInformation;
      /** \brief The operator hierarchy type. */
      typedef MatrixHierarchy<M, ParallelInformation, A> OperatorHierarchy;
      /** \brief The parallel data distribution hierarchy type. */
      typedef typename OperatorHierarchy::ParallelInformationHierarchy ParallelInformationHierarchy;

      /** \brief The domain type. */
      typedef X Domain;
      /** \brief The range type. */
      typedef X Range;
      /** \brief The type of the coarse solver. */
      typedef InverseOperator<X,X> CoarseSolver;
      /** \brief The type of the smoother. */
      typedef S Smoother;

      /** \brief The argument type for the construction of the smoother. */
      typedef typename SmootherTraits<Smoother>::Arguments SmootherArgs;

      /** \brief Construct a new AMG with a specific coarse solver. */
      AMGCPR(const OperatorHierarchy& matrices, CoarseSolver& coarseSolver,
             const SmootherArgs& smootherArgs, const Parameters& parms);

      /**
       * \brief Construct an AMG, building the hierarchies from the fine
       * operator and a coarsening criterion.
       */
      template<class C>
      AMGCPR(const Operator& fineOperator, const C& criterion,
             const SmootherArgs& smootherArgs=SmootherArgs(),
             const PI& pinfo=PI());

      /** \brief Copy constructor. */
      AMGCPR(const AMGCPR& amg);

      ~AMGCPR();

      void pre(Domain& x, Range& b);

      void apply(Domain& v, const Range& d);

      /** \brief Category of the preconditioner (see SolverCategory::Category). */
      virtual SolverCategory::Category category() const
      {
        return category_;
      }

      void post(Domain& x);

      /** \brief Get the aggregate number of each unknown on the coarsest level. */
      template<class A1>
      void getCoarsestAggregateNumbers(std::vector<std::size_t,A1>& cont);

      std::size_t levels();

      std::size_t maxlevels();

      /** \brief Recalculate the matrix hierarchy. */
      void recalculateHierarchy()
      {
        auto copyFlags = NegateSet<typename PI::OwnerSet>();
        const auto& matrices = matrices_->matrices();
        const auto& aggregatesMapHierarchy = matrices_->aggregatesMaps();
        const auto& infoHierarchy = matrices_->parallelInformation();
        const auto& redistInfoHierarchy = matrices_->redistributeInformation();
        BaseGalerkinProduct productBuilder;
        auto aggregatesMap = aggregatesMapHierarchy.begin();
        auto info = infoHierarchy.finest();
        auto redistInfo = redistInfoHierarchy.begin();
        auto matrix = matrices.finest();
        auto coarsestMatrix = matrices.coarsest();

        using Matrix = typename M::matrix_type;

#if HAVE_MPI
        if(matrix.isRedistributed()) {
          redistributeMatrixAmg(const_cast<Matrix&>(matrix->getmat()),
                                const_cast<Matrix&>(matrix.getRedistributed().getmat()),
                                const_cast<PI&>(*info), const_cast<PI&>(info.getRedistributed()),
                                const_cast<Dune::RedistributeInformation<PI>&>(*redistInfo));
        }
#endif
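
        // Walk the hierarchy fine-to-coarse, recomputing each Galerkin
        // product on the existing aggregation (sparsity patterns and
        // aggregates maps are reused): A_coarse = P^T A_fine P, where P is
        // the piecewise-constant prolongation induced by the aggregates.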
        for(; matrix!=coarsestMatrix; ++aggregatesMap) {
          const Matrix& fine = (matrix.isRedistributed() ? matrix.getRedistributed() : *matrix).getmat();
          ++matrix;
          ++info;
          ++redistInfo;
          productBuilder.calculate(fine, *(*aggregatesMap), const_cast<Matrix&>(matrix->getmat()), *info, copyFlags);
#if HAVE_MPI
          if(matrix.isRedistributed()) {
            redistributeMatrixAmg(const_cast<Matrix&>(matrix->getmat()),
                                  const_cast<Matrix&>(matrix.getRedistributed().getmat()),
                                  const_cast<PI&>(*info), const_cast<PI&>(info.getRedistributed()),
                                  const_cast<Dune::RedistributeInformation<PI>&>(*redistInfo));
          }
#endif
        }
      }

      /** \brief Update the coarse solver and the hierarchies. */
      template<class C>
      void updateSolver(C& criterion, const Operator& /* matrix */, const PI& pinfo);

      /** \brief Update the coarse solver and the hierarchies. */
      virtual void update();

      /** \brief Check whether the coarse solver used is a direct solver. */
      bool usesDirectCoarseLevelSolver() const;

    private:
#if DUNE_VERSION_NEWER( DUNE_ISTL, 2, 7 )
      template<class C>
      void createHierarchies(C& criterion, Operator& matrix,
                             const PI& pinfo)
      {
        // create shared_ptr with empty deleter
        std::shared_ptr< Operator > op( &matrix, []( Operator* ) {});
        std::shared_ptr< PI > pifo( const_cast< PI* > (&pinfo), []( PI * ) {});
        createHierarchies( criterion, op, pifo);
      }
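
      // The empty lambda above is a no-op deleter, which turns the
      // shared_ptr into a non-owning view: the hierarchy code can hold
      // everything as shared_ptr while the caller keeps ownership of
      // matrix and pinfo. The idiom in isolation (hypothetical names):
      //
      //   int owned = 42;
      //   std::shared_ptr<int> view(&owned, [](int*) {}); // never deletes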

      template<class C>
      void createHierarchies(C& criterion, std::shared_ptr< Operator > matrix,
                             std::shared_ptr< PI > pinfo );

#else
      template<class C>
      void createHierarchies(C& criterion, Operator& matrix,
                             const PI& pinfo);
#endif

      void setupCoarseSolver();

      /**
       * \brief The state of the multigrid cycle: all iterators and data
       * needed on the current level.
       */
      struct LevelContext
      {
        typedef Smoother SmootherType;
        /** \brief The iterator over the smoothers. */
        typename Hierarchy<Smoother,A>::Iterator smoother;
        /** \brief The iterator over the matrices. */
        typename OperatorHierarchy::ParallelMatrixHierarchy::ConstIterator matrix;
        /** \brief The iterator over the parallel information. */
        typename ParallelInformationHierarchy::Iterator pinfo;
        /** \brief The iterator over the redistribution information. */
        typename OperatorHierarchy::RedistributeInfoList::const_iterator redist;
        /** \brief The iterator over the aggregates maps. */
        typename OperatorHierarchy::AggregatesMapList::const_iterator aggregates;
        /** \brief The iterator over the left hand side. */
        typename Hierarchy<Domain,A>::Iterator lhs;
        /** \brief The iterator over the updates. */
        typename Hierarchy<Domain,A>::Iterator update;
        /** \brief The iterator over the right hand side. */
        typename Hierarchy<Range,A>::Iterator rhs;
        /** \brief The level index. */
        std::size_t level;
      };

      /** \brief Apply one multigrid cycle starting from the current level. */
      void mgc(LevelContext& levelContext);

      /** \brief Apply an additive multigrid cycle. */
      void additiveMgc();

      /** \brief Move the iterators to the finer level and prolongate the correction. */
      void moveToFineLevel(LevelContext& levelContext, bool processedFineLevel);

      /** \brief Move the iterators to the coarser level and restrict the defect. */
      bool moveToCoarseLevel(LevelContext& levelContext);

      /** \brief Initialize the level-context iterators with the finest level. */
      void initIteratorsWithFineLevel(LevelContext& levelContext);

      /** \brief The matrix hierarchy we operate on. */
      std::shared_ptr<OperatorHierarchy> matrices_;
      /** \brief The arguments for constructing the smoothers. */
      SmootherArgs smootherArgs_;
      /** \brief The hierarchy of the smoothers. */
      std::shared_ptr<Hierarchy<Smoother,A> > smoothers_;
      /** \brief The solver of the coarsest level. */
      std::shared_ptr<CoarseSolver> solver_;
      /** \brief The right hand side of our problem. */
      std::shared_ptr< Hierarchy<Range,A> > rhs_;
      /** \brief The approximate solution of our problem. */
      std::shared_ptr< Hierarchy<Domain,A> > lhs_;
      /** \brief The total update for the outer solver. */
      std::shared_ptr< Hierarchy<Domain,A> > update_;
      /** \brief The type of the scalar product. */
      using ScalarProduct = Dune::ScalarProduct<X>;
      /** \brief The scalar product used on the coarse level. */
      std::shared_ptr<ScalarProduct> scalarProduct_;
      /** \brief The number of coarse-level recursions (1: V-cycle, 2: W-cycle). */
      std::size_t gamma_;
      /** \brief The number of presmoothing steps. */
      std::size_t preSteps_;
      /** \brief The number of postsmoothing steps. */
      std::size_t postSteps_;
      bool buildHierarchy_;
      bool additive;
      bool coarsesolverconverged;
      std::shared_ptr<Smoother> coarseSmoother_;
      /** \brief The solver category. */
      SolverCategory::Category category_;
      /** \brief The verbosity level. */
      std::size_t verbosity_;
    };
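
    // A minimal usage sketch. All concrete types and variable names below
    // are illustrative assumptions (a scalar BCRSMatrix operator with an
    // ILU0 smoother in sequential mode), not mandated by this header:
    //
    //   using Mat = Dune::BCRSMatrix<Dune::FieldMatrix<double,1,1> >;
    //   using Vec = Dune::BlockVector<Dune::FieldVector<double,1> >;
    //   using Op  = Dune::MatrixAdapter<Mat,Vec,Vec>;
    //   using Smoother = Dune::SeqILU0<Mat,Vec,Vec>;
    //   using AMG = Dune::Amg::AMGCPR<Op,Vec,Smoother>;
    //
    //   Op op(A);                                   // A is an assembled Mat
    //   Dune::Amg::SmootherTraits<Smoother>::Arguments sargs;
    //   sargs.iterations = 1;
    //   Dune::Amg::CoarsenCriterion<
    //       Dune::Amg::SymmetricCriterion<Mat,Dune::Amg::FirstDiagonal> > crit(15, 2000);
    //   AMG amg(op, crit, sargs);                   // builds the hierarchies
    //
    //   Dune::BiCGSTABSolver<Vec> solver(op, amg, 1e-8, 200, 1);
    //   Dune::InverseOperatorResult res;
    //   solver.apply(x, b, res);                    // x, b are Vec
    //
    //   // ... after the entries of A change in place:
    //   amg.update();                               // rebuild coarse levels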

    template<class M, class X, class S, class PI, class A>
    AMGCPR<M,X,S,PI,A>::AMGCPR(const AMGCPR& amg)
      : matrices_(amg.matrices_), smootherArgs_(amg.smootherArgs_),
        smoothers_(amg.smoothers_), solver_(amg.solver_),
        rhs_(), lhs_(), update_(),
        scalarProduct_(amg.scalarProduct_), gamma_(amg.gamma_),
        preSteps_(amg.preSteps_), postSteps_(amg.postSteps_),
        buildHierarchy_(amg.buildHierarchy_),
        additive(amg.additive), coarsesolverconverged(amg.coarsesolverconverged),
        coarseSmoother_(amg.coarseSmoother_),
        category_(amg.category_),
        verbosity_(amg.verbosity_)
    {
      if(amg.rhs_)
        rhs_.reset( new Hierarchy<Range,A>(*amg.rhs_) );
      if(amg.lhs_)
        lhs_.reset( new Hierarchy<Domain,A>(*amg.lhs_) );
      if(amg.update_)
        update_.reset( new Hierarchy<Domain,A>(*amg.update_) );
    }

    template<class M, class X, class S, class PI, class A>
    AMGCPR<M,X,S,PI,A>::AMGCPR(const OperatorHierarchy& matrices, CoarseSolver& coarseSolver,
                               const SmootherArgs& smootherArgs,
                               const Parameters& parms)
      : matrices_(stackobject_to_shared_ptr(matrices)), smootherArgs_(smootherArgs),
        smoothers_(new Hierarchy<Smoother,A>),
        // non-owning: the caller keeps ownership of coarseSolver
        solver_(stackobject_to_shared_ptr(coarseSolver)),
        rhs_(), lhs_(), update_(), scalarProduct_(),
        gamma_(parms.getGamma()), preSteps_(parms.getNoPreSmoothSteps()),
        postSteps_(parms.getNoPostSmoothSteps()), buildHierarchy_(false),
        additive(parms.getAdditive()), coarsesolverconverged(true),
        coarseSmoother_(),
        // #warning should category be retrieved from matrices?
        category_(SolverCategory::category(*smoothers_->coarsest())),
        verbosity_(parms.debugLevel())
    {
      assert(matrices_->isBuilt());

      // build the necessary smoother hierarchies
      matrices_->coarsenSmoother(*smoothers_, smootherArgs_);
    }

    template<class M, class X, class S, class PI, class A>
    template<class C>
    AMGCPR<M,X,S,PI,A>::AMGCPR(const Operator& matrix,
                               const C& criterion,
                               const SmootherArgs& smootherArgs,
                               const PI& pinfo)
      : smootherArgs_(smootherArgs),
        smoothers_(new Hierarchy<Smoother,A>), solver_(),
        rhs_(), lhs_(), update_(), scalarProduct_(),
        gamma_(criterion.getGamma()), preSteps_(criterion.getNoPreSmoothSteps()),
        postSteps_(criterion.getNoPostSmoothSteps()), buildHierarchy_(true),
        additive(criterion.getAdditive()), coarsesolverconverged(true),
        coarseSmoother_(),
        category_(SolverCategory::category(pinfo)),
        verbosity_(criterion.debugLevel())
    {
      if(SolverCategory::category(matrix) != SolverCategory::category(pinfo))
        DUNE_THROW(InvalidSolverCategory, "Matrix and Communication must have the same SolverCategory!");
      createHierarchies(criterion, const_cast<Operator&>(matrix), pinfo);
    }

    template<class M, class X, class S, class PI, class A>
    AMGCPR<M,X,S,PI,A>::~AMGCPR()
    {
      if(buildHierarchy_) {
        if(solver_)
          solver_.reset();
        if(coarseSmoother_)
          coarseSmoother_.reset();
      }
    }

    template<class M, class X, class S, class PI, class A>
    template<class C>
    void AMGCPR<M,X,S,PI,A>::updateSolver(C& /* criterion */, const Operator& /* matrix */, const PI& /* pinfo */)
    {
      update();
    }

    template<class M, class X, class S, class PI, class A>
    void AMGCPR<M,X,S,PI,A>::update()
    {
      Timer watch;
      solver_.reset();
      coarseSmoother_.reset();
      scalarProduct_.reset();
      buildHierarchy_ = true;
      coarsesolverconverged = true;
      smoothers_.reset(new Hierarchy<Smoother,A>);
      recalculateHierarchy();
      matrices_->coarsenSmoother(*smoothers_, smootherArgs_);
      setupCoarseSolver();
      if (verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0) {
        std::cout << "Recalculating Galerkin products and coarse smoothers for "
                  << matrices_->maxlevels() << " levels took "
                  << watch.elapsed() << " seconds." << std::endl;
      }
    }

    template<class M, class X, class S, class PI, class A>
    template<class C>
#if DUNE_VERSION_NEWER( DUNE_ISTL, 2, 7 )
    void AMGCPR<M,X,S,PI,A>::createHierarchies(C& criterion, std::shared_ptr< Operator > matrix,
                                               std::shared_ptr< PI > pinfo )
#else
    void AMGCPR<M,X,S,PI,A>::createHierarchies(C& criterion, Operator& matrix, const PI& pinfo )
#endif
    {
      Timer watch;
      matrices_.reset(new OperatorHierarchy(matrix, pinfo));

      matrices_->template build<NegateSet<typename PI::OwnerSet> >(criterion);

      // build the necessary smoother hierarchies
      matrices_->coarsenSmoother(*smoothers_, smootherArgs_);
      setupCoarseSolver();
      if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
        std::cout << "Building hierarchy of " << matrices_->maxlevels() << " levels "
                  << "(including the coarse solver) took " << watch.elapsed() << " seconds." << std::endl;
    }

    template<class M, class X, class S, class PI, class A>
    void AMGCPR<M,X,S,PI,A>::setupCoarseSolver()
    {
      // Test whether we should solve on the coarse level. That is the case if
      // we have that level and, if there was a redistribution on this level,
      // our communicator has to be valid (size()>0), as the smoother might try
      // to communicate in its constructor.
      if(buildHierarchy_ && matrices_->levels()==matrices_->maxlevels()
         && ( ! matrices_->redistributeInformation().back().isSetup() ||
              matrices_->parallelInformation().coarsest().getRedistributed().communicator().size() ) )
      {
        // We have the coarsest level. Create the coarse solver.
        SmootherArgs sargs(smootherArgs_);
        sargs.iterations = 1;

        typename ConstructionTraits<Smoother>::Arguments cargs;
        cargs.setArgs(sargs);
        if(matrices_->redistributeInformation().back().isSetup()) {
          // Solve on the redistributed partitioning
          cargs.setMatrix(matrices_->matrices().coarsest().getRedistributed().getmat());
          cargs.setComm(matrices_->parallelInformation().coarsest().getRedistributed());
        }else{
          cargs.setMatrix(matrices_->matrices().coarsest()->getmat());
          cargs.setComm(*matrices_->parallelInformation().coarsest());
        }

#if DUNE_VERSION_NEWER(DUNE_ISTL, 2, 7)
        coarseSmoother_ = ConstructionTraits<Smoother>::construct(cargs);
#else
        coarseSmoother_.reset(ConstructionTraits<Smoother>::construct(cargs));
#endif

        scalarProduct_ = createScalarProduct<X>(cargs.getComm(), category());

        typedef DirectSolverSelector< typename M::matrix_type, X > SolverSelector;

        // Use a direct solver (e.g. SuperLU) if we are purely sequential or
        // if only one process remains on the coarsest level.
        if( SolverSelector::isDirectSolver &&
            (std::is_same<ParallelInformation,SequentialInformation>::value // sequential mode
             || matrices_->parallelInformation().coarsest()->communicator().size()==1 // parallel mode with only one process
             || (matrices_->parallelInformation().coarsest().isRedistributed()
                 && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()==1
                 && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()>0) ) // redistributed to one process
          )
        {
          if(matrices_->parallelInformation().coarsest().isRedistributed())
          {
            if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
            {
              // We are still participating on this level
              solver_.reset(SolverSelector::create(matrices_->matrices().coarsest().getRedistributed().getmat(), false, false));
            }
            else
              solver_.reset();
          }
          else
          {
            solver_.reset(SolverSelector::create(matrices_->matrices().coarsest()->getmat(), false, false));
          }
          if(verbosity_>0 && matrices_->parallelInformation().coarsest()->communicator().rank()==0)
            std::cout << "Using a direct coarse solver (" << SolverSelector::name() << ")" << std::endl;
        }
        else
        {
          if(matrices_->parallelInformation().coarsest().isRedistributed())
          {
            if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
              // We are still participating on this level
              solver_.reset(new BiCGSTABSolver<X>(const_cast<M&>(matrices_->matrices().coarsest().getRedistributed()),
                                                  // Cast needed for Dune <= 2.5
                                                  reinterpret_cast<typename
                                                  std::conditional<std::is_same<PI,SequentialInformation>::value,
                                                  Dune::SeqScalarProduct<X>,
                                                  Dune::OverlappingSchwarzScalarProduct<X,PI> >::type&>(*scalarProduct_),
                                                  *coarseSmoother_, 1E-2, 1000, 0));
            else
              solver_.reset();
          }else
          {
            solver_.reset(new BiCGSTABSolver<X>(const_cast<M&>(*matrices_->matrices().coarsest()),
                                                // Cast needed for Dune <= 2.5
                                                reinterpret_cast<typename
                                                std::conditional<std::is_same<PI,SequentialInformation>::value,
                                                Dune::SeqScalarProduct<X>,
                                                Dune::OverlappingSchwarzScalarProduct<X,PI> >::type&>(*scalarProduct_),
                                                *coarseSmoother_, 1E-2, 1000, 0));
            // Alternative kept from upstream: allocate with the rebound
            // allocator to ensure that we fulfill the alignment requirements.
            // using Alloc = typename A::template rebind<BiCGSTABSolver<X>>::other;
            // Alloc alloc;
            // auto p = alloc.allocate(1);
            // alloc.construct(p,
            //                 const_cast<M&>(*matrices_->matrices().coarsest()),
            //                 *scalarProduct_,
            //                 *coarseSmoother_, 1E-2, 1000, 0);
            // solver_.reset(p, [](BiCGSTABSolver<X>* p) {
            //   Alloc alloc;
            //   alloc.destroy(p);
            //   alloc.deallocate(p,1);
            // });
          }
        }
      }
    }

    template<class M, class X, class S, class PI, class A>
    void AMGCPR<M,X,S,PI,A>::pre(Domain& x, Range& b)
    {
      // Detect matrix rows where all offdiagonal entries are zero and set x
      // such that A_dd * x_d = b_d. This way users can be more careless when
      // setting up their linear systems.
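      // Example: for a row (0, ..., 0, a_dd, 0, ..., 0) the loop below finds
      // a nonzero diagonal block only, so x_d is overwritten with a_dd^{-1} b_d.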
      typedef typename M::matrix_type Matrix;
      typedef typename Matrix::ConstRowIterator RowIter;
      typedef typename Matrix::ConstColIterator ColIter;
      typedef typename Matrix::block_type Block;
      Block zero;
      zero = typename Matrix::field_type();

      const Matrix& mat = matrices_->matrices().finest()->getmat();
      for(RowIter row=mat.begin(); row!=mat.end(); ++row) {
        bool isDirichlet = true;
        bool hasDiagonal = false;
        Block diagonal;
        for(ColIter col=row->begin(); col!=row->end(); ++col) {
          if(row.index()==col.index()) {
            diagonal = *col;
            hasDiagonal = true; // remember that the row has a diagonal entry
          }else{
            if(*col!=zero)
              isDirichlet = false;
          }
        }
        if(isDirichlet && hasDiagonal)
          diagonal.solve(x[row.index()], b[row.index()]);
      }

      if(smoothers_->levels()>0)
        smoothers_->finest()->pre(x,b);
      else
        // No smoother to make x consistent! Do it by hand.
        matrices_->parallelInformation().coarsest()->copyOwnerToAll(x,x);

#if DUNE_VERSION_NEWER( DUNE_ISTL, 2, 7 )
      typedef std::shared_ptr< Range > RangePtr;
      typedef std::shared_ptr< Domain > DomainPtr;
#else
      typedef Range* RangePtr;
      typedef Domain* DomainPtr;
#endif

      // The hierarchy takes ownership of the pointers
      RangePtr copy( new Range(b) );
      rhs_.reset( new Hierarchy<Range,A>(copy) );
      DomainPtr dcopy( new Domain(x) );
      lhs_.reset( new Hierarchy<Domain,A>(dcopy) );
      DomainPtr dcopy2( new Domain(x) );
      update_.reset( new Hierarchy<Domain,A>(dcopy2) );

      matrices_->coarsenVector(*rhs_);
      matrices_->coarsenVector(*lhs_);
      matrices_->coarsenVector(*update_);

      // Preprocess all smoothers
      typedef typename Hierarchy<Smoother,A>::Iterator Iterator;
      typedef typename Hierarchy<Range,A>::Iterator RIterator;
      typedef typename Hierarchy<Domain,A>::Iterator DIterator;
      Iterator coarsest = smoothers_->coarsest();
      Iterator smoother = smoothers_->finest();
      RIterator rhs = rhs_->finest();
      DIterator lhs = lhs_->finest();
      if(smoothers_->levels()>0) {

        assert(lhs_->levels()==rhs_->levels());
        assert(smoothers_->levels()==lhs_->levels() || matrices_->levels()==matrices_->maxlevels());
        assert(smoothers_->levels()+1==lhs_->levels() || matrices_->levels()<matrices_->maxlevels());

        if(smoother!=coarsest)
          for(++smoother, ++lhs, ++rhs; smoother != coarsest; ++smoother, ++lhs, ++rhs)
            smoother->pre(*lhs,*rhs);
        smoother->pre(*lhs,*rhs);
      }

      // The preconditioner might change x and b. So we have to
      // copy the changes to the original vectors.
      x = *lhs_->finest();
      b = *rhs_->finest();
    }

    template<class M, class X, class S, class PI, class A>
    std::size_t AMGCPR<M,X,S,PI,A>::levels()
    {
      return matrices_->levels();
    }

    template<class M, class X, class S, class PI, class A>
    std::size_t AMGCPR<M,X,S,PI,A>::maxlevels()
    {
      return matrices_->maxlevels();
    }

    /** \brief Apply one step of the preconditioner to the system A(v)=d. */
    template<class M, class X, class S, class PI, class A>
    void AMGCPR<M,X,S,PI,A>::apply(Domain& v, const Range& d)
    {
      LevelContext levelContext;

      if(additive) {
        *(rhs_->finest()) = d;
        additiveMgc();
        v = *lhs_->finest();
      }else{
        // Init all iterators for the current level
        initIteratorsWithFineLevel(levelContext);

        *levelContext.lhs = v;
        *levelContext.rhs = d;
        *levelContext.update = 0;
        levelContext.level = 0;

        mgc(levelContext);

        if(postSteps_==0 || matrices_->maxlevels()==1)
          levelContext.pinfo->copyOwnerToAll(*levelContext.update, *levelContext.update);

        v = *levelContext.update;
      }
    }

    template<class M, class X, class S, class PI, class A>
    void AMGCPR<M,X,S,PI,A>::initIteratorsWithFineLevel(LevelContext& levelContext)
    {
      levelContext.smoother = smoothers_->finest();
      levelContext.matrix = matrices_->matrices().finest();
      levelContext.pinfo = matrices_->parallelInformation().finest();
      levelContext.redist = matrices_->redistributeInformation().begin();
      levelContext.aggregates = matrices_->aggregatesMaps().begin();
      levelContext.lhs = lhs_->finest();
      levelContext.update = update_->finest();
      levelContext.rhs = rhs_->finest();
    }

    template<class M, class X, class S, class PI, class A>
    bool AMGCPR<M,X,S,PI,A>
    ::moveToCoarseLevel(LevelContext& levelContext)
    {
      bool processNextLevel = true;

      if(levelContext.redist->isSetup()) {
        levelContext.redist->redistribute(static_cast<const Range&>(*levelContext.rhs),
                                          levelContext.rhs.getRedistributed());
        processNextLevel = levelContext.rhs.getRedistributed().size()>0;
        if(processNextLevel) {
          // restrict defect to coarse level right hand side.
          typename Hierarchy<Range,A>::Iterator fineRhs = levelContext.rhs++;
          ++levelContext.pinfo;
          Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
          ::restrictVector(*(*levelContext.aggregates), *levelContext.rhs,
                           static_cast<const Range&>(fineRhs.getRedistributed()),
                           *levelContext.pinfo);
        }
      }else{
        // restrict defect to coarse level right hand side.
        typename Hierarchy<Range,A>::Iterator fineRhs = levelContext.rhs++;
        ++levelContext.pinfo;
        Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
        ::restrictVector(*(*levelContext.aggregates),
                         *levelContext.rhs, static_cast<const Range&>(*fineRhs),
                         *levelContext.pinfo);
      }

      if(processNextLevel) {
        // prepare coarse system
        ++levelContext.lhs;
        ++levelContext.update;
        ++levelContext.matrix;
        ++levelContext.level;
        ++levelContext.redist;

        if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
          // next level is not the globally coarsest one
          ++levelContext.smoother;
          ++levelContext.aggregates;
        }
        // prepare the update on the next level
        *levelContext.update = 0;
      }
      return processNextLevel;
    }

    template<class M, class X, class S, class PI, class A>
    void AMGCPR<M,X,S,PI,A>
    ::moveToFineLevel(LevelContext& levelContext, bool processNextLevel)
    {
      if(processNextLevel) {
        if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
          // previous level is not the globally coarsest one
          --levelContext.smoother;
          --levelContext.aggregates;
        }
        --levelContext.redist;
        --levelContext.level;
        // prolongate and add the correction (update is in coarse left hand side)
        --levelContext.matrix;

        //typename Hierarchy<Domain,A>::Iterator coarseLhs = lhs--;
        --levelContext.lhs;
        --levelContext.pinfo;
      }
      if(levelContext.redist->isSetup()) {
        // Need to redistribute during prolongateVector
        levelContext.lhs.getRedistributed() = 0;
        Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
        ::prolongateVector(*(*levelContext.aggregates), *levelContext.update, *levelContext.lhs,
                           levelContext.lhs.getRedistributed(),
                           matrices_->getProlongationDampingFactor(),
                           *levelContext.pinfo, *levelContext.redist);
      }else{
        *levelContext.lhs = 0;
        Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
        ::prolongateVector(*(*levelContext.aggregates), *levelContext.update, *levelContext.lhs,
                           matrices_->getProlongationDampingFactor(),
                           *levelContext.pinfo);
      }

      if(processNextLevel) {
        --levelContext.update;
        --levelContext.rhs;
      }

      *levelContext.update += *levelContext.lhs;
    }

    template<class M, class X, class S, class PI, class A>
    bool AMGCPR<M,X,S,PI,A>::usesDirectCoarseLevelSolver() const
    {
      return IsDirectSolver<CoarseSolver>::value;
    }

    template<class M, class X, class S, class PI, class A>
    void AMGCPR<M,X,S,PI,A>::mgc(LevelContext& levelContext){
      if(levelContext.matrix == matrices_->matrices().coarsest() && levels()==maxlevels()) {
        // Solve directly
        InverseOperatorResult res;
        res.converged = true; // If we do not compute, this flag will not get updated
        if(levelContext.redist->isSetup()) {
          levelContext.redist->redistribute(*levelContext.rhs, levelContext.rhs.getRedistributed());
          if(levelContext.rhs.getRedistributed().size()>0) {
            // We are still participating in the computation
            levelContext.pinfo.getRedistributed().copyOwnerToAll(levelContext.rhs.getRedistributed(),
                                                                 levelContext.rhs.getRedistributed());
            solver_->apply(levelContext.update.getRedistributed(),
                           levelContext.rhs.getRedistributed(), res);
          }
          levelContext.redist->redistributeBackward(*levelContext.update, levelContext.update.getRedistributed());
          levelContext.pinfo->copyOwnerToAll(*levelContext.update, *levelContext.update);
        }else{
          levelContext.pinfo->copyOwnerToAll(*levelContext.rhs, *levelContext.rhs);
          solver_->apply(*levelContext.update, *levelContext.rhs, res);
        }

        if (!res.converged)
          coarsesolverconverged = false;
      }else{
        // presmoothing
        presmooth(levelContext, preSteps_);

#ifndef DUNE_AMG_NO_COARSEGRIDCORRECTION
        bool processNextLevel = moveToCoarseLevel(levelContext);

        if(processNextLevel) {
          // next level
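          // gamma_ recursions per level: gamma_ == 1 gives a V-cycle, gamma_ == 2 a W-cycle.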
          for(std::size_t i=0; i<gamma_; i++)
            mgc(levelContext);
        }

        moveToFineLevel(levelContext, processNextLevel);
#else
        *levelContext.lhs = 0;
#endif

        if(levelContext.matrix == matrices_->matrices().finest()) {
          coarsesolverconverged = matrices_->parallelInformation().finest()->communicator().prod(coarsesolverconverged);
          if(!coarsesolverconverged){
            //DUNE_THROW(MathError, "Coarse solver did not converge");
          }
        }
        // postsmoothing
        postsmooth(levelContext, postSteps_);

      }
    }

    template<class M, class X, class S, class PI, class A>
    void AMGCPR<M,X,S,PI,A>::additiveMgc(){
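      // Additive variant: restrict the residual to every level first, smooth
      // on all levels independently, then sum up the prolongated corrections
      // (in contrast to the multiplicative cycle in mgc()).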
      // restrict residual to all levels
      typename ParallelInformationHierarchy::Iterator pinfo = matrices_->parallelInformation().finest();
      typename Hierarchy<Range,A>::Iterator rhs = rhs_->finest();
      typename Hierarchy<Domain,A>::Iterator lhs = lhs_->finest();
      typename OperatorHierarchy::AggregatesMapList::const_iterator aggregates = matrices_->aggregatesMaps().begin();

      for(typename Hierarchy<Range,A>::Iterator fineRhs=rhs++; fineRhs != rhs_->coarsest(); fineRhs=rhs++, ++aggregates) {
        ++pinfo;
        Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
        ::restrictVector(*(*aggregates), *rhs, static_cast<const Range&>(*fineRhs), *pinfo);
      }

      // pinfo is invalid, set to coarsest level
      //pinfo = matrices_->parallelInformation().coarsest
      // calculate correction for all levels
      lhs = lhs_->finest();
      typename Hierarchy<Smoother,A>::Iterator smoother = smoothers_->finest();

      for(rhs=rhs_->finest(); rhs != rhs_->coarsest(); ++lhs, ++rhs, ++smoother) {
        // presmoothing
        *lhs = 0;
        smoother->apply(*lhs, *rhs);
      }

      // Coarse level solve
#ifndef DUNE_AMG_NO_COARSEGRIDCORRECTION
      InverseOperatorResult res;
      pinfo->copyOwnerToAll(*rhs, *rhs);
      solver_->apply(*lhs, *rhs, res);

      if(!res.converged)
        DUNE_THROW(MathError, "Coarse solver did not converge");
#else
      *lhs = 0;
#endif
      // Prolongate and add up the corrections from all levels
      --pinfo;
      --aggregates;

      for(typename Hierarchy<Domain,A>::Iterator coarseLhs = lhs--; coarseLhs != lhs_->finest(); coarseLhs = lhs--, --aggregates, --pinfo) {
        Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
        ::prolongateVector(*(*aggregates), *coarseLhs, *lhs, 1.0, *pinfo);
      }
    }

    template<class M, class X, class S, class PI, class A>
    void AMGCPR<M,X,S,PI,A>::post([[maybe_unused]] Domain& x)
    {
      // Postprocess all smoothers
      typedef typename Hierarchy<Smoother,A>::Iterator Iterator;
      typedef typename Hierarchy<Domain,A>::Iterator DIterator;
      Iterator coarsest = smoothers_->coarsest();
      Iterator smoother = smoothers_->finest();
      DIterator lhs = lhs_->finest();
      if(smoothers_->levels()>0) {
        if(smoother != coarsest || matrices_->levels()<matrices_->maxlevels())
          smoother->post(*lhs);
        if(smoother!=coarsest)
          for(++smoother, ++lhs; smoother != coarsest; ++smoother, ++lhs)
            smoother->post(*lhs);
        smoother->post(*lhs);
      }
      //delete &(*lhs_->finest());
      lhs_.reset();
      //delete &(*update_->finest());
      update_.reset();
      //delete &(*rhs_->finest());
      rhs_.reset();
    }

    template<class M, class X, class S, class PI, class A>
    template<class A1>
    void AMGCPR<M,X,S,PI,A>::getCoarsestAggregateNumbers(std::vector<std::size_t,A1>& cont)
    {
      matrices_->getCoarsestAggregatesOnFinest(cont);
    }

  } // end namespace Amg
} // end namespace Dune

#endif // DUNE_AMG_AMG_CPR_HH