dune-pdelab  2.5-dev
parallelhelper.hh
Go to the documentation of this file.
1 // -*- tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 // vi: set et ts=8 sw=2 sts=2:
3 #ifndef DUNE_PDELAB_BACKEND_ISTL_PARALLELHELPER_HH
4 #define DUNE_PDELAB_BACKEND_ISTL_PARALLELHELPER_HH
5 
6 // this is here for backwards compatibility and deprecation warnings, remove after 2.5.0
7 #include "ensureistlinclude.hh"
8 
 9 #include <limits>
 9 #include <numeric>
 9 #include <set>
 9 #include <type_traits>
 9 #include <vector>
10 
11 #include <dune/common/parallel/mpihelper.hh>
12 #include <dune/common/stdstreams.hh>
13 #include <dune/common/typetraits.hh>
14 
15 #if HAVE_UG && PDELAB_SEQUENTIAL_UG
16 // We need the UGGrid declaration for the assertion
17 #include <dune/grid/uggrid.hh>
18 #endif
19 
20 #include <dune/istl/owneroverlapcopy.hh>
21 #include <dune/istl/solvercategory.hh>
22 #include <dune/istl/operators.hh>
23 #include <dune/istl/solvers.hh>
24 #include <dune/istl/preconditioners.hh>
25 #include <dune/istl/scalarproducts.hh>
26 #include <dune/istl/paamg/amg.hh>
27 #include <dune/istl/paamg/pinfo.hh>
28 #include <dune/istl/io.hh>
29 #include <dune/istl/superlu.hh>
30 
37 
38 namespace Dune {
39  namespace PDELab {
40  namespace ISTL {
41 
45 
46 
47  //========================================================
48  // A parallel helper class providing a nonoverlapping
49  // decomposition of all degrees of freedom
50  //========================================================
51 
52  template<typename GFS>
54  {
55 
57  typedef int RankIndex;
58 
62  using GhostVector = Dune::PDELab::Backend::Vector<GFS,bool>;
63 
65  typedef typename GFS::Ordering::Traits::ContainerIndex ContainerIndex;
66 
67  public:
68 
69  ParallelHelper (const GFS& gfs, int verbose = 1)
70  : _gfs(gfs)
71  , _rank(gfs.gridView().comm().rank())
72  , _ranks(gfs,_rank)
73  , _ghosts(gfs,false)
74  , _verbose(verbose)
75  {
76 
77  // Let's try to be clever and reduce the communication overhead by picking the smallest
78  // possible communication interface depending on the overlap structure of the GFS.
79  // FIXME: Switch to simple comparison as soon as dune-grid:1b3e83ec0 is reliably available!
80  if (gfs.entitySet().partitions().value == Partitions::interiorBorder.value)
81  {
82  // The GFS only spans the interior and border partitions, so we can skip sending or
83  // receiving anything else.
84  _interiorBorder_all_interface = InteriorBorder_InteriorBorder_Interface;
85  _all_all_interface = InteriorBorder_InteriorBorder_Interface;
86  }
87  else
88  {
89  // In general, we have to transmit more.
90  _interiorBorder_all_interface = InteriorBorder_All_Interface;
91  _all_all_interface = All_All_Interface;
92  }
93 
94  if (_gfs.gridView().comm().size()>1)
95  {
96 
97  // find out about ghosts
98  //GFSDataHandle<GFS,GhostVector,GhostGatherScatter>
100  gdh(_gfs,_ghosts,false);
101  _gfs.gridView().communicate(gdh,_interiorBorder_all_interface,Dune::ForwardCommunication);
102 
103  // create disjoint DOF partitioning
104  // GFSDataHandle<GFS,RankVector,DisjointPartitioningGatherScatter<RankIndex> >
105  // ibdh(_gfs,_ranks,DisjointPartitioningGatherScatter<RankIndex>(_rank));
107  _gfs.gridView().communicate(pdh,_interiorBorder_all_interface,Dune::ForwardCommunication);
108 
109  }
110 
111  }
112 
114  template<typename X>
115  void maskForeignDOFs(X& x) const
116  {
117  using Backend::native;
118  // dispatch to implementation.
120  }
121 
122  private:
123 
124  // Implementation for block vector; recursively masks blocks.
125  template<typename X, typename Mask>
126  void maskForeignDOFs(ISTL::tags::block_vector, X& x, const Mask& mask) const
127  {
128  typename Mask::const_iterator mask_it = mask.begin();
129  for (typename X::iterator it = x.begin(),
130  end_it = x.end();
131  it != end_it;
132  ++it, ++mask_it)
133  maskForeignDOFs(ISTL::container_tag(*it),*it,*mask_it);
134  }
135 
136  // Implementation for field vector, iterates over entries and masks them individually.
137  template<typename X, typename Mask>
138  void maskForeignDOFs(ISTL::tags::field_vector, X& x, const Mask& mask) const
139  {
140  typename Mask::const_iterator mask_it = mask.begin();
141  for (typename X::iterator it = x.begin(),
142  end_it = x.end();
143  it != end_it;
144  ++it, ++mask_it)
145  *it = (*mask_it == _rank ? *it : typename X::field_type(0));
146  }
147 
148  public:
149 
151  bool owned(const ContainerIndex& i) const
152  {
153  return _ranks[i] == _rank;
154  }
155 
157  bool isGhost(const ContainerIndex& i) const
158  {
159  return _ghosts[i];
160  }
161 
163  template<typename X, typename Y>
164  typename PromotionTraits<
165  typename X::field_type,
166  typename Y::field_type
167  >::PromotedType
168  disjointDot(const X& x, const Y& y) const
169  {
170  using Backend::native;
172  native(x),
173  native(y),
174  native(_ranks)
175  );
176  }
177 
178  private:
179 
180  // Implementation for BlockVector, collects the result of recursively
181  // invoking the algorithm on the vector blocks.
182  template<typename X, typename Y, typename Mask>
183  typename PromotionTraits<
184  typename X::field_type,
185  typename Y::field_type
186  >::PromotedType
187  disjointDot(ISTL::tags::block_vector, const X& x, const Y& y, const Mask& mask) const
188  {
189  typedef typename PromotionTraits<
190  typename X::field_type,
191  typename Y::field_type
192  >::PromotedType result_type;
193 
194  result_type r(0);
195 
196  typename Y::const_iterator y_it = y.begin();
197  typename Mask::const_iterator mask_it = mask.begin();
198  for (typename X::const_iterator x_it = x.begin(),
199  end_it = x.end();
200  x_it != end_it;
201  ++x_it, ++y_it, ++mask_it)
202  r += disjointDot(ISTL::container_tag(*x_it),*x_it,*y_it,*mask_it);
203 
204  return r;
205  }
206 
207  // Implementation for FieldVector, iterates over the entries and calls Dune::dot() for DOFs
208  // associated with the current rank.
209  template<typename X, typename Y, typename Mask>
210  typename PromotionTraits<
211  typename X::field_type,
212  typename Y::field_type
213  >::PromotedType
214  disjointDot(ISTL::tags::field_vector, const X& x, const Y& y, const Mask& mask) const
215  {
216  typedef typename PromotionTraits<
217  typename X::field_type,
218  typename Y::field_type
219  >::PromotedType result_type;
220 
221  result_type r(0);
222 
223  typename Y::const_iterator y_it = y.begin();
224  typename Mask::const_iterator mask_it = mask.begin();
225  for (typename X::const_iterator x_it = x.begin(),
226  end_it = x.end();
227  x_it != end_it;
228  ++x_it, ++y_it, ++mask_it)
229  r += (*mask_it == _rank ? Dune::dot(*x_it,*y_it) : result_type(0));
230 
231  return r;
232  }
233 
234  public:
235 
237  RankIndex rank() const
238  {
239  return _rank;
240  }
241 
242 #if HAVE_MPI
243 
245 
    //! Makes the matrix consistent and creates the parallel information for AMG.
    //
    // Builds the ParallelIndexSet and remote indices of c from the disjoint
    // DOF partitioning held by this helper (definition below, guarded by HAVE_MPI).
    template<typename MatrixType, typename Comm>
    void createIndexSetAndProjectForAMG(MatrixType& m, Comm& c);
263 
264  private:
265 
266  // Checks whether a matrix block is owned by the current process. Used for the AMG
267  // construction and thus assumes a single level of blocking and blocks with ownership
268  // restricted to a single DOF.
269  bool owned_for_amg(std::size_t i) const
270  {
271  return Backend::native(_ranks)[i][0] == _rank;
272  }
273 
274 #endif // HAVE_MPI
275 
  private:

    const GFS& _gfs;       // grid function space this helper was built for
    const RankIndex _rank; // MPI rank of this process
    RankVector _ranks; // vector to identify unique decomposition
    GhostVector _ghosts; //vector to identify ghost dofs
    int _verbose; //verbosity

    //! The actual communication interface used when algorithm requires InteriorBorder_All_Interface.
    InterfaceType _interiorBorder_all_interface;

    //! The actual communication interface used when algorithm requires All_All_Interface.
    InterfaceType _all_all_interface;
  };
290 
291 #if HAVE_MPI
292 
293  template<typename GFS>
294  template<typename M, typename C>
296  {
297 
298  using Backend::native;
299 
300  const bool is_bcrs_matrix =
301  std::is_same<
302  typename ISTL::tags::container<
304  >::type::base_tag,
306  >::value;
307 
308  const bool block_type_is_field_matrix =
309  std::is_same<
310  typename ISTL::tags::container<
312  >::type::base_tag,
314  >::value;
315 
316  // We assume M to be a BCRSMatrix in the following, so better check for that
317  static_assert(is_bcrs_matrix && block_type_is_field_matrix, "matrix structure not compatible with AMG");
318 
319  // ********************************************************************************
320  // In the following, the code will always assume that all DOFs stored in a single
321  // block of the BCRSMatrix are attached to the same entity and can be handled
322  // identically. For that reason, the code often restricts itself to inspecting the
323  // first entry of the blocks in the diverse BlockVectors.
324  // ********************************************************************************
325 
326  typedef typename GFS::Traits::GridViewType GV;
327  typedef typename RankVector::size_type size_type;
328  const GV& gv = _gfs.gridView();
329 
330  // Do we need to communicate at all?
331  const bool need_communication = _gfs.gridView().comm().size() > 1;
332 
333  // First find out which dofs we share with other processors
334  using BoolVector = Backend::Vector<GFS,bool>;
335  BoolVector sharedDOF(_gfs, false);
336 
337  if (need_communication)
338  {
339  SharedDOFDataHandle<GFS,BoolVector> data_handle(_gfs,sharedDOF,false);
340  _gfs.gridView().communicate(data_handle,_all_all_interface,Dune::ForwardCommunication);
341  }
342 
343  // Count shared dofs that we own
344  typedef typename C::ParallelIndexSet::GlobalIndex GlobalIndex;
345  GlobalIndex count = 0;
346 
347  for (size_type i = 0; i < sharedDOF.N(); ++i)
348  if (owned_for_amg(i) && native(sharedDOF)[i][0])
349  ++count;
350 
351  dverb << gv.comm().rank() << ": shared block count is " << count.touint() << std::endl;
352 
353  // Communicate per-rank count of owned and shared DOFs to all processes.
354  std::vector<GlobalIndex> counts(_gfs.gridView().comm().size());
355  _gfs.gridView().comm().allgather(&count, 1, &(counts[0]));
356 
357  // Compute start index start_p = \sum_{i=0}^{i<p} counts_i
358  GlobalIndex start = std::accumulate(counts.begin(),counts.begin() + _rank,GlobalIndex(0));
359 
361  GIVector scalarIndices(_gfs, std::numeric_limits<GlobalIndex>::max());
362 
363  for (size_type i = 0; i < sharedDOF.N(); ++i)
364  if (owned_for_amg(i) && native(sharedDOF)[i][0])
365  {
366  native(scalarIndices)[i][0] = start;
367  ++start;
368  }
369 
370  // Publish global indices for the shared DOFS to other processors.
371  if (need_communication)
372  {
373  MinDataHandle<GFS,GIVector> data_handle(_gfs,scalarIndices);
374  _gfs.gridView().communicate(data_handle,_interiorBorder_all_interface,Dune::ForwardCommunication);
375  }
376 
377  // Setup the index set
378  c.indexSet().beginResize();
379  for (size_type i=0; i<scalarIndices.N(); ++i)
380  {
381  Dune::OwnerOverlapCopyAttributeSet::AttributeSet attr;
382  if(native(scalarIndices)[i][0] != std::numeric_limits<GlobalIndex>::max())
383  {
384  // global index exist in index set
385  if (owned_for_amg(i))
386  {
387  // This dof is managed by us.
388  attr = Dune::OwnerOverlapCopyAttributeSet::owner;
389  }
390  else
391  {
392  attr = Dune::OwnerOverlapCopyAttributeSet::copy;
393  }
394  c.indexSet().add(native(scalarIndices)[i][0], typename C::ParallelIndexSet::LocalIndex(i,attr));
395  }
396  }
397  c.indexSet().endResize();
398 
399  // Compute neighbors using communication
400  std::set<int> neighbors;
401 
402  if (need_communication)
403  {
404  GFSNeighborDataHandle<GFS,int> data_handle(_gfs,_rank,neighbors);
405  _gfs.gridView().communicate(data_handle,_all_all_interface,Dune::ForwardCommunication);
406  }
407 
408  c.remoteIndices().setNeighbours(neighbors);
409  c.remoteIndices().template rebuild<false>();
410  }
411 
412 #endif // HAVE_MPI
413 
414  template<int s, bool isFakeMPIHelper>
416  {
417  typedef Dune::Amg::SequentialInformation type;
418  };
419 
420 
421 #if HAVE_MPI
422 
423  // Need MPI for OwnerOverlapCopyCommunication
424  template<int s>
425  struct CommSelector<s,false>
426  {
427  typedef OwnerOverlapCopyCommunication<bigunsignedint<s>,int> type;
428  };
429 
430 #endif // HAVE_MPI
431 
  // Catch-all overload: nothing to check for general communication types.
  // (The UG-specific overload below rejects sequential UG in parallel builds.)
  template<typename T>
  void assertParallelUG(T comm)
  {}
435 
436 #if HAVE_UG && PDELAB_SEQUENTIAL_UG
437  template<int dim>
438  void assertParallelUG(Dune::CollectiveCommunication<Dune::UGGrid<dim> > comm)
439  {
440  static_assert(Dune::AlwaysFalse<Dune::UGGrid<dim> >::value, "Using sequential UG in parallel environment");
441  };
442 #endif
443 
445  } // namespace ISTL
446  } // namespace PDELab
447 } // namespace Dune
448 
449 #endif // DUNE_PDELAB_BACKEND_ISTL_PARALLELHELPER_HH
void maskForeignDOFs(X &x) const
Sets to 0 all entries of x belonging to DOFs not owned by the current process.
Definition: parallelhelper.hh:115
typename impl::BackendVectorSelector< GridFunctionSpace, FieldType >::Type Vector
alias of the return type of BackendVectorSelector
Definition: backend/interface.hh:106
tags::container< T >::type container_tag(const T &)
Gets instance of container tag associated with T.
Definition: backend/istl/tags.hh:249
Data handle for marking ghost DOFs.
Definition: genericdatahandle.hh:812
typename native_type< T >::type Native
Alias of the native container type associated with T or T itself if it is not a backend wrapper...
Definition: backend/interface.hh:176
ParallelHelper(const GFS &gfs, int verbose=1)
Definition: parallelhelper.hh:69
Data handle for collecting set of neighboring MPI ranks.
Definition: genericdatahandle.hh:1060
GatherScatter data handle for creating a disjoint DOF partitioning.
Definition: genericdatahandle.hh:930
Definition: parallelhelper.hh:53
const std::string s
Definition: function.hh:1101
Tag describing an arbitrary FieldVector.
Definition: backend/istl/tags.hh:46
For backward compatibility – Do not use this!
Definition: adaptivity.hh:27
Definition: genericdatahandle.hh:716
bool owned(const ContainerIndex &i) const
Tests whether the given index is owned by this process.
Definition: parallelhelper.hh:151
Tag describing an arbitrary FieldMatrix.
Definition: backend/istl/tags.hh:83
Extracts the container tag from T.
Definition: backend/istl/tags.hh:145
PromotionTraits< typename X::field_type, typename Y::field_type >::PromotedType disjointDot(const X &x, const Y &y) const
Calculates the (rank-local) dot product of x and y on the disjoint partition defined by the helper...
Definition: parallelhelper.hh:168
Data handle for marking shared DOFs.
Definition: genericdatahandle.hh:1014
void createIndexSetAndProjectForAMG(MatrixType &m, Comm &c)
Makes the matrix consistent and creates the parallel information for AMG.
static const unsigned int value
Definition: gridfunctionspace/tags.hh:139
OwnerOverlapCopyCommunication< bigunsignedint< s >, int > type
Definition: parallelhelper.hh:427
Definition: parallelhelper.hh:415
Tag describing a BlockVector.
Definition: backend/istl/tags.hh:26
RankIndex rank() const
Returns the MPI rank of this process.
Definition: parallelhelper.hh:237
Dune::Amg::SequentialInformation type
Definition: parallelhelper.hh:417
std::enable_if< std::is_base_of< impl::WrapperBase, T >::value, Native< T > &>::type native(T &t)
Definition: backend/interface.hh:192
Tag describing a BCRSMatrix.
Definition: backend/istl/tags.hh:63
bool isGhost(const ContainerIndex &i) const
Tests whether the given index belongs to a ghost DOF.
Definition: parallelhelper.hh:157
void assertParallelUG(T comm)
Definition: parallelhelper.hh:433