libMesh
mesh_communication.C
Go to the documentation of this file.
1 // The libMesh Finite Element Library.
2 // Copyright (C) 2002-2017 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner
3 
4 // This library is free software; you can redistribute it and/or
5 // modify it under the terms of the GNU Lesser General Public
6 // License as published by the Free Software Foundation; either
7 // version 2.1 of the License, or (at your option) any later version.
8 
9 // This library is distributed in the hope that it will be useful,
10 // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 // Lesser General Public License for more details.
13 
14 // You should have received a copy of the GNU Lesser General Public
15 // License along with this library; if not, write to the Free Software
16 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 
18 
19 
20 // Local Includes
21 #include "libmesh/boundary_info.h"
22 #include "libmesh/distributed_mesh.h"
23 #include "libmesh/elem.h"
24 #include "libmesh/ghosting_functor.h"
25 #include "libmesh/libmesh_config.h"
26 #include "libmesh/libmesh_common.h"
27 #include "libmesh/libmesh_logging.h"
28 #include "libmesh/mesh_base.h"
29 #include "libmesh/mesh_communication.h"
30 #include "libmesh/mesh_inserter_iterator.h"
31 #include "libmesh/mesh_tools.h"
32 #include "libmesh/parallel.h"
33 #include "libmesh/parallel_elem.h"
34 #include "libmesh/parallel_node.h"
35 #include "libmesh/parallel_ghost_sync.h"
36 #include "libmesh/utility.h"
37 #include "libmesh/remote_elem.h"
38 
39 // C++ Includes
40 #include <numeric>
41 #include <set>
42 #include <unordered_set>
43 #include <unordered_map>
44 
45 
46 
47 //-----------------------------------------------
48 // anonymous namespace for implementation details
49 namespace {
50 
51 using namespace libMesh;
52 
53 struct SyncNeighbors
54 {
55  typedef std::vector<dof_id_type> datum;
56 
57  SyncNeighbors(MeshBase & _mesh) :
58  mesh(_mesh) {}
59 
60  MeshBase & mesh;
61 
62  // Find the neighbor ids for each requested element
63  void gather_data (const std::vector<dof_id_type> & ids,
64  std::vector<datum> & neighbors) const
65  {
66  neighbors.resize(ids.size());
67 
68  for (std::size_t i=0; i != ids.size(); ++i)
69  {
70  // Look for this element in the mesh
71  // We'd better find every element we're asked for
72  const Elem & elem = mesh.elem_ref(ids[i]);
73 
74  // Return the element's neighbors
75  const unsigned int n_neigh = elem.n_neighbors();
76  neighbors[i].resize(n_neigh);
77  for (unsigned int n = 0; n != n_neigh; ++n)
78  {
79  const Elem * neigh = elem.neighbor_ptr(n);
80  if (neigh)
81  {
82  libmesh_assert_not_equal_to(neigh, remote_elem);
83  neighbors[i][n] = neigh->id();
84  }
85  else
86  neighbors[i][n] = DofObject::invalid_id;
87  }
88  }
89  }
90 
91  void act_on_data (const std::vector<dof_id_type> & ids,
92  std::vector<datum> & neighbors) const
93  {
94  for (std::size_t i=0; i != ids.size(); ++i)
95  {
96  Elem & elem = mesh.elem_ref(ids[i]);
97 
98  datum & new_neigh = neighbors[i];
99 
100  const unsigned int n_neigh = elem.n_neighbors();
101  libmesh_assert_equal_to (n_neigh, new_neigh.size());
102 
103  for (unsigned int n = 0; n != n_neigh; ++n)
104  {
105  const dof_id_type new_neigh_id = new_neigh[n];
106  const Elem * old_neigh = elem.neighbor_ptr(n);
107  if (old_neigh && old_neigh != remote_elem)
108  {
109  libmesh_assert_equal_to(old_neigh->id(), new_neigh_id);
110  }
111  else if (new_neigh_id == DofObject::invalid_id)
112  {
113  libmesh_assert (!old_neigh);
114  }
115  else
116  {
117  Elem * neigh = mesh.query_elem_ptr(new_neigh_id);
118  if (neigh)
119  elem.set_neighbor(n, neigh);
120  else
121  elem.set_neighbor(n, const_cast<RemoteElem *>(remote_elem));
122  }
123  }
124  }
125  }
126 };
127 
128 
129 }
130 
131 
132 
133 namespace libMesh
134 {
135 
136 
138  processor_id_type pid,
139  bool newly_coarsened_only,
140  std::set<const Elem *, CompareElemIdsByLevel> & connected_elements)
141 {
142  // This parameter is not used when !LIBMESH_ENABLE_AMR.
143  libmesh_ignore(newly_coarsened_only);
144 
145 #ifndef LIBMESH_ENABLE_AMR
146  libmesh_assert(!newly_coarsened_only);
147 #endif
148 
150 #ifdef LIBMESH_ENABLE_AMR
151  newly_coarsened_only ? mesh.flagged_pid_elements_begin(Elem::JUST_COARSENED, pid) :
152 #endif
153  mesh.active_pid_elements_begin(pid);
154 
155  const MeshBase::const_element_iterator elem_end =
156 #ifdef LIBMESH_ENABLE_AMR
157  newly_coarsened_only ? mesh.flagged_pid_elements_end(Elem::JUST_COARSENED, pid) :
158 #endif
159  mesh.active_pid_elements_end(pid);
160 
161  std::set<GhostingFunctor *>::iterator gf_it = mesh.ghosting_functors_begin();
162  const std::set<GhostingFunctor *>::iterator gf_end = mesh.ghosting_functors_end();
163  for (; gf_it != gf_end; ++gf_it)
164  {
165  GhostingFunctor::map_type elements_to_ghost;
166 
167  GhostingFunctor * gf = *gf_it;
168  libmesh_assert(gf);
169  (*gf)(elem_it, elem_end, pid, elements_to_ghost);
170 
171  // We can ignore the CouplingMatrix in ->second, but we
172  // need to ghost all the elements in ->first.
173  GhostingFunctor::map_type::iterator etg_it = elements_to_ghost.begin();
174  const GhostingFunctor::map_type::iterator etg_end = elements_to_ghost.end();
175  for (; etg_it != etg_end; ++etg_it)
176  {
177  const Elem * elem = etg_it->first;
178  libmesh_assert(elem != remote_elem);
179  connected_elements.insert(elem);
180  }
181  }
182 
183  // The GhostingFunctors won't be telling us about the elements from
184  // pid; we need to add those ourselves.
185  for (; elem_it != elem_end; ++elem_it)
186  connected_elements.insert(*elem_it);
187 }
188 
189 
190 void connect_children(const MeshBase & mesh,
191  processor_id_type pid,
192  std::set<const Elem *, CompareElemIdsByLevel> & connected_elements)
193 {
194  // None of these parameters are used when !LIBMESH_ENABLE_AMR.
195  libmesh_ignore(mesh);
196  libmesh_ignore(pid);
197  libmesh_ignore(connected_elements);
198 
199 #ifdef LIBMESH_ENABLE_AMR
200  // Our XdrIO output needs inactive local elements to not have any
201  // remote_elem children. Let's make sure that doesn't happen.
202  //
204  const MeshBase::const_element_iterator elem_end = mesh.pid_elements_end(pid);
205  for (; elem_it != elem_end; ++elem_it)
206  {
207  const Elem * elem = *elem_it;
208  if (elem->has_children())
209  for (auto & child : elem->child_ref_range())
210  if (&child != remote_elem)
211  connected_elements.insert(&child);
212  }
213 #endif // LIBMESH_ENABLE_AMR
214 }
215 
216 
217 void connect_families(std::set<const Elem *, CompareElemIdsByLevel> & connected_elements)
218 {
219  // This parameter is not used when !LIBMESH_ENABLE_AMR.
220  libmesh_ignore(connected_elements);
221 
222 #ifdef LIBMESH_ENABLE_AMR
223 
224  // Because our set is sorted by ascending level, we can traverse it
225  // in reverse order, adding parents as we go, and end up with all
226  // ancestors added. This is safe for std::set where insert doesn't
227  // invalidate iterators.
228  //
229  // This only works because we do *not* cache
230  // connected_elements.rend(), whose value can change when we insert
231  // elements which are sorted before the original rend.
232  //
233  // We're also going to get subactive descendents here, when any
234  // exist. We're iterating in the wrong direction to do that
235  // non-recursively, so we'll cop out and rely on total_family_tree.
236  // Iterating backwards does mean that we won't be querying the newly
237  // inserted subactive elements redundantly.
238 
239  std::set<const Elem *, CompareElemIdsByLevel>::reverse_iterator
240  elem_rit = connected_elements.rbegin();
241 
242  for (; elem_rit != connected_elements.rend(); ++elem_rit)
243  {
244  const Elem * elem = *elem_rit;
245  libmesh_assert(elem);
246  const Elem * parent = elem->parent();
247 
248  // We let ghosting functors worry about only active elements,
249  // but the remote processor needs all its semilocal elements'
250  // ancestors and active semilocal elements' descendants too.
251  if (parent)
252  connected_elements.insert (parent);
253 
254  if (elem->active() && elem->has_children())
255  {
256  std::vector<const Elem *> subactive_family;
257  elem->total_family_tree(subactive_family);
258  for (std::size_t i=0; i != subactive_family.size(); ++i)
259  {
260  libmesh_assert(subactive_family[i] != remote_elem);
261  connected_elements.insert(subactive_family[i]);
262  }
263  }
264  }
265 
266 # ifdef DEBUG
267  // Let's be paranoid and make sure that all our ancestors
268  // really did get inserted. I screwed this up the first time
269  // by caching rend, and I can easily imagine screwing it up in
270  // the future by changing containers.
271  std::set<const Elem *, CompareElemIdsByLevel>::iterator
272  elem_it = connected_elements.begin(),
273  elem_end = connected_elements.end();
274 
275  for (; elem_it != elem_end; ++elem_it)
276  {
277  const Elem * elem = *elem_it;
278  libmesh_assert(elem);
279  const Elem * parent = elem->parent();
280  if (parent)
281  libmesh_assert(connected_elements.count(parent));
282  }
283 # endif // DEBUG
284 
285 #endif // LIBMESH_ENABLE_AMR
286 }
287 
288 
289 void reconnect_nodes (const std::set<const Elem *, CompareElemIdsByLevel> & connected_elements,
290  std::set<const Node *> & connected_nodes)
291 {
292  // We're done using the nodes list for element decisions; now
293  // let's reuse it for nodes of the elements we've decided on.
294  connected_nodes.clear();
295 
296  std::set<const Elem *, CompareElemIdsByLevel>::iterator
297  elem_it = connected_elements.begin(),
298  elem_end = connected_elements.end();
299 
300  for (; elem_it!=elem_end; ++elem_it)
301  {
302  const Elem * elem = *elem_it;
303 
304  for (auto & n : elem->node_ref_range())
305  connected_nodes.insert(&n);
306  }
307 }
308 
309 
310 
311 
312 // ------------------------------------------------------------
313 // MeshCommunication class members
315 {
316  // _neighboring_processors.clear();
317 }
318 
319 
320 
321 #ifndef LIBMESH_HAVE_MPI // avoid spurious gcc warnings
322 // ------------------------------------------------------------
324 {
325  // no MPI == one processor, no redistribution
326  return;
327 }
328 
329 #else
330 // ------------------------------------------------------------
332  bool newly_coarsened_only) const
333 {
334  // This method will be called after a new partitioning has been
335  // assigned to the elements. This partitioning was defined in
336  // terms of the active elements, and "trickled down" to the
337  // parents and nodes as to be consistent.
338  //
339  // The point is that the entire concept of local elements is
340  // kinda shaky in this method. Elements which were previously
341  // local may now be assigned to other processors, so we need to
342  // send those off. Similarly, we need to accept elements from
343  // other processors.
344 
345  // This method is also useful in the more limited case of
346  // post-coarsening redistribution: if elements are only ghosting
347  // neighbors of their active elements, but adaptive coarsening
348  // causes an inactive element to become active, then we may need a
349  // copy of that inactive element's neighbors.
350 
351  // The approach is as follows:
352  // (1) send all relevant elements we have stored to their proper homes
353  // (2) receive elements from all processors, watching for duplicates
354  // (3) deleting all nonlocal elements elements
355  // (4) obtaining required ghost elements from neighboring processors
356  libmesh_parallel_only(mesh.comm());
357  libmesh_assert (!mesh.is_serial());
359  mesh.unpartitioned_elements_end()) == 0);
360 
361  LOG_SCOPE("redistribute()", "MeshCommunication");
362 
363  // Get a few unique message tags to use in communications; we'll
364  // default to some numbers around pi*1000
366  nodestag = mesh.comm().get_unique_tag(3141),
367  elemstag = mesh.comm().get_unique_tag(3142);
368 
369  // Figure out how many nodes and elements we have which are assigned to each
370  // processor. send_n_nodes_and_elem_per_proc contains the number of nodes/elements
371  // we will be sending to each processor, recv_n_nodes_and_elem_per_proc contains
372  // the number of nodes/elements we will be receiving from each processor.
373  // Format:
374  // send_n_nodes_and_elem_per_proc[2*pid+0] = number of nodes to send to pid
375  // send_n_nodes_and_elem_per_proc[2*pid+1] = number of elements to send to pid
376  std::vector<dof_id_type> send_n_nodes_and_elem_per_proc(2*mesh.n_processors(), 0);
377 
378  std::vector<Parallel::Request>
379  node_send_requests, element_send_requests;
380 
381  for (processor_id_type pid=0; pid<mesh.n_processors(); pid++)
382  if (pid != mesh.processor_id()) // don't send to ourselves!!
383  {
384  // Build up a list of nodes and elements to send to processor pid.
385  // We will certainly send all the elements assigned to this processor,
386  // but we will also ship off any elements which are required
387  // to be ghosted and any nodes which are used by any of the
388  // above.
389 
390  std::set<const Elem *, CompareElemIdsByLevel> elements_to_send;
391 
392  // See which to-be-ghosted elements we need to send
393  query_ghosting_functors(mesh, pid, newly_coarsened_only, elements_to_send);
394 
395  // The inactive elements we need to send should have their
396  // immediate children present.
397  connect_children(mesh, pid, elements_to_send);
398 
399  // The elements we need should have their ancestors and their
400  // subactive children present too.
401  connect_families(elements_to_send);
402 
403  std::set<const Node *> connected_nodes;
404  reconnect_nodes(elements_to_send, connected_nodes);
405 
406  // the number of nodes we will ship to pid
407  send_n_nodes_and_elem_per_proc[2*pid+0] =
408  cast_int<dof_id_type>(connected_nodes.size());
409 
410  // send any nodes off to the destination processor
411  if (!connected_nodes.empty())
412  {
413  node_send_requests.push_back(Parallel::request());
414 
415  mesh.comm().send_packed_range (pid,
416  &mesh,
417  connected_nodes.begin(),
418  connected_nodes.end(),
419  node_send_requests.back(),
420  nodestag);
421  }
422 
423  // the number of elements we will send to this processor
424  send_n_nodes_and_elem_per_proc[2*pid+1] =
425  cast_int<dof_id_type>(elements_to_send.size());
426 
427  if (!elements_to_send.empty())
428  {
429  // send the elements off to the destination processor
430  element_send_requests.push_back(Parallel::request());
431 
432  mesh.comm().send_packed_range (pid,
433  &mesh,
434  elements_to_send.begin(),
435  elements_to_send.end(),
436  element_send_requests.back(),
437  elemstag);
438  }
439  }
440 
441  std::vector<dof_id_type> recv_n_nodes_and_elem_per_proc(send_n_nodes_and_elem_per_proc);
442 
443  mesh.comm().alltoall (recv_n_nodes_and_elem_per_proc);
444 
445  // In general we will only need to communicate with a subset of the other processors.
446  // I can't immediately think of a case where we will send elements but not nodes, but
447  // these are only bools and we have the information anyway...
448  std::vector<bool>
449  send_node_pair(mesh.n_processors(),false), send_elem_pair(mesh.n_processors(),false),
450  recv_node_pair(mesh.n_processors(),false), recv_elem_pair(mesh.n_processors(),false);
451 
452  unsigned int
453  n_send_node_pairs=0, n_send_elem_pairs=0,
454  n_recv_node_pairs=0, n_recv_elem_pairs=0;
455 
456  for (processor_id_type pid=0; pid<mesh.n_processors(); pid++)
457  {
458  if (send_n_nodes_and_elem_per_proc[2*pid+0]) // we have nodes to send
459  {
460  send_node_pair[pid] = true;
461  n_send_node_pairs++;
462  }
463 
464  if (send_n_nodes_and_elem_per_proc[2*pid+1]) // we have elements to send
465  {
466  send_elem_pair[pid] = true;
467  n_send_elem_pairs++;
468  }
469 
470  if (recv_n_nodes_and_elem_per_proc[2*pid+0]) // we have nodes to receive
471  {
472  recv_node_pair[pid] = true;
473  n_recv_node_pairs++;
474  }
475 
476  if (recv_n_nodes_and_elem_per_proc[2*pid+1]) // we have elements to receive
477  {
478  recv_elem_pair[pid] = true;
479  n_recv_elem_pairs++;
480  }
481  }
482  libmesh_assert_equal_to (n_send_node_pairs, node_send_requests.size());
483  libmesh_assert_equal_to (n_send_elem_pairs, element_send_requests.size());
484 
485  // Receive nodes.
486  // We now know how many processors will be sending us nodes.
487  for (unsigned int node_comm_step=0; node_comm_step<n_recv_node_pairs; node_comm_step++)
488  // but we don't necessarily want to impose an ordering, so
489  // just process whatever message is available next.
491  &mesh,
494  nodestag);
495 
496  // Receive elements.
497  // Similarly we know how many processors are sending us elements,
498  // but we don't really care in what order we receive them.
499  for (unsigned int elem_comm_step=0; elem_comm_step<n_recv_elem_pairs; elem_comm_step++)
501  &mesh,
504  elemstag);
505 
506  // Wait for all sends to complete
507  Parallel::wait (node_send_requests);
508  Parallel::wait (element_send_requests);
509 
510  // Check on the redistribution consistency
511 #ifdef DEBUG
513 
515 #endif
516 
517  // If we had a point locator, it's invalid now that there are new
518  // elements it can't locate.
519  mesh.clear_point_locator();
520 
521  // We now have all elements and nodes redistributed; our ghosting
522  // functors should be ready to redistribute and/or recompute any
523  // cached data they use too.
524  std::set<GhostingFunctor *>::iterator gf_it = mesh.ghosting_functors_begin();
525  const std::set<GhostingFunctor *>::iterator gf_end = mesh.ghosting_functors_end();
526  for (; gf_it != gf_end; ++gf_it)
527  {
528  GhostingFunctor * gf = *gf_it;
529  gf->redistribute();
530  }
531 
532 }
533 #endif // LIBMESH_HAVE_MPI
534 
535 
536 
537 #ifndef LIBMESH_HAVE_MPI // avoid spurious gcc warnings
538 // ------------------------------------------------------------
540 {
541  // no MPI == one processor, no need for this method...
542  return;
543 }
544 #else
545 // ------------------------------------------------------------
547 {
548  // Don't need to do anything if there is
549  // only one processor.
550  if (mesh.n_processors() == 1)
551  return;
552 
553  // This function must be run on all processors at once
554  libmesh_parallel_only(mesh.comm());
555 
556  LOG_SCOPE("gather_neighboring_elements()", "MeshCommunication");
557 
558  //------------------------------------------------------------------
559  // The purpose of this function is to provide neighbor data structure
560  // consistency for a parallel, distributed mesh. In libMesh we require
561  // that each local element have access to a full set of valid face
562  // neighbors. In some cases this requires us to store "ghost elements" -
563  // elements that belong to other processors but we store to provide
564  // data structure consistency. Also, it is assumed that any element
565  // with a NULL neighbor resides on a physical domain boundary. So,
566  // even our "ghost elements" must have non-NULL neighbors. To handle
567  // this the concept of "RemoteElem" is used - a special construct which
568  // is used to denote that an element has a face neighbor, but we do
569  // not actually store detailed information about that neighbor. This
570  // is required to prevent data structure explosion.
571  //
572  // So when this method is called we should have only local elements.
573  // These local elements will then find neighbors among the local
574  // element set. After this is completed, any element with a NULL
575  // neighbor has either (i) a face on the physical boundary of the mesh,
576  // or (ii) a neighboring element which lives on a remote processor.
577  // To handle case (ii), we communicate the global node indices connected
578  // to all such faces to our neighboring processors. They then send us
579  // all their elements with a NULL neighbor that are connected to any
580  // of the nodes in our list.
581  //------------------------------------------------------------------
582 
583  // Let's begin with finding consistent neighbor data information
584  // for all the elements we currently have. We'll use a clean
585  // slate here - clear any existing information, including RemoteElem's.
586  mesh.find_neighbors (/* reset_remote_elements = */ true,
587  /* reset_current_list = */ true);
588 
589  // Get a unique message tag to use in communications; we'll default
590  // to some numbers around pi*10000
592  element_neighbors_tag = mesh.comm().get_unique_tag(31416);
593 
594  // Now any element with a NULL neighbor either
595  // (i) lives on the physical domain boundary, or
596  // (ii) lives on an inter-processor boundary.
597  // We will now gather all the elements from adjacent processors
598  // which are of the same state, which should address all the type (ii)
599  // elements.
600 
601  // A list of all the processors which *may* contain neighboring elements.
602  // (for development simplicity, just make this the identity map)
603  std::vector<processor_id_type> adjacent_processors;
604  for (processor_id_type pid=0; pid<mesh.n_processors(); pid++)
605  if (pid != mesh.processor_id())
606  adjacent_processors.push_back (pid);
607 
608 
609  const processor_id_type n_adjacent_processors =
610  cast_int<processor_id_type>(adjacent_processors.size());
611 
612  //-------------------------------------------------------------------------
613  // Let's build a list of all nodes which live on NULL-neighbor sides.
614  // For simplicity, we will use a set to build the list, then transfer
615  // it to a vector for communication.
616  std::vector<dof_id_type> my_interface_node_list;
617  std::vector<const Elem *> my_interface_elements;
618  {
619  std::set<dof_id_type> my_interface_node_set;
620 
621  // since parent nodes are a subset of children nodes, this should be sufficient
622  for (const auto & elem : mesh.active_local_element_ptr_range())
623  {
624  libmesh_assert(elem);
625 
626  if (elem->on_boundary()) // denotes *any* side has a NULL neighbor
627  {
628  my_interface_elements.push_back(elem); // add the element, but only once, even
629  // if there are multiple NULL neighbors
630  for (auto s : elem->side_index_range())
631  if (elem->neighbor_ptr(s) == libmesh_nullptr)
632  {
634 
635  for (unsigned int n=0; n<side->n_vertices(); n++)
636  my_interface_node_set.insert (side->node_id(n));
637  }
638  }
639  }
640 
641  my_interface_node_list.reserve (my_interface_node_set.size());
642  my_interface_node_list.insert (my_interface_node_list.end(),
643  my_interface_node_set.begin(),
644  my_interface_node_set.end());
645  }
646 
647  // we will now send my_interface_node_list to all of the adjacent processors.
648  // note that for the time being we will copy the list to a unique buffer for
649  // each processor so that we can use a nonblocking send and not access the
650  // buffer again until the send completes. it is my understanding that the
651  // MPI 2.1 standard seeks to remove this restriction as unnecessary, so in
652  // the future we should change this to send the same buffer to each of the
653  // adjacent processors. - BSK 11/17/2008
654  std::vector<std::vector<dof_id_type>>
655  my_interface_node_xfer_buffers (n_adjacent_processors, my_interface_node_list);
656  std::map<processor_id_type, unsigned char> n_comm_steps;
657 
658  std::vector<Parallel::Request> send_requests (3*n_adjacent_processors);
659  unsigned int current_request = 0;
660 
661  for (unsigned int comm_step=0; comm_step<n_adjacent_processors; comm_step++)
662  {
663  n_comm_steps[adjacent_processors[comm_step]]=1;
664  mesh.comm().send (adjacent_processors[comm_step],
665  my_interface_node_xfer_buffers[comm_step],
666  send_requests[current_request++],
667  element_neighbors_tag);
668  }
669 
670  //-------------------------------------------------------------------------
671  // processor pairings are symmetric - I expect to receive an interface node
672  // list from each processor in adjacent_processors as well!
673  // now we will catch an incoming node list for each of our adjacent processors.
674  //
675  // we are done with the adjacent_processors list - note that it is in general
676  // a superset of the processors we truly share elements with. so let's
677  // clear the superset list, and we will fill it with the true list.
678  adjacent_processors.clear();
679 
680  std::vector<dof_id_type> common_interface_node_list;
681 
682  // we expect two classes of messages -
683  // (1) incoming interface node lists, to which we will reply with our elements
684  // touching nodes in the list, and
685  // (2) replies from the requests we sent off previously.
686  // (2.a) - nodes
687  // (2.b) - elements
688  // so we expect 3 communications from each adjacent processor.
689  // by structuring the communication in this way we hopefully impose no
690  // order on the handling of the arriving messages. in particular, we
691  // should be able to handle the case where we receive a request and
692  // all replies from processor A before even receiving a request from
693  // processor B.
694 
695  for (unsigned int comm_step=0; comm_step<3*n_adjacent_processors; comm_step++)
696  {
697  //------------------------------------------------------------------
698  // catch incoming node list
701  element_neighbors_tag));
702  const processor_id_type
703  source_pid_idx = cast_int<processor_id_type>(status.source()),
704  dest_pid_idx = source_pid_idx;
705 
706  //------------------------------------------------------------------
707  // first time - incoming request
708  if (n_comm_steps[source_pid_idx] == 1)
709  {
710  n_comm_steps[source_pid_idx]++;
711 
712  mesh.comm().receive (source_pid_idx,
713  common_interface_node_list,
714  element_neighbors_tag);
715  const std::size_t
716  their_interface_node_list_size = common_interface_node_list.size();
717 
718  // we now have the interface node list from processor source_pid_idx.
719  // now we can find all of our elements which touch any of these nodes
720  // and send copies back to this processor. however, we can make our
721  // search more efficient by first excluding all the nodes in
722  // their list which are not also contained in
723  // my_interface_node_list. we can do this in place as a set
724  // intersection.
725  common_interface_node_list.erase
726  (std::set_intersection (my_interface_node_list.begin(),
727  my_interface_node_list.end(),
728  common_interface_node_list.begin(),
729  common_interface_node_list.end(),
730  common_interface_node_list.begin()),
731  common_interface_node_list.end());
732 
733  if (false)
734  libMesh::out << "[" << mesh.processor_id() << "] "
735  << "my_interface_node_list.size()=" << my_interface_node_list.size()
736  << ", [" << source_pid_idx << "] "
737  << "their_interface_node_list.size()=" << their_interface_node_list_size
738  << ", common_interface_node_list.size()=" << common_interface_node_list.size()
739  << std::endl;
740 
741  // Now we need to see which of our elements touch the nodes in the list.
742  // We will certainly send all the active elements which intersect source_pid_idx,
743  // but we will also ship off the other elements in the same family tree
744  // as the active ones for data structure consistency.
745  //
746  // FIXME - shipping full family trees is unnecessary and inefficient.
747  //
748  // We also ship any nodes connected to these elements. Note
749  // some of these nodes and elements may be replicated from
750  // other processors, but that is OK.
751  std::set<const Elem *, CompareElemIdsByLevel> elements_to_send;
752  std::set<const Node *> connected_nodes;
753 
754  // Check for quick return?
755  if (common_interface_node_list.empty())
756  {
757  // let's try to be smart here - if we have no nodes in common,
758  // we cannot share elements. so post the messages expected
759  // from us here and go on about our business.
760  // note that even though these are nonblocking sends
761  // they should complete essentially instantly, because
762  // in all cases the send buffers are empty
763  mesh.comm().send_packed_range (dest_pid_idx,
764  &mesh,
765  connected_nodes.begin(),
766  connected_nodes.end(),
767  send_requests[current_request++],
768  element_neighbors_tag);
769 
770  mesh.comm().send_packed_range (dest_pid_idx,
771  &mesh,
772  elements_to_send.begin(),
773  elements_to_send.end(),
774  send_requests[current_request++],
775  element_neighbors_tag);
776 
777  continue;
778  }
779  // otherwise, this really *is* an adjacent processor.
780  adjacent_processors.push_back(source_pid_idx);
781 
782  std::vector<const Elem *> family_tree;
783 
784  for (std::size_t e=0, n_shared_nodes=0; e<my_interface_elements.size(); e++, n_shared_nodes=0)
785  {
786  const Elem * elem = my_interface_elements[e];
787 
788  for (unsigned int n=0; n<elem->n_vertices(); n++)
789  if (std::binary_search (common_interface_node_list.begin(),
790  common_interface_node_list.end(),
791  elem->node_id(n)))
792  {
793  n_shared_nodes++;
794 
795  // TBD - how many nodes do we need to share
796  // before we care? certainly 2, but 1? not
797  // sure, so let's play it safe...
798  if (n_shared_nodes > 0) break;
799  }
800 
801  if (n_shared_nodes) // share at least one node?
802  {
803  elem = elem->top_parent();
804 
805  // avoid a lot of duplicated effort -- if we already have elem
806  // in the set its entire family tree is already in the set.
807  if (!elements_to_send.count(elem))
808  {
809 #ifdef LIBMESH_ENABLE_AMR
810  elem->family_tree(family_tree);
811 #else
812  family_tree.clear();
813  family_tree.push_back(elem);
814 #endif
815  for (std::size_t leaf=0; leaf<family_tree.size(); leaf++)
816  {
817  elem = family_tree[leaf];
818  elements_to_send.insert (elem);
819 
820  for (auto & n : elem->node_ref_range())
821  connected_nodes.insert (&n);
822  }
823  }
824  }
825  }
826 
827  // The elements_to_send and connected_nodes sets now contain all
828  // the elements and nodes we need to send to this processor.
829  // All that remains is to pack up the objects (along with
830  // any boundary conditions) and send the messages off.
831  {
832  libmesh_assert (connected_nodes.empty() || !elements_to_send.empty());
833  libmesh_assert (!connected_nodes.empty() || elements_to_send.empty());
834 
835  // send the nodes off to the destination processor
836  mesh.comm().send_packed_range (dest_pid_idx,
837  &mesh,
838  connected_nodes.begin(),
839  connected_nodes.end(),
840  send_requests[current_request++],
841  element_neighbors_tag);
842 
843  // send the elements off to the destination processor
844  mesh.comm().send_packed_range (dest_pid_idx,
845  &mesh,
846  elements_to_send.begin(),
847  elements_to_send.end(),
848  send_requests[current_request++],
849  element_neighbors_tag);
850  }
851  }
852  //------------------------------------------------------------------
853  // second time - reply of nodes
854  else if (n_comm_steps[source_pid_idx] == 2)
855  {
856  n_comm_steps[source_pid_idx]++;
857 
858  mesh.comm().receive_packed_range (source_pid_idx,
859  &mesh,
862  element_neighbors_tag);
863  }
864  //------------------------------------------------------------------
865  // third time - reply of elements
866  else if (n_comm_steps[source_pid_idx] == 3)
867  {
868  n_comm_steps[source_pid_idx]++;
869 
870  mesh.comm().receive_packed_range (source_pid_idx,
871  &mesh,
874  element_neighbors_tag);
875  }
876  //------------------------------------------------------------------
877  // fourth time - shouldn't happen
878  else
879  {
880  libMesh::err << "ERROR: unexpected number of replies: "
881  << n_comm_steps[source_pid_idx]
882  << std::endl;
883  }
884  } // done catching & processing replies associated with tag ~ 100,000pi
885 
886  // allow any pending requests to complete
887  Parallel::wait (send_requests);
888 
889  // If we had a point locator, it's invalid now that there are new
890  // elements it can't locate.
891  mesh.clear_point_locator();
892 
893  // We can now find neighbor information for the interfaces between
894  // local elements and ghost elements.
895  mesh.find_neighbors (/* reset_remote_elements = */ true,
896  /* reset_current_list = */ false);
897 
898  // Ghost elements may not have correct remote_elem neighbor links,
899  // and we may not be able to locally infer correct neighbor links to
900  // remote elements. So we synchronize ghost element neighbor links.
901  SyncNeighbors nsync(mesh);
902 
904  (mesh.comm(), mesh.elements_begin(), mesh.elements_end(), nsync);
905 }
906 #endif // LIBMESH_HAVE_MPI
907 
908 
909 #ifndef LIBMESH_HAVE_MPI // avoid spurious gcc warnings
910 // ------------------------------------------------------------
912 {
913  // no MPI == one processor, no need for this method...
914  return;
915 }
916 #else
918 {
919 
920  // Don't need to do anything if all processors already ghost all non-local
921  // elements.
922  if (mesh.is_serial())
923  return;
924 
925  // This algorithm uses the MeshBase::flagged_pid_elements_begin/end iterators
926  // which are only available when AMR is enabled.
927 #ifndef LIBMESH_ENABLE_AMR
928  libmesh_error_msg("Calling MeshCommunication::send_coarse_ghosts() requires AMR to be enabled. "
929  "Please configure libmesh with --enable-amr.");
930 #else
931  // When we coarsen elements on a DistributedMesh, we make their
932  // parents active. This may increase the ghosting requirements on
933  // the processor which owns the newly-activated parent element. To
934  // ensure ghosting requirements are satisfied, processors which
935  // coarsen an element will send all the associated ghosted elements
936  // to all processors which own any of the coarsened-away-element's
937  // siblings.
938  typedef std::unordered_map<processor_id_type, std::vector<Elem *>> ghost_map;
939  ghost_map elements_to_ghost;
940 
941  const processor_id_type proc_id = mesh.processor_id();
942  // Look for just-coarsened elements
947 
948  for ( ; it != end; ++it)
949  {
950  Elem * elem = *it;
951 
952  // If it's flagged for coarsening it had better have a parent
953  libmesh_assert(elem->parent());
954 
955  // On a distributed mesh:
956  // If we don't own this element's parent but we do own it, then
957  // there is a chance that we are aware of ghost elements which
958  // the parent's owner needs us to send them.
959  const processor_id_type their_proc_id = elem->parent()->processor_id();
960  if (their_proc_id != proc_id)
961  elements_to_ghost[their_proc_id].push_back(elem);
962  }
963 
964  const processor_id_type n_proc = mesh.n_processors();
965 
966  // Get a few unique message tags to use in communications; we'll
967  // default to some numbers around pi*1000
969  nodestag = mesh.comm().get_unique_tag(3141),
970  elemstag = mesh.comm().get_unique_tag(3142);
971 
972  std::vector<Parallel::Request> send_requests;
973 
974  // Using unsigned char instead of bool since it'll get converted for
975  // MPI use later anyway
976  std::vector<unsigned char> send_to_proc(n_proc, 0);
977 
978  for (processor_id_type p=0; p != n_proc; ++p)
979  {
980  if (p == proc_id)
981  break;
982 
983  // We'll send these asynchronously, but their data will be
984  // packed into Parallel:: buffers so it will be okay when the
985  // original containers are destructed before the sends complete.
986  std::set<const Elem *, CompareElemIdsByLevel> elements_to_send;
987  std::set<const Node *> nodes_to_send;
988 
989  const ghost_map::const_iterator it =
990  elements_to_ghost.find(p);
991  if (it != elements_to_ghost.end())
992  {
993  const std::vector<Elem *> & elems = it->second;
994  libmesh_assert(elems.size());
995 
996  // Make some fake element iterators defining this vector of
997  // elements
998  Elem * const * elempp = const_cast<Elem * const *>(&elems[0]);
999  Elem * const * elemend = elempp+elems.size();
1000  const MeshBase::const_element_iterator elem_it =
1002  const MeshBase::const_element_iterator elem_end =
1004 
1005  std::set<GhostingFunctor *>::iterator gf_it = mesh.ghosting_functors_begin();
1006  const std::set<GhostingFunctor *>::iterator gf_end = mesh.ghosting_functors_end();
1007  for (; gf_it != gf_end; ++gf_it)
1008  {
1009  GhostingFunctor::map_type elements_to_ghost;
1010 
1011  GhostingFunctor *gf = *gf_it;
1012  libmesh_assert(gf);
1013  (*gf)(elem_it, elem_end, p, elements_to_ghost);
1014 
1015  // We can ignore the CouplingMatrix in ->second, but we
1016  // need to ghost all the elements in ->first.
1017  GhostingFunctor::map_type::iterator etg_it = elements_to_ghost.begin();
1018  const GhostingFunctor::map_type::iterator etg_end = elements_to_ghost.end();
1019  for (; etg_it != etg_end; ++etg_it)
1020  {
1021  const Elem * elem = etg_it->first;
1022  libmesh_assert(elem);
1023  while (elem)
1024  {
1025  libmesh_assert(elem != remote_elem);
1026  elements_to_send.insert(elem);
1027  for (auto & n : elem->node_ref_range())
1028  nodes_to_send.insert(&n);
1029  elem = elem->parent();
1030  }
1031  }
1032  }
1033 
1034  send_requests.push_back(Parallel::request());
1035 
1036  mesh.comm().send_packed_range (p, &mesh,
1037  nodes_to_send.begin(),
1038  nodes_to_send.end(),
1039  send_requests.back(),
1040  nodestag);
1041 
1042  send_requests.push_back(Parallel::request());
1043 
1044  send_to_proc[p] = 1; // true
1045 
1046  mesh.comm().send_packed_range (p, &mesh,
1047  elements_to_send.begin(),
1048  elements_to_send.end(),
1049  send_requests.back(),
1050  elemstag);
1051  }
1052  }
1053 
1054  // Find out how many other processors are sending us elements+nodes.
1055  std::vector<unsigned char> recv_from_proc(send_to_proc);
1056  mesh.comm().alltoall(recv_from_proc);
1057 
1058  const processor_id_type n_receives =
1059  std::count(recv_from_proc.begin(), recv_from_proc.end(), 1);
1060 
1061  // Receive nodes first since elements will need to attach to them
1062  for (processor_id_type recv_i = 0; recv_i != n_receives; ++recv_i)
1063  {
1064  mesh.comm().receive_packed_range
1066  &mesh,
1068  (Node**)libmesh_nullptr,
1069  nodestag);
1070  }
1071 
1072  for (processor_id_type recv_i = 0; recv_i != n_receives; ++recv_i)
1073  {
1074  mesh.comm().receive_packed_range
1076  &mesh,
1078  (Elem**)libmesh_nullptr,
1079  elemstag);
1080  }
1081 
1082  // Wait for all sends to complete
1083  Parallel::wait (send_requests);
1084 #endif // LIBMESH_ENABLE_AMR
1085 }
1086 
1087 #endif // LIBMESH_HAVE_MPI
1088 
1089 #ifndef LIBMESH_HAVE_MPI // avoid spurious gcc warnings
1090 // ------------------------------------------------------------
1092 {
1093  // no MPI == one processor, no need for this method...
1094  return;
1095 }
1096 #else
1097 // ------------------------------------------------------------
1098 void MeshCommunication::broadcast (MeshBase & mesh) const
1099 {
// Replicates the mesh held on processor 0 onto every other processor:
// nodes first, then elements level-by-level so children find parents.
1100  // Don't need to do anything if there is
1101  // only one processor.
1102  if (mesh.n_processors() == 1)
1103  return;
1104 
1105  // This function must be run on all processors at once
1106  libmesh_parallel_only(mesh.comm());
1107 
1108  LOG_SCOPE("broadcast()", "MeshCommunication");
1109 
1110  // Explicitly clear the mesh on all but processor 0.
1111  if (mesh.processor_id() != 0)
1112  mesh.clear();
1113 
1114  // Broadcast nodes
// NOTE(review): the final (output-iterator) argument of this call was
// elided in this extraction -- presumably a mesh_inserter_iterator<Node>;
// confirm against the upstream source.
1115  mesh.comm().broadcast_packed_range(&mesh,
1116  mesh.nodes_begin(),
1117  mesh.nodes_end(),
1118  &mesh,
1120 
1121  // Broadcast elements from coarsest to finest, so that child
1122  // elements will see their parents already in place.
1123  //
1124  // When restarting from a checkpoint, we may have elements which are
1125  // assigned to a processor but which have not yet been sent to that
1126  // processor, so we need to use a paranoid n_levels() count and not
1127  // the usual fast algorithm.
1128  const unsigned int n_levels = MeshTools::paranoid_n_levels(mesh);
1129 
// One collective broadcast per refinement level, coarsest first.
1130  for (unsigned int l=0; l != n_levels; ++l)
1131  mesh.comm().broadcast_packed_range(&mesh,
1132  mesh.level_elements_begin(l),
1133  mesh.level_elements_end(l),
1134  &mesh,
1136 
1137  // Make sure mesh_dimension and elem_dimensions are consistent.
1138  mesh.cache_elem_dims();
1139 
1140  // Broadcast all of the named entity information
// NOTE(review): additional broadcasts of other name maps (lines elided
// here) likely follow -- confirm against the upstream source.
1141  mesh.comm().broadcast(mesh.set_subdomain_name_map());
1144 
1145  // If we had a point locator, it's invalid now that there are new
1146  // elements it can't locate.
1147  mesh.clear_point_locator();
1148 
// Sanity check: every rank must now agree on global counts.
1149  libmesh_assert (mesh.comm().verify(mesh.n_elem()));
1150  libmesh_assert (mesh.comm().verify(mesh.n_nodes()));
1151 
1152 #ifdef DEBUG
1153  MeshTools::libmesh_assert_valid_procids<Elem>(mesh);
1154  MeshTools::libmesh_assert_valid_procids<Node>(mesh);
1155 #endif
1156 }
1157 #endif // LIBMESH_HAVE_MPI
1158 
1159 
1160 
1161 #ifndef LIBMESH_HAVE_MPI // avoid spurious gcc warnings
1162 // ------------------------------------------------------------
1164 {
1165  // no MPI == one processor, no need for this method...
1166  return;
1167 }
1168 #else
1169 // ------------------------------------------------------------
1170 void MeshCommunication::gather (const processor_id_type root_id, DistributedMesh & mesh) const
1171 {
// Collects the distributed mesh onto root_id; when root_id is
// DofObject::invalid_processor_id this performs an allgather instead,
// serializing the mesh on every processor.
1172  // Check for quick return
1173  if (mesh.n_processors() == 1)
1174  return;
1175 
1176  // This function must be run on all processors at once
1177  libmesh_parallel_only(mesh.comm());
1178 
1179  LOG_SCOPE("(all)gather()", "MeshCommunication");
1180 
// The ?: operator selects between the allgather and the rooted gather
// forms of the same packed-range communication (both branches are
// evaluated for side effects only).
1181  (root_id == DofObject::invalid_processor_id) ?
1182 
1183  mesh.comm().allgather_packed_range (&mesh,
1184  mesh.nodes_begin(),
1185  mesh.nodes_end(),
1187 
1188  mesh.comm().gather_packed_range (root_id,
1189  &mesh,
1190  mesh.nodes_begin(),
1191  mesh.nodes_end(),
1193 
1194  // Gather elements from coarsest to finest, so that child
1195  // elements will see their parents already in place.
1196  const unsigned int n_levels = MeshTools::n_levels(mesh);
1197 
1198  for (unsigned int l=0; l != n_levels; ++l)
1199  (root_id == DofObject::invalid_processor_id) ?
1200 
1201  mesh.comm().allgather_packed_range (&mesh,
1202  mesh.level_elements_begin(l),
1203  mesh.level_elements_end(l),
1205 
1206  mesh.comm().gather_packed_range (root_id,
1207  &mesh,
1208  mesh.level_elements_begin(l),
1209  mesh.level_elements_end(l),
1211 
1212  // If we had a point locator, it's invalid now that there are new
1213  // elements it can't locate.
1214  mesh.clear_point_locator();
1215 
1216  // If we are doing an allgather(), perform sanity check on the result.
1217  if (root_id == DofObject::invalid_processor_id)
1218  {
1219  libmesh_assert (mesh.comm().verify(mesh.n_elem()));
1220  libmesh_assert (mesh.comm().verify(mesh.n_nodes()));
1221  }
1222 
1223  // Inform new elements of their neighbors,
1224  // while resetting all remote_elem links on
1225  // the ranks which did the gather.
// NOTE(review): the call line itself (presumably mesh.find_neighbors(...))
// was elided in this extraction -- confirm against the upstream source.
1227  root_id == mesh.processor_id());
1228 
1229  // All done, but let's make sure it's done correctly
1230 
1231 #ifdef DEBUG
1233 #endif
1234 }
1235 #endif // LIBMESH_HAVE_MPI
1236 
1237 
1238 
1239 // Functor for make_elems_parallel_consistent and
1240 // make_node_ids_parallel_consistent
1241 namespace {
1242 
1243 struct SyncIds
// Functor for the Parallel::sync_* algorithms: propagates authoritative
// DofObject ids and renumbers local objects to match, via a
// pointer-to-member renumbering function (renumber_elem/renumber_node).
1244 {
1245  typedef dof_id_type datum;
1246  typedef void (MeshBase::*renumber_obj)(dof_id_type, dof_id_type);
1247 
1248  SyncIds(MeshBase & _mesh, renumber_obj _renumberer) :
1249  mesh(_mesh),
1250  renumber(_renumberer) {}
1251 
// NOTE(review): the "MeshBase & mesh;" member declaration appears to have
// been elided by the extraction here -- the constructor initializes both
// mesh and renumber.  Confirm against the upstream source.
1253  renumber_obj renumber;
1254  // renumber_obj & renumber;
1255 
1256  // Find the id of each requested DofObject -
1257  // Parallel::sync_* already did the work for us
1258  void gather_data (const std::vector<dof_id_type> & ids,
1259  std::vector<datum> & ids_out) const
1260  {
1261  ids_out = ids;
1262  }
1263 
// Renumber any local object whose id disagrees with the authoritative
// id supplied by its owner.
1264  void act_on_data (const std::vector<dof_id_type> & old_ids,
1265  std::vector<datum> & new_ids) const
1266  {
1267  for (std::size_t i=0; i != old_ids.size(); ++i)
1268  if (old_ids[i] != new_ids[i])
1269  (mesh.*renumber)(old_ids[i], new_ids[i]);
1270  }
1271 };
1272 
1273 
1274 struct SyncNodeIds
// Functor for Parallel::sync_*: establishes a single authoritative id
// for every node, tracking which ghost-node ids are known-definitive so
// repeated sync rounds can converge.
1275 {
1276  typedef dof_id_type datum;
1277 
1278  SyncNodeIds(MeshBase & _mesh) :
1279  mesh(_mesh) {}
1280 
1281  MeshBase & mesh;
1282 
1283  // We only know a Node id() is definitive if we own the Node or if
1284  // we're told it's definitive. We keep track of the latter cases by
1285  // putting ghost node definitive ids into this set.
1286  typedef std::unordered_set<dof_id_type> uset_type;
1287  uset_type definitive_ids;
1288 
1289  // We should never be told two different definitive ids for the same
1290  // node, but let's check on that in debug mode.
1291 #ifdef DEBUG
// NOTE(review): the "umap_type definitive_renumbering;" member
// declaration appears to have been elided by the extraction here (the
// name is used below) -- confirm against the upstream source.
1292  typedef std::unordered_map<dof_id_type, dof_id_type> umap_type;
1294 #endif
1295 
1296  // Find the id of each requested DofObject -
1297  // Parallel::sync_* already tried to do the work for us, but we can
1298  // only say the result is definitive if we own the DofObject or if
1299  // we were given the definitive result from another processor.
1300  void gather_data (const std::vector<dof_id_type> & ids,
1301  std::vector<datum> & ids_out) const
1302  {
// invalid_id marks answers we cannot vouch for; callers skip them.
1303  ids_out.clear();
1304  ids_out.resize(ids.size(), DofObject::invalid_id);
1305 
1306  for (std::size_t i = 0; i != ids.size(); ++i)
1307  {
1308  const dof_id_type id = ids[i];
1309  const Node * node = mesh.node_ptr(id);
1310  if (node->processor_id() == mesh.processor_id() ||
1311  definitive_ids.count(id))
1312  ids_out[i] = id;
1313  }
1314  }
1315 
// Apply definitive ids received from other processors; returns true if
// any renumbering occurred (so the sync loop knows to keep iterating).
1316  bool act_on_data (const std::vector<dof_id_type> & old_ids,
1317  std::vector<datum> & new_ids)
1318  {
1319  bool data_changed = false;
1320  for (std::size_t i=0; i != old_ids.size(); ++i)
1321  {
1322  const dof_id_type new_id = new_ids[i];
1323  if (new_id != DofObject::invalid_id)
1324  {
1325  const dof_id_type old_id = old_ids[i];
1326 
1327  Node * node = mesh.query_node_ptr(old_id);
1328 
1329  // If we can't find the node we were asking about, another
1330  // processor must have already given us the definitive id
1331  // for it
1332  if (!node)
1333  {
1334  // But let's check anyway in debug mode
1335 #ifdef DEBUG
1337  (definitive_renumbering.count(old_id));
1338  libmesh_assert_equal_to
1339  (new_id, definitive_renumbering[old_id]);
1340 #endif
1341  continue;
1342  }
1343 
// Remember that this ghost node's id is now authoritative.
1344  if (node->processor_id() != mesh.processor_id())
1345  definitive_ids.insert(new_id);
1346  if (old_id != new_id)
1347  {
1348 #ifdef DEBUG
1350  (!definitive_renumbering.count(old_id));
1351  definitive_renumbering[old_id] = new_id;
1352 #endif
1353  mesh.renumber_node(old_id, new_id);
1354  data_changed = true;
1355  }
1356  }
1357  }
1358  return data_changed;
1359  }
1360 };
1361 
1362 
1363 #ifdef LIBMESH_ENABLE_AMR
1364 struct SyncPLevels
1365 {
1366  typedef unsigned char datum;
1367 
1368  SyncPLevels(MeshBase & _mesh) :
1369  mesh(_mesh) {}
1370 
1371  MeshBase & mesh;
1372 
1373  // Find the p_level of each requested Elem
1374  void gather_data (const std::vector<dof_id_type> & ids,
1375  std::vector<datum> & ids_out) const
1376  {
1377  ids_out.reserve(ids.size());
1378 
1379  for (std::size_t i=0; i != ids.size(); ++i)
1380  {
1381  Elem & elem = mesh.elem_ref(ids[i]);
1382 
1383  ids_out.push_back(elem.p_level());
1384  }
1385  }
1386 
1387  void act_on_data (const std::vector<dof_id_type> & old_ids,
1388  std::vector<datum> & new_p_levels) const
1389  {
1390  for (std::size_t i=0; i != old_ids.size(); ++i)
1391  {
1392  Elem & elem = mesh.elem_ref(old_ids[i]);
1393 
1394  elem.set_p_level(new_p_levels[i]);
1395  }
1396  }
1397 };
1398 #endif // LIBMESH_ENABLE_AMR
1399 
1400 
1401 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1402 template <typename DofObjSubclass>
1403 struct SyncUniqueIds
1404 {
1405  typedef unique_id_type datum;
1406  typedef DofObjSubclass* (MeshBase::*query_obj)(const dof_id_type);
1407 
1408  SyncUniqueIds(MeshBase &_mesh, query_obj _querier) :
1409  mesh(_mesh),
1410  query(_querier) {}
1411 
1412  MeshBase &mesh;
1413  query_obj query;
1414 
1415  // Find the id of each requested DofObject -
1416  // Parallel::sync_* already did the work for us
1417  void gather_data (const std::vector<dof_id_type>& ids,
1418  std::vector<datum>& ids_out) const
1419  {
1420  ids_out.reserve(ids.size());
1421 
1422  for (std::size_t i=0; i != ids.size(); ++i)
1423  {
1424  DofObjSubclass *d = (mesh.*query)(ids[i]);
1425  libmesh_assert(d);
1426 
1427  ids_out.push_back(d->unique_id());
1428  }
1429  }
1430 
1431  void act_on_data (const std::vector<dof_id_type>& ids,
1432  std::vector<datum>& unique_ids) const
1433  {
1434  for (std::size_t i=0; i != ids.size(); ++i)
1435  {
1436  DofObjSubclass *d = (mesh.*query)(ids[i]);
1437  libmesh_assert(d);
1438 
1439  d->set_unique_id() = unique_ids[i];
1440  }
1441  }
1442 };
1443 #endif // LIBMESH_ENABLE_UNIQUE_ID
1444 }
1445 
1446 
1447 
1448 // ------------------------------------------------------------
// NOTE(review): the signature line was elided by the extraction; the
// LOG_SCOPE below identifies this body as
// MeshCommunication::make_node_ids_parallel_consistent(MeshBase &).
1450 {
1451  // This function must be run on all processors at once
1452  libmesh_parallel_only(mesh.comm());
1453 
1454  LOG_SCOPE ("make_node_ids_parallel_consistent()", "MeshCommunication");
1455 
// Pull each ghost node's definitive id from its owning processor (see
// the SyncNodeIds functor above).  The sync call line itself is
// partially elided in this extraction -- confirm upstream.
1456  SyncNodeIds syncids(mesh);
1458  (mesh, mesh.elements_begin(), mesh.elements_end(),
1460 }
1461 
1462 
1463 
// NOTE(review): the signature line was elided by the extraction; the
// LOG_SCOPE below identifies this body as
// MeshCommunication::make_node_unique_ids_parallel_consistent(MeshBase &).
1465 {
1466  // Avoid unused variable warnings if unique ids aren't enabled.
1467  libmesh_ignore(mesh);
1468 
1469  // This function must be run on all processors at once
1470  libmesh_parallel_only(mesh.comm());
1471 
1472 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1473  LOG_SCOPE ("make_node_unique_ids_parallel_consistent()", "MeshCommunication");
1474 
// Sync every node's unique_id from its owner; the sync call line itself
// was partially elided in this extraction -- confirm upstream.
1475  SyncUniqueIds<Node> syncuniqueids(mesh, &MeshBase::query_node_ptr);
1477  mesh.nodes_begin(),
1478  mesh.nodes_end(),
1479  syncuniqueids);
1480 
1481 #endif
1482 }
1483 
1484 
1485 
1486 
1487 // ------------------------------------------------------------
// NOTE(review): the signature line was elided by the extraction; the
// LOG_SCOPE below identifies this body as
// MeshCommunication::make_elems_parallel_consistent(MeshBase &).
1489 {
1490  // This function must be run on all processors at once
1491  libmesh_parallel_only(mesh.comm());
1492 
1493  LOG_SCOPE ("make_elems_parallel_consistent()", "MeshCommunication");
1494 
// Renumber active ghost elements to their owners' authoritative ids
// (SyncIds dispatches to MeshBase::renumber_elem); the sync call lines
// themselves are partially elided in this extraction.
1495  SyncIds syncids(mesh, &MeshBase::renumber_elem);
1497  (mesh, mesh.active_elements_begin(),
1498  mesh.active_elements_end(), syncids);
1499 
// When unique ids are enabled, sync those as well.
1500 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1501  SyncUniqueIds<Elem> syncuniqueids(mesh, &MeshBase::query_elem_ptr);
1503  (mesh.comm(), mesh.active_elements_begin(),
1504  mesh.active_elements_end(), syncuniqueids);
1505 #endif
1506 }
1507 
1508 
1509 
1510 // ------------------------------------------------------------
1511 #ifdef LIBMESH_ENABLE_AMR
// NOTE(review): the signature line was elided by the extraction; the
// LOG_SCOPE below identifies this body as
// MeshCommunication::make_p_levels_parallel_consistent(MeshBase &).
1513 {
1514  // This function must be run on all processors at once
1515  libmesh_parallel_only(mesh.comm());
1516 
1517  LOG_SCOPE ("make_p_levels_parallel_consistent()", "MeshCommunication");
1518 
// Copy each ghost element's p-refinement level from its owner (see the
// SyncPLevels functor above); the sync call line itself was partially
// elided in this extraction.
1519  SyncPLevels syncplevels(mesh);
1521  (mesh.comm(), mesh.elements_begin(), mesh.elements_end(),
1522  syncplevels);
1523 }
1524 #endif // LIBMESH_ENABLE_AMR
1525 
1526 
1527 
1528 // Functors for make_node_proc_ids_parallel_consistent
1529 namespace {
1530 
1531 struct SyncProcIds
1532 {
1533  typedef processor_id_type datum;
1534 
1535  SyncProcIds(MeshBase & _mesh) : mesh(_mesh) {}
1536 
1537  MeshBase & mesh;
1538 
1539  // ------------------------------------------------------------
1540  void gather_data (const std::vector<dof_id_type> & ids,
1541  std::vector<datum> & data)
1542  {
1543  // Find the processor id of each requested node
1544  data.resize(ids.size());
1545 
1546  for (std::size_t i=0; i != ids.size(); ++i)
1547  {
1548  // Look for this point in the mesh
1549  // We'd better find every node we're asked for
1550  Node & node = mesh.node_ref(ids[i]);
1551 
1552  // Return the node's correct processor id,
1553  data[i] = node.processor_id();
1554  }
1555  }
1556 
1557  // ------------------------------------------------------------
1558  bool act_on_data (const std::vector<dof_id_type> & ids,
1559  std::vector<datum> proc_ids)
1560  {
1561  bool data_changed = false;
1562 
1563  // Set the ghost node processor ids we've now been informed of
1564  for (std::size_t i=0; i != ids.size(); ++i)
1565  {
1566  Node & node = mesh.node_ref(ids[i]);
1567 
1568  // If someone tells us our node processor id is too low, then
1569  // they're wrong. If they tell us our node processor id is
1570  // too high, then we're wrong.
1571  if (node.processor_id() > proc_ids[i])
1572  {
1573  data_changed = true;
1574  node.processor_id() = proc_ids[i];
1575  }
1576  }
1577 
1578  return data_changed;
1579  }
1580 };
1581 
1582 
1583 struct ElemNodesMaybeNew
1584 {
1585  ElemNodesMaybeNew() {}
1586 
1587  bool operator() (const Elem * elem) const
1588  {
1589  // If this element was just refined then it may have new nodes we
1590  // need to work on
1591 #ifdef LIBMESH_ENABLE_AMR
1592  if (elem->refinement_flag() == Elem::JUST_REFINED)
1593  return true;
1594 #endif
1595 
1596  // If this element has remote_elem neighbors then there may have
1597  // been refinement of those neighbors that affect its nodes'
1598  // processor_id()
1599  for (auto neigh : elem->neighbor_ptr_range())
1600  if (neigh == remote_elem)
1601  return true;
1602  return false;
1603  }
1604 };
1605 
1606 
1607 struct NodeMaybeNew
1608 {
1609  NodeMaybeNew() {}
1610 
1611  bool operator() (const Elem * elem, unsigned int local_node_num) const
1612  {
1613 #ifdef LIBMESH_ENABLE_AMR
1614  const Elem * parent = elem->parent();
1615 
1616  // If this is a child node which wasn't already a parent node then
1617  // it might be a new node we need to work on.
1618  if (parent)
1619  {
1620  const unsigned int c = parent->which_child_am_i(elem);
1621  if (parent->as_parent_node(c, local_node_num) == libMesh::invalid_uint)
1622  return true;
1623  }
1624 #endif
1625 
1626  // If this node is on a side with a remote element then there may
1627  // have been refinement of that element which affects this node's
1628  // processor_id()
1629  for (auto s : elem->side_index_range())
1630  if (elem->neighbor(s) == remote_elem)
1631  if (elem->is_node_on_side(local_node_num, s))
1632  return true;
1633 
1634  return false;
1635  }
1636 };
1637 
1638 }
1639 
1640 
1641 
1642 // ------------------------------------------------------------
// NOTE(review): the signature line was elided by the extraction; the
// LOG_SCOPE below identifies this body as
// MeshCommunication::make_node_proc_ids_parallel_consistent(MeshBase &).
1644 {
1645  LOG_SCOPE ("make_node_proc_ids_parallel_consistent()", "MeshCommunication");
1646 
1647  // This function must be run on all processors at once
1648  libmesh_parallel_only(mesh.comm());
1649 
1650  // When this function is called, each section of a parallelized mesh
1651  // should be in the following state:
1652  //
1653  // All nodes should have the exact same physical location on every
1654  // processor where they exist.
1655  //
1656  // Local nodes should have unique authoritative ids,
1657  // and processor ids consistent with all processors which own
1658  // an element touching them.
1659  //
1660  // Ghost nodes touching local elements should have processor ids
1661  // consistent with all processors which own an element touching
1662  // them.
// Reduce each node's processor_id to the minimum over all ranks (see
// the SyncProcIds functor above); the sync call line itself was
// partially elided in this extraction.
1663  SyncProcIds sync(mesh);
1665  (mesh, mesh.elements_begin(), mesh.elements_end(),
1667 }
1668 
1669 
1670 
1671 // ------------------------------------------------------------
// NOTE(review): the signature line was elided by the extraction; the
// LOG_SCOPE below identifies this body as
// MeshCommunication::make_new_node_proc_ids_parallel_consistent(MeshBase &).
1673 {
1674  LOG_SCOPE ("make_new_node_proc_ids_parallel_consistent()", "MeshCommunication");
1675 
1676  // This function must be run on all processors at once
1677  libmesh_parallel_only(mesh.comm());
1678 
1679  // When this function is called, each section of a parallelized mesh
1680  // should be in the following state:
1681  //
1682  // Local nodes should have unique authoritative ids,
1683  // and processor ids consistent with all processors which own
1684  // an element touching them.
1685  //
1686  // Ghost nodes touching local elements should have processor ids
1687  // consistent with all processors which own an element touching
1688  // them.
// Same reduction as make_node_proc_ids_parallel_consistent(), but the
// ElemNodesMaybeNew/NodeMaybeNew predicates restrict the sync to nodes
// which may have changed due to recent refinement.
1689  SyncProcIds sync(mesh);
1691  (mesh, mesh.elements_begin(), mesh.elements_end(),
1692  ElemNodesMaybeNew(), NodeMaybeNew(), sync);
1693 }
1694 
1695 
1696 
1697 // ------------------------------------------------------------
// NOTE(review): the signature line was elided by the extraction; the
// calls below identify this body as
// MeshCommunication::make_nodes_parallel_consistent(MeshBase &).
1699 {
1700  // This function must be run on all processors at once
1701  libmesh_parallel_only(mesh.comm());
1702 
1703  // When this function is called, each section of a parallelized mesh
1704  // should be in the following state:
1705  //
1706  // All nodes should have the exact same physical location on every
1707  // processor where they exist.
1708  //
1709  // Local nodes should have unique authoritative ids,
1710  // and processor ids consistent with all processors which own
1711  // an element touching them.
1712  //
1713  // Ghost nodes touching local elements should have processor ids
1714  // consistent with all processors which own an element touching
1715  // them.
1716  //
1717  // Ghost nodes should have ids which are either already correct
1718  // or which are in the "unpartitioned" id space.
1719 
1720  // First, let's sync up processor ids. Some of these processor ids
1721  // may be "wrong" from coarsening, but they're right in the sense
1722  // that they'll tell us who has the authoritative dofobject ids for
1723  // each node.
1724 
1725  this->make_node_proc_ids_parallel_consistent(mesh);
1726 
1727  // Second, sync up dofobject ids.
1728  this->make_node_ids_parallel_consistent(mesh);
1729 
1730  // Third, sync up dofobject unique_ids if applicable.
1731  this->make_node_unique_ids_parallel_consistent(mesh);
1732 
1733  // Finally, correct the processor ids to make DofMap happy
// NOTE(review): the final call line (presumably
// MeshTools::correct_node_proc_ids(mesh);) was elided in this
// extraction -- confirm against the upstream source.
1735 }
1736 
1737 
1738 
1739 // ------------------------------------------------------------
// NOTE(review): the signature line was elided by the extraction; the
// calls below identify this body as
// MeshCommunication::make_new_nodes_parallel_consistent(MeshBase &),
// the post-refinement variant of make_nodes_parallel_consistent().
1741 {
1742  // This function must be run on all processors at once
1743  libmesh_parallel_only(mesh.comm());
1744 
1745  // When this function is called, each section of a parallelized mesh
1746  // should be in the following state:
1747  //
1748  // All nodes should have the exact same physical location on every
1749  // processor where they exist.
1750  //
1751  // Local nodes should have unique authoritative ids,
1752  // and processor ids consistent with all processors which own
1753  // an element touching them.
1754  //
1755  // Ghost nodes touching local elements should have processor ids
1756  // consistent with all processors which own an element touching
1757  // them.
1758  //
1759  // Ghost nodes should have ids which are either already correct
1760  // or which are in the "unpartitioned" id space.
1761 
1762  // First, let's sync up processor ids. Some of these processor ids
1763  // may be "wrong" from coarsening, but they're right in the sense
1764  // that they'll tell us who has the authoritative dofobject ids for
1765  // each node.
1766 
1767  this->make_new_node_proc_ids_parallel_consistent(mesh);
1768 
1769  // Second, sync up dofobject ids.
1770  this->make_node_ids_parallel_consistent(mesh);
1771 
1772  // Third, sync up dofobject unique_ids if applicable.
1773  this->make_node_unique_ids_parallel_consistent(mesh);
1774 
1775  // Finally, correct the processor ids to make DofMap happy
// NOTE(review): the final call line (presumably
// MeshTools::correct_node_proc_ids(mesh);) was elided in this
// extraction -- confirm against the upstream source.
1777 }
1778 
1779 
1780 
1781 // ------------------------------------------------------------
// NOTE(review): the name line was elided by the extraction; the
// LOG_SCOPE below identifies this body as
// MeshCommunication::delete_remote_elements(DistributedMesh &, ...).
// It removes every element/node this processor has no reason to keep,
// leaving local elements, required ghosts, and their support.
1782 void
1784  const std::set<Elem *> & extra_ghost_elem_ids) const
1785 {
1786  // The mesh should know it's about to be parallelized
1787  libmesh_assert (!mesh.is_serial());
1788 
1789  LOG_SCOPE("delete_remote_elements()", "MeshCommunication");
1790 
1791 #ifdef DEBUG
1792  // We expect maximum ids to be in sync so we can use them to size
1793  // vectors
1794  libmesh_assert(mesh.comm().verify(mesh.max_node_id()));
1795  libmesh_assert(mesh.comm().verify(mesh.max_elem_id()));
1796  const dof_id_type par_max_node_id = mesh.parallel_max_node_id();
1797  const dof_id_type par_max_elem_id = mesh.parallel_max_elem_id();
1798  libmesh_assert_equal_to (par_max_node_id, mesh.max_node_id());
1799  libmesh_assert_equal_to (par_max_elem_id, mesh.max_elem_id());
1800 #endif
1801 
// Ordered coarsest-first so later deletion (finest-first) stays valid.
1802  std::set<const Elem *, CompareElemIdsByLevel> elements_to_keep;
1803 
1804  // Don't delete elements that we were explicitly told not to
1805  for (std::set<Elem *>::iterator it = extra_ghost_elem_ids.begin();
1806  it != extra_ghost_elem_ids.end(); ++it)
1807  {
1808  const Elem * elem = *it;
1809 
// Keep the whole active family of each requested element, not just the
// element itself (subactive elements have no active family tree).
1810  std::vector<const Elem *> active_family;
1811 #ifdef LIBMESH_ENABLE_AMR
1812  if (!elem->subactive())
1813  elem->active_family_tree(active_family);
1814  else
1815 #endif
1816  active_family.push_back(elem);
1817 
1818  for (std::size_t i=0; i != active_family.size(); ++i)
1819  elements_to_keep.insert(active_family[i]);
1820  }
1821 
1822  // See which elements we still need to keep ghosted, given that
1823  // we're keeping local and unpartitioned elements.
// NOTE(review): the two call lines here (presumably query_ghosting_functors
// for mesh.processor_id() and DofObject::invalid_processor_id) were
// elided in this extraction -- confirm against the upstream source.
1825  false, elements_to_keep);
1827  false, elements_to_keep);
1828 
1829  // The inactive elements we need to send should have their
1830  // immediate children present.
1831  connect_children(mesh, mesh.processor_id(), elements_to_keep);
1832  connect_children(mesh, DofObject::invalid_processor_id, elements_to_keep);
1833 
1834  // The elements we need should have their ancestors and their
1835  // subactive children present too.
1836  connect_families(elements_to_keep);
1837 
1838  // Don't delete nodes that our semilocal elements need
1839  std::set<const Node *> connected_nodes;
1840  reconnect_nodes(elements_to_keep, connected_nodes);
1841 
1842  // Delete all the elements we have no reason to save,
1843  // starting with the most refined so that the mesh
1844  // is valid at all intermediate steps
1845  unsigned int n_levels = MeshTools::n_levels(mesh);
1846 
1847  for (int l = n_levels - 1; l >= 0; --l)
1848  {
1849  MeshBase::element_iterator lev_elem_it = mesh.level_elements_begin(l),
1850  lev_end = mesh.level_elements_end(l);
1851  for (; lev_elem_it != lev_end; ++lev_elem_it)
1852  {
1853  Elem * elem = *lev_elem_it;
1854  libmesh_assert (elem);
1855  // Make sure we don't leave any invalid pointers
1856  const bool keep_me = elements_to_keep.count(elem);
1857 
1858  if (!keep_me)
1859  elem->make_links_to_me_remote();
1860 
1861  // delete_elem doesn't currently invalidate element
1862  // iterators... that had better not change
1863  if (!keep_me)
1864  mesh.delete_elem(elem);
1865  }
1866  }
1867 
1868  // Delete all the nodes we have no reason to save
1869  for (auto & node : mesh.node_ptr_range())
1870  {
1871  libmesh_assert(node);
1872  if (!connected_nodes.count(node))
1873  mesh.delete_node(node);
1874  }
1875 
1876  // If we had a point locator, it's invalid now that some of the
1877  // elements it pointed to have been deleted.
1878  mesh.clear_point_locator();
1879 
1880  // Much of our boundary info may have been for now-remote parts of
1881  // the mesh, in which case we don't want to keep local copies.
// NOTE(review): the call line here (presumably
// mesh.get_boundary_info().regenerate_id_sets();) was elided in this
// extraction -- confirm against the upstream source.
1883 
1884  // We now have all remote elements and nodes deleted; our ghosting
1885  // functors should be ready to delete any now-redundant cached data
1886  // they use too.
1887  std::set<GhostingFunctor *>::iterator gf_it = mesh.ghosting_functors_begin();
1888  const std::set<GhostingFunctor *>::iterator gf_end = mesh.ghosting_functors_end();
1889  for (; gf_it != gf_end; ++gf_it)
1890  {
1891  GhostingFunctor *gf = *gf_it;
1892  gf->delete_remote_elements();
1893  }
1894 
1895 #ifdef DEBUG
1897 #endif
1898 }
1899 
1900 } // namespace libMesh
The definition of the element_iterator struct.
Definition: mesh_base.h:1476
OStreamProxy err
virtual bool is_node_on_side(const unsigned int n, const unsigned int s) const =0
void set_p_level(const unsigned int p)
Sets the value of the p-refinement level for the element.
Definition: elem.h:2559
bool has_children() const
Definition: elem.h:2295
void redistribute(DistributedMesh &mesh, bool newly_coarsened_only=false) const
This method takes a parallel distributed mesh and redistributes the elements.
bool verify(const T &r) const
Verify that a local variable has the same value on all processors.
const BoundaryInfo & get_boundary_info() const
The information about boundary ids on the mesh.
Definition: mesh_base.h:117
Status wait(Request &r)
Wait for a non-blocking send or receive to finish.
Definition: parallel.h:565
virtual UniquePtr< Elem > build_side_ptr(const unsigned int i, bool proxy=true)=0
void make_node_unique_ids_parallel_consistent(MeshBase &)
Assuming all unique_ids on local nodes are globally unique, and assuming all processor ids are parall...
A Node is like a Point, but with more information.
Definition: node.h:52
This abstract base class defines the interface by which library code and user code can report associa...
virtual void renumber_node(dof_id_type old_id, dof_id_type new_id)=0
Changes the id of node old_id, both by changing node(old_id)->id() and by moving node(old_id) in the ...
bool subactive() const
Definition: elem.h:2275
virtual bool is_serial() const
Definition: mesh_base.h:140
bool active() const
Definition: elem.h:2257
const unsigned int invalid_uint
A number which is used quite often to represent an invalid or uninitialized value.
Definition: libmesh.h:184
virtual dof_id_type n_elem() const libmesh_override
virtual element_iterator level_elements_begin(unsigned int level)=0
Iterate over elements of a given level.
void correct_node_proc_ids(MeshBase &)
Changes the processor ids on each node so be the same as the id of the lowest element touching that n...
Definition: mesh_tools.C:1832
const unsigned int any_source
Accept from any source.
Definition: parallel.h:204
Encapsulates the MPI_Status struct.
Definition: parallel.h:461
unsigned int p_level() const
Definition: elem.h:2422
dof_id_type n_elem(const MeshBase::const_element_iterator &begin, const MeshBase::const_element_iterator &end)
Count up the number of elements of a specific type (as defined by an iterator range).
Definition: mesh_tools.C:656
The definition of the const_element_iterator struct.
Definition: mesh_base.h:1494
void connect_children(const MeshBase &mesh, processor_id_type pid, std::set< const Elem *, CompareElemIdsByLevel > &connected_elements)
IntRange< unsigned short > side_index_range() const
Definition: elem.h:2083
void reconnect_nodes(const std::set< const Elem *, CompareElemIdsByLevel > &connected_elements, std::set< const Node * > &connected_nodes)
void broadcast(MeshBase &) const
Finds all the processors that may contain elements that neighbor my elements.
void make_elems_parallel_consistent(MeshBase &)
Copy ids of ghost elements from their local processors.
virtual const Node & node_ref(const dof_id_type i) const
Definition: mesh_base.h:420
virtual SimpleRange< element_iterator > active_local_element_ptr_range() libmesh_override
virtual element_iterator unpartitioned_elements_begin() libmesh_override
Iterate over unpartitioned elements in the Mesh.
void libmesh_assert_valid_refinement_tree(const MeshBase &mesh)
A function for verifying that elements on this processor have valid descendants and consistent active...
Definition: mesh_tools.C:1678
const Elem * parent() const
Definition: elem.h:2346
unsigned short int side
Definition: xdr_io.C:49
void clear()
Clears all data structures and resets to a pristine state.
processor_id_type n_processors() const
This is the base class from which all geometric element types are derived.
Definition: elem.h:89
unsigned int n_neighbors() const
Definition: elem.h:613
MeshBase & mesh
uint8_t processor_id_type
Definition: id_types.h:99
const class libmesh_nullptr_t libmesh_nullptr
virtual const Node * node_ptr(const dof_id_type i) const =0
dof_id_type parallel_max_elem_id() const
IterBase * end
Also have a polymorphic pointer to the end object, this prevents iterating past the end...
virtual void redistribute()
GhostingFunctor subclasses with relatively long-lasting caches may want to redistribute those caches ...
virtual element_iterator flagged_pid_elements_end(unsigned char rflag, processor_id_type pid)=0
void make_links_to_me_remote()
Resets this element's neighbors' appropriate neighbor pointers and its parent's and children's approp...
Definition: elem.C:1293
The libMesh namespace provides an interface to certain functionality in the library.
void send_coarse_ghosts(MeshBase &) const
Examine a just-coarsened mesh, and for any newly-coarsened elements, send the associated ghosted elem...
void active_family_tree(std::vector< const Elem * > &active_family, const bool reset=true) const
Same as the family_tree() member, but only adds the active children.
Definition: elem.C:1724
void gather_neighboring_elements(DistributedMesh &) const
virtual void find_neighbors(const bool reset_remote_elements=false, const bool reset_current_list=true) libmesh_override
Other functions from MeshBase requiring re-definition.
virtual node_iterator nodes_end() libmesh_override
void gather(const processor_id_type root_id, DistributedMesh &) const
This method takes an input DistributedMesh which may be distributed among all the processors...
Used to iterate over non-NULL entries in a container.
virtual element_iterator unpartitioned_elements_end() libmesh_override
This is the MeshBase class.
Definition: mesh_base.h:68
virtual element_iterator level_elements_end(unsigned int level) libmesh_override
SimpleRange< ChildRefIter > child_ref_range()
Returns a range with all children of a parent element, usable in range-based for loops.
Definition: elem.h:1699
unsigned int paranoid_n_levels(const MeshBase &mesh)
Definition: mesh_tools.C:631
libmesh_assert(j)
std::set< GhostingFunctor * >::const_iterator ghosting_functors_end() const
End of range of ghosting functors.
Definition: mesh_base.h:804
std::unique_ptr< T > UniquePtr
Definition: auto_ptr.h:46
void libmesh_assert_equal_n_systems(const MeshBase &mesh)
A function for testing that all DofObjects within a mesh have the same n_systems count.
Definition: mesh_tools.C:963
void total_family_tree(std::vector< const Elem * > &active_family, const bool reset=true) const
Same as the family_tree() member, but also adds any subactive descendants.
Definition: elem.C:1704
const Elem * neighbor_ptr(unsigned int i) const
Definition: elem.h:1967
dof_id_type parallel_max_node_id() const
virtual node_iterator nodes_begin()=0
Iterate over all the nodes in the Mesh.
virtual element_iterator elements_begin()=0
Iterate over all the elements in the Mesh.
virtual element_iterator level_elements_end(unsigned int level)=0
virtual element_iterator level_elements_begin(unsigned int level) libmesh_override
Iterate over elements of a given level.
std::map< boundary_id_type, std::string > & set_sideset_name_map()
void make_new_node_proc_ids_parallel_consistent(MeshBase &)
Assuming all processor ids on nodes touching local elements are parallel consistent, this function makes processor ids on new nodes on other processors parallel consistent as well.
static const processor_id_type invalid_processor_id
An invalid processor_id to distinguish DoFs that have not been assigned to a processor.
Definition: dof_object.h:335
virtual element_iterator active_pid_elements_begin(processor_id_type proc_id)=0
void gather_packed_range(const unsigned int root_id, Context *context, Iter range_begin, const Iter range_end, OutputIter out) const
Take a range of local variables, combine it with ranges from all processors, and write the output to ...
status probe(const unsigned int src_processor_id, const MessageTag &tag=any_tag) const
Blocking message probe.
Encapsulates the MPI tag integers.
Definition: parallel.h:227
void sync_node_data_by_element_id(MeshBase &mesh, const MeshBase::const_element_iterator &range_begin, const MeshBase::const_element_iterator &range_end, const ElemCheckFunctor &elem_check, const NodeCheckFunctor &node_check, SyncFunctor &sync)
Request data about a range of ghost nodes uniquely identified by an element id and local node id...
void connect_families(std::set< const Elem *, CompareElemIdsByLevel > &connected_elements)
virtual dof_id_type n_nodes() const libmesh_override
virtual element_iterator elements_end()=0
void send_packed_range(const unsigned int dest_processor_id, const Context *context, Iter range_begin, const Iter range_end, const MessageTag &tag=no_tag) const
Blocking-send range-of-pointers to one processor.
unsigned int n_levels(const MeshBase &mesh)
Definition: mesh_tools.C:603
void make_p_levels_parallel_consistent(MeshBase &)
Copy p levels of ghost elements from their local processors.
virtual const Node * query_node_ptr(const dof_id_type i) const =0
void clear_point_locator()
Releases the current PointLocator object.
Definition: mesh_base.C:555
std::unordered_map< const Elem *, const CouplingMatrix * > map_type
What elements do we care about and what variables do we care about on each element?
static const dof_id_type invalid_id
An invalid id to distinguish an uninitialized DofObject.
Definition: dof_object.h:324
virtual dof_id_type max_node_id() const libmesh_override
void libmesh_assert_valid_boundary_ids(const MeshBase &mesh)
A function for verifying that boundary condition ids match across processors.
Definition: mesh_tools.C:1194
virtual bool is_serial() const libmesh_override
unsigned int size(const data_type &type) const
MPI_Status status
Status object for querying messages.
Definition: parallel.h:176
void receive_packed_range(const unsigned int dest_processor_id, Context *context, OutputIter out, const T *output_type, const MessageTag &tag=any_tag) const
Blocking-receive range-of-pointers from one processor.
void send(const unsigned int dest_processor_id, const T &buf, const MessageTag &tag=no_tag) const
Blocking-send to one processor with data-defined type.
void broadcast(T &data, const unsigned int root_id=0) const
Take a local value and broadcast it to all processors.
std::map< subdomain_id_type, std::string > & set_subdomain_name_map()
Definition: mesh_base.h:1293
void sync_dofobject_data_by_id(const Communicator &comm, const Iterator &range_begin, const Iterator &range_end, SyncFunctor &sync)
Request data about a range of ghost dofobjects uniquely identified by their id.
void set_neighbor(const unsigned int i, Elem *n)
Assigns n as the neighbor.
Definition: elem.h:2000
The DistributedMesh class is derived from the MeshBase class, and is intended to provide identical fu...
void regenerate_id_sets()
Clears and regenerates the cached sets of ids.
virtual void clear()
Deletes all the data that are currently stored.
Definition: mesh_base.C:285
umap_type definitive_renumbering
virtual node_iterator nodes_begin() libmesh_override
Node iterator accessor functions.
virtual element_iterator active_elements_begin()=0
Active, local, and negation forms of the element iterators described above.
virtual element_iterator flagged_pid_elements_begin(unsigned char rflag, processor_id_type pid)=0
Iterate over all elements with a specified refinement flag on a specified processor.
A class for templated methods that expect output iterator arguments, which adds objects to the Mesh...
void libmesh_ignore(const T &)
SimpleRange< NodeRefIter > node_ref_range()
Returns a range with all nodes of an element, usable in range-based for loops.
Definition: elem.h:2047
bool on_boundary() const
Definition: elem.h:2171
virtual element_iterator pid_elements_begin(processor_id_type proc_id)=0
Iterate over all elements with a specified processor id.
virtual element_iterator active_elements_end()=0
query_obj query
unsigned int which_child_am_i(const Elem *e) const
Definition: elem.h:2487
virtual const Elem & elem_ref(const dof_id_type i) const
Definition: mesh_base.h:490
void make_node_ids_parallel_consistent(MeshBase &)
Assuming all ids on local nodes are globally unique, and assuming all processor ids are parallel cons...
virtual const Elem * query_elem_ptr(const dof_id_type i) const =0
std::set< GhostingFunctor * >::const_iterator ghosting_functors_begin() const
Beginning of range of ghosting functors.
Definition: mesh_base.h:798
virtual node_iterator nodes_end()=0
virtual element_iterator active_pid_elements_end(processor_id_type proc_id)=0
virtual SimpleRange< node_iterator > node_ptr_range() libmesh_override
virtual void renumber_elem(dof_id_type old_id, dof_id_type new_id)=0
Changes the id of element old_id, both by changing elem(old_id)->id() and by moving elem(old_id) in t...
std::map< boundary_id_type, std::string > & set_nodeset_name_map()
void make_nodes_parallel_consistent(MeshBase &)
Copy processor_ids and ids on ghost nodes from their local processors.
const Parallel::Communicator & comm() const
OStreamProxy out
void make_node_proc_ids_parallel_consistent(MeshBase &)
Assuming all processor ids on nodes touching local elements are parallel consistent, this function makes all other processor ids parallel consistent as well.
void alltoall(std::vector< T > &r) const
Effectively transposes the input vector across all processors.
virtual unsigned int n_vertices() const =0
RefinementState refinement_flag() const
Definition: elem.h:2505
Status receive(const unsigned int dest_processor_id, T &buf, const MessageTag &tag=any_tag) const
Blocking-receive from one processor with data-defined type.
virtual void delete_remote_elements()
GhostingFunctor subclasses with relatively long-lasting caches may want to delete the no-longer-relev...
void query_ghosting_functors(const MeshBase &mesh, processor_id_type pid, bool newly_coarsened_only, std::set< const Elem *, CompareElemIdsByLevel > &connected_elements)
void allgather_packed_range(Context *context, Iter range_begin, const Iter range_end, OutputIter out) const
Take a range of local variables, combine it with ranges from all processors, and write the output to ...
uset_type definitive_ids
const Elem * top_parent() const
Definition: elem.h:2370
SimpleRange< NeighborPtrIter > neighbor_ptr_range()
Returns a range with all neighbors of an element, usable in range-based for loops.
Definition: elem.h:2855
virtual unsigned int as_parent_node(unsigned int c, unsigned int n) const
Definition: elem.C:2080
renumber_obj renumber
dof_id_type node_id(const unsigned int i) const
Definition: elem.h:1831
void cache_elem_dims()
Search the mesh and cache the different dimensions of the elements present in the mesh...
Definition: mesh_base.C:612
void broadcast_packed_range(const Context *context1, Iter range_begin, const Iter range_end, OutputContext *context2, OutputIter out, const unsigned int root_id=0) const
Blocking-broadcast range-of-pointers to one processor.
IterBase * data
Ideally this private member data should have protected access.
void delete_remote_elements(DistributedMesh &, const std::set< Elem * > &) const
This method takes an input DistributedMesh which may be distributed among all the processors...
dof_id_type id() const
Definition: dof_object.h:632
virtual dof_id_type n_nodes() const =0
void make_new_nodes_parallel_consistent(MeshBase &)
Copy processor_ids and ids on new nodes from their local processors.
virtual dof_id_type n_elem() const =0
virtual element_iterator elements_begin() libmesh_override
Elem iterator accessor functions.
Elem * neighbor(const unsigned int i) const
Definition: elem.h:1988
uint8_t unique_id_type
Definition: id_types.h:79
virtual void delete_elem(Elem *e) libmesh_override
Removes element e from the mesh.
MessageTag get_unique_tag(int tagvalue) const
Get a tag that is unique to this Communicator.
virtual dof_id_type max_elem_id() const libmesh_override
void sync_element_data_by_parent_id(MeshBase &mesh, const Iterator &range_begin, const Iterator &range_end, SyncFunctor &sync)
Request data about a range of ghost elements uniquely identified by their parent id and which child t...
processor_id_type processor_id() const
uint8_t dof_id_type
Definition: id_types.h:64
processor_id_type processor_id() const
Definition: dof_object.h:694
virtual void delete_node(Node *n) libmesh_override
Removes the Node n from the mesh.
void family_tree(std::vector< const Elem * > &family, const bool reset=true) const
Fills the vector family with the children of this element, recursively.
Definition: elem.C:1681
const RemoteElem * remote_elem
Definition: remote_elem.C:57
virtual element_iterator elements_end() libmesh_override
virtual element_iterator pid_elements_end(processor_id_type proc_id)=0