libMesh
checkpoint_io.C
1 // The libMesh Finite Element Library.
2 // Copyright (C) 2002-2017 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner
3 
4 // This library is free software; you can redistribute it and/or
5 // modify it under the terms of the GNU Lesser General Public
6 // License as published by the Free Software Foundation; either
7 // version 2.1 of the License, or (at your option) any later version.
8 
9 // This library is distributed in the hope that it will be useful,
10 // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 // Lesser General Public License for more details.
13 
14 // You should have received a copy of the GNU Lesser General Public
15 // License along with this library; if not, write to the Free Software
16 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 
18 #include "libmesh/checkpoint_io.h"
19 
20 // C++ includes
21 #include <iostream>
22 #include <iomanip>
23 #include <cstdio>
24 #include <vector>
25 #include <string>
26 #include <fstream>
27 #include <sstream> // for ostringstream
28 
29 // Local includes
30 #include "libmesh/boundary_info.h"
31 #include "libmesh/distributed_mesh.h"
32 #include "libmesh/elem.h"
33 #include "libmesh/enum_xdr_mode.h"
34 #include "libmesh/libmesh_logging.h"
35 #include "libmesh/mesh_base.h"
36 #include "libmesh/mesh_communication.h"
37 #include "libmesh/mesh_tools.h"
38 #include "libmesh/node.h"
39 #include "libmesh/parallel.h"
40 #include "libmesh/partitioner.h"
41 #include "libmesh/remote_elem.h"
42 #include "libmesh/xdr_io.h"
43 #include "libmesh/xdr_cxx.h"
44 
45 namespace libMesh
46 {
47 
48 // ------------------------------------------------------------
49 // CheckpointIO members
50 CheckpointIO::CheckpointIO (MeshBase & mesh, const bool binary_in) :
51  MeshInput<MeshBase> (mesh,/* is_parallel_format = */ true),
52  MeshOutput<MeshBase>(mesh,/* is_parallel_format = */ true),
53  ParallelObject (mesh),
54  _binary (binary_in),
55  _parallel (false),
56  _version ("checkpoint-1.2"),
57  _my_processor_ids (1, processor_id()),
58  _my_n_processors (n_processors())
59 {
60 }
61 
62 
63 
64 CheckpointIO::CheckpointIO (const MeshBase & mesh, const bool binary_in) :
65  MeshOutput<MeshBase>(mesh,/* is_parallel_format = */ true),
66  ParallelObject (mesh),
67  _binary (binary_in),
68  _parallel (false),
69  _version ("checkpoint-1.2"),
70  _my_n_processors (n_processors())
71 {
72 }
73 
74 
75 
76 CheckpointIO::~CheckpointIO()
77 {
78 }
79 
80 
81 
82 
83 void CheckpointIO::write (const std::string & name)
84 {
85  LOG_SCOPE("write()", "CheckpointIO");
86 
87  // convenient reference to our mesh
88  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
89 
90  // FIXME: For backwards compatibility, we'll assume for now that we
91  // only want to write distributed meshes in parallel. Later we can
92  // do a gather_to_zero() and support that case too.
93  _parallel = _parallel || !mesh.is_serial();
94 
95  // We'll write a header file from processor 0 to make it easier to do unambiguous
96  // restarts later:
97  if (this->processor_id() == 0)
98  {
99  Xdr io (name, this->binary() ? ENCODE : WRITE);
100 
101  // write the version
102  io.data(_version, "# version");
103 
104  // write what kind of data type we're using
105  header_id_type data_size = sizeof(largest_id_type);
106  io.data(data_size, "# integer size");
107 
108  // Write out the max mesh dimension for backwards compatibility
109  // with code that sets it independently of element dimensions
110  {
111  uint16_t mesh_dimension = mesh.mesh_dimension();
112  io.data(mesh_dimension, "# dimensions");
113  }
114 
115  // Write out whether or not this is serial output
116  {
117  uint16_t parallel = _parallel;
118  io.data(parallel, "# parallel");
119  }
120 
121  // If we're writing out a parallel mesh then we need to write the number of processors
122  // so we can check it upon reading the file
123  if (_parallel)
124  {
125  largest_id_type n_procs = _my_n_processors;
126  io.data(n_procs, "# n_procs");
127  }
128 
129  // write subdomain names
130  this->write_subdomain_names(io);
131 
132  // write boundary id names
133  const BoundaryInfo & boundary_info = mesh.get_boundary_info();
134  write_bc_names(io, boundary_info, true); // sideset names
135  write_bc_names(io, boundary_info, false); // nodeset names
136  }
137 
138  // If this is a serial mesh written to a serial file then we're only
139  // going to write local data from processor 0. If this is a mesh being
140  // written in parallel then we're going to write from every
141  // processor.
142  std::vector<processor_id_type> ids_to_write;
143 
144  if (_parallel)
145  {
146  ids_to_write = _my_processor_ids;
147  }
148  else if (mesh.is_serial())
149  {
150  if (mesh.processor_id() == 0)
151  {
152  // placeholder
153  ids_to_write.push_back(0);
154  }
155  }
156  else
157  {
158  libmesh_error_msg("Cannot write serial checkpoint from distributed mesh");
159  }
160 
161  for (std::vector<processor_id_type>::const_iterator
162  id_it = ids_to_write.begin(), id_end = ids_to_write.end();
163  id_it != id_end; ++id_it)
164  {
165  const processor_id_type my_pid = *id_it;
166 
167  std::ostringstream file_name_stream;
168 
169  file_name_stream << name << "-" << (_parallel ? _my_n_processors : 1) << "-" << my_pid;
170 
171  Xdr io (file_name_stream.str(), this->binary() ? ENCODE : WRITE);
172 
173  std::set<const Elem *, CompareElemIdsByLevel> elements;
174 
175  // For serial files or for already-distributed meshes, we write
176  // everything we can see.
177  if (!_parallel || !mesh.is_serial())
178  elements.insert(mesh.elements_begin(), mesh.elements_end());
179  // For parallel files written from serial meshes we write what
180  // we'd be required to keep if we were to be deleting remote
181  // elements. This allows us to write proper parallel files even
182  // from a ReplicatedMesh.
183  //
184  // WARNING: If we have a DistributedMesh which used
185  // "add_extra_ghost_elem" rather than ghosting functors to
186  // preserve elements and which is *also* currently serialized
187  // then we're not preserving those elements here. As a quick
188  // workaround user code should delete_remote_elements() before
189  // writing the checkpoint; as a long term workaround user code
190  // should use ghosting functors instead of extra_ghost_elem
191  // lists.
192  else
193  {
194  query_ghosting_functors(mesh, my_pid, false, elements);
196  connect_children(mesh, my_pid, elements);
198  connect_families(elements);
199  }
200 
201  std::set<const Node *> connected_nodes;
202  reconnect_nodes(elements, connected_nodes);
203 
204  // write the nodal locations
205  this->write_nodes (io, connected_nodes);
206 
207  // write connectivity
208  this->write_connectivity (io, elements);
209 
210  // write remote_elem connectivity
211  this->write_remote_elem (io, elements);
212 
213  // write the boundary condition information
214  this->write_bcs (io, elements);
215 
216  // write the nodeset information
217  this->write_nodesets (io, connected_nodes);
218 
219  // close it up
220  io.close();
221  }
222 
223  // this->comm().barrier();
224 }
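A minimal usage sketch for the writer above. The free function, its mesh argument, and the basename "checkpoint_file.cpr" are illustrative assumptions; only the CheckpointIO calls themselves come from this file:

// Sketch only: assumes an already-initialized libMesh session and a built mesh.
#include "libmesh/checkpoint_io.h"
#include "libmesh/mesh_base.h"

void write_checkpoint_sketch (const libMesh::MeshBase & mesh)
{
  // Binary (XDR) output; pass false for ASCII.
  libMesh::CheckpointIO cpio (mesh, /* binary = */ true);

  // write() emits a header file "checkpoint_file.cpr" from processor 0,
  // plus one subfile per writing processor named
  // "checkpoint_file.cpr-<n_procs>-<pid>" ("-1-0" for a serialized mesh),
  // exactly as assembled by file_name_stream above.
  cpio.write ("checkpoint_file.cpr");
}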
225 
226 void CheckpointIO::write_subdomain_names(Xdr & io) const
227 {
228  {
229  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
230 
231  const std::map<subdomain_id_type, std::string> & subdomain_map = mesh.get_subdomain_name_map();
232 
233  std::vector<largest_id_type> subdomain_ids; subdomain_ids.reserve(subdomain_map.size());
234  std::vector<std::string> subdomain_names; subdomain_names.reserve(subdomain_map.size());
235 
236  // We need to loop over the map and make sure that there aren't any invalid entries. Since we
237  // return writable references in mesh_base, it's possible for the user to leave some entity names
238  // blank. We can't write those to the XDA file.
239  largest_id_type n_subdomain_names = 0;
240  std::map<subdomain_id_type, std::string>::const_iterator it_end = subdomain_map.end();
241  for (std::map<subdomain_id_type, std::string>::const_iterator it = subdomain_map.begin(); it != it_end; ++it)
242  {
243  if (!it->second.empty())
244  {
245  n_subdomain_names++;
246  subdomain_ids.push_back(it->first);
247  subdomain_names.push_back(it->second);
248  }
249  }
250 
251  io.data(n_subdomain_names, "# subdomain id to name map");
252  // Write out the ids and names in two vectors
253  if (n_subdomain_names)
254  {
255  io.data(subdomain_ids);
256  io.data(subdomain_names);
257  }
258  }
259 }
260 
261 
262 
263 void CheckpointIO::write_nodes (Xdr & io,
264  const std::set<const Node *> & nodeset) const
265 {
266  largest_id_type n_nodes_here = nodeset.size();
267 
268  io.data(n_nodes_here, "# n_nodes on proc");
269 
270  // Will hold the node id and pid
271  std::vector<largest_id_type> id_pid(2);
272 
273  // For the coordinates
274  std::vector<Real> coords(LIBMESH_DIM);
275 
276  for (std::set<const Node *>::iterator it = nodeset.begin(),
277  end = nodeset.end(); it != end; ++it)
278  {
279  const Node & node = **it;
280 
281  id_pid[0] = node.id();
282  id_pid[1] = node.processor_id();
283 
284  io.data_stream(&id_pid[0], 2, 2);
285 
286 #ifdef LIBMESH_ENABLE_UNIQUE_ID
287  largest_id_type unique_id = node.unique_id();
288 
289  io.data(unique_id, "# unique id");
290 #endif
291 
292  coords[0] = node(0);
293 
294 #if LIBMESH_DIM > 1
295  coords[1] = node(1);
296 #endif
297 
298 #if LIBMESH_DIM > 2
299  coords[2] = node(2);
300 #endif
301 
302  io.data_stream(&coords[0], LIBMESH_DIM, 3);
303  }
304 }
305 
306 
307 
308 void CheckpointIO::write_connectivity (Xdr & io,
309  const std::set<const Elem *, CompareElemIdsByLevel> & elements) const
310 {
311  libmesh_assert (io.writing());
312 
313  // Put these out here to reduce memory churn
314  // id type pid subdomain_id parent_id child_num
315  std::vector<largest_id_type> elem_data(6);
316  std::vector<largest_id_type> conn_data;
317 
318  largest_id_type n_elems_here = elements.size();
319 
320  io.data(n_elems_here, "# number of elements");
321 
322  for (std::set<const Elem *, CompareElemIdsByLevel>::const_iterator it = elements.begin(),
323  end = elements.end(); it != end; ++it)
324  {
325  const Elem & elem = **it;
326 
327  unsigned int n_nodes = elem.n_nodes();
328 
329  elem_data[0] = elem.id();
330  elem_data[1] = elem.type();
331  elem_data[2] = elem.processor_id();
332  elem_data[3] = elem.subdomain_id();
333 
334 #ifdef LIBMESH_ENABLE_AMR
335  if (elem.parent() != libmesh_nullptr)
336  {
337  elem_data[4] = elem.parent()->id();
338  elem_data[5] = elem.parent()->which_child_am_i(&elem);
339  }
340  else
341 #endif
342  {
343  elem_data[4] = DofObject::invalid_processor_id;
344  elem_data[5] = DofObject::invalid_processor_id;
345  }
346 
347  conn_data.resize(n_nodes);
348 
349  for (unsigned int i=0; i<n_nodes; i++)
350  conn_data[i] = elem.node_id(i);
351 
352  io.data_stream(&elem_data[0],
353  cast_int<unsigned int>(elem_data.size()),
354  cast_int<unsigned int>(elem_data.size()));
355 
356 #ifdef LIBMESH_ENABLE_UNIQUE_ID
357  largest_id_type unique_id = elem.unique_id();
358 
359  io.data(unique_id, "# unique id");
360 #endif
361 
362 #ifdef LIBMESH_ENABLE_AMR
363  uint16_t p_level = elem.p_level();
364  io.data(p_level, "# p_level");
365 
366  uint16_t rflag = elem.refinement_flag();
367  io.data(rflag, "# rflag");
368 
369  uint16_t pflag = elem.p_refinement_flag();
370  io.data(pflag, "# pflag");
371 #endif
372  io.data_stream(&conn_data[0],
373  cast_int<unsigned int>(conn_data.size()),
374  cast_int<unsigned int>(conn_data.size()));
375  }
376 }
377 
378 
379 void CheckpointIO::write_remote_elem (Xdr & io,
380  const std::set<const Elem *, CompareElemIdsByLevel> & elements) const
381 {
382  libmesh_assert (io.writing());
383 
384  // Find the remote_elem neighbor and child links
385  std::vector<largest_id_type> elem_ids, parent_ids;
386  std::vector<uint16_t> elem_sides, child_numbers;
387 
388  for (std::set<const Elem *, CompareElemIdsByLevel>::const_iterator it = elements.begin(),
389  end = elements.end(); it != end; ++it)
390  {
391  const Elem & elem = **it;
392 
393  for (auto n : elem.side_index_range())
394  {
395  const Elem * neigh = elem.neighbor_ptr(n);
396  if (neigh == remote_elem ||
397  (neigh && !elements.count(neigh)))
398  {
399  elem_ids.push_back(elem.id());
400  elem_sides.push_back(n);
401  }
402  }
403 
404 #ifdef LIBMESH_ENABLE_AMR
405  if (elem.has_children())
406  {
407  const unsigned int nc = elem.n_children();
408  for (unsigned int c = 0; c != nc; ++c)
409  {
410  const Elem * child = elem.child_ptr(c);
411  if (child == remote_elem ||
412  (child && !elements.count(child)))
413  {
414  parent_ids.push_back(elem.id());
415  child_numbers.push_back(c);
416  }
417  }
418  }
419 #endif
420  }
421 
422  io.data(elem_ids, "# remote neighbor elem_ids");
423  io.data(elem_sides, "# remote neighbor elem_sides");
424  io.data(parent_ids, "# remote child parent_ids");
425  io.data(child_numbers, "# remote child_numbers");
426 }
427 
428 
429 
430 void CheckpointIO::write_bcs (Xdr & io,
431  const std::set<const Elem *, CompareElemIdsByLevel> & elements) const
432 {
433  libmesh_assert (io.writing());
434 
435  // convenient reference to our mesh
436  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
437 
438  // and our boundary info object
439  const BoundaryInfo & boundary_info = mesh.get_boundary_info();
440 
441  std::vector<dof_id_type> full_element_id_list;
442  std::vector<uint16_t> full_side_list;
443  std::vector<boundary_id_type> full_bc_id_list;
444 
445  boundary_info.build_side_list(full_element_id_list, full_side_list, full_bc_id_list);
446 
447  std::size_t bc_size = full_element_id_list.size();
448  libmesh_assert_equal_to(bc_size, full_side_list.size());
449  libmesh_assert_equal_to(bc_size, full_bc_id_list.size());
450 
451  std::vector<largest_id_type> element_id_list;
452  std::vector<uint16_t> side_list;
453  std::vector<largest_id_type> bc_id_list;
454 
455  element_id_list.reserve(bc_size);
456  side_list.reserve(bc_size);
457  bc_id_list.reserve(bc_size);
458 
459  for (std::size_t i = 0; i != bc_size; ++i)
460  if (elements.count(mesh.elem_ptr(full_element_id_list[i])))
461  {
462  element_id_list.push_back(full_element_id_list[i]);
463  side_list.push_back(full_side_list[i]);
464  bc_id_list.push_back(full_bc_id_list[i]);
465  }
466 
467 
468  io.data(element_id_list, "# element ids for bcs");
469  io.data(side_list, "# sides of elements for bcs");
470  io.data(bc_id_list, "# bc ids");
471 }
472 
473 
474 
475 void CheckpointIO::write_nodesets (Xdr & io,
476  const std::set<const Node *> & nodeset) const
477 {
478  libmesh_assert (io.writing());
479 
480  // convenient reference to our mesh
481  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
482 
483  // and our boundary info object
484  const BoundaryInfo & boundary_info = mesh.get_boundary_info();
485 
486  std::vector<dof_id_type> full_node_id_list;
487  std::vector<boundary_id_type> full_bc_id_list;
488 
489  boundary_info.build_node_list(full_node_id_list, full_bc_id_list);
490 
491  std::size_t nodeset_size = full_node_id_list.size();
492  libmesh_assert_equal_to(nodeset_size, full_bc_id_list.size());
493 
494  std::vector<largest_id_type> node_id_list;
495  std::vector<largest_id_type> bc_id_list;
496 
497  node_id_list.reserve(nodeset_size);
498  bc_id_list.reserve(nodeset_size);
499 
500  for (std::size_t i = 0; i != nodeset_size; ++i)
501  if (nodeset.count(mesh.node_ptr(full_node_id_list[i])))
502  {
503  node_id_list.push_back(full_node_id_list[i]);
504  bc_id_list.push_back(full_bc_id_list[i]);
505  }
506 
507  io.data(node_id_list, "# node id list");
508  io.data(bc_id_list, "# nodeset bc id list");
509 }
510 
511 
512 
513 void CheckpointIO::write_bc_names (Xdr & io, const BoundaryInfo & info, bool is_sideset) const
514 {
515  const std::map<boundary_id_type, std::string> & boundary_map = is_sideset ?
516  info.get_sideset_name_map() : info.get_nodeset_name_map();
517 
518  std::vector<largest_id_type> boundary_ids; boundary_ids.reserve(boundary_map.size());
519  std::vector<std::string> boundary_names; boundary_names.reserve(boundary_map.size());
520 
521  // We need to loop over the map and make sure that there aren't any invalid entries. Since we
522  // return writable references in boundary_info, it's possible for the user to leave some entity names
523  // blank. We can't write those to the XDA file.
524  largest_id_type n_boundary_names = 0;
525  std::map<boundary_id_type, std::string>::const_iterator it_end = boundary_map.end();
526  for (std::map<boundary_id_type, std::string>::const_iterator it = boundary_map.begin(); it != it_end; ++it)
527  {
528  if (!it->second.empty())
529  {
530  n_boundary_names++;
531  boundary_ids.push_back(it->first);
532  boundary_names.push_back(it->second);
533  }
534  }
535 
536  if (is_sideset)
537  io.data(n_boundary_names, "# sideset id to name map");
538  else
539  io.data(n_boundary_names, "# nodeset id to name map");
540  // Write out the ids and names in two vectors
541  if (n_boundary_names)
542  {
543  io.data(boundary_ids);
544  io.data(boundary_names);
545  }
546 }
547 
548 
549 void CheckpointIO::read (const std::string & name)
550 {
551  LOG_SCOPE("read()","CheckpointIO");
552 
553  MeshBase & mesh = MeshInput<MeshBase>::mesh();
554 
555  libmesh_assert(!mesh.n_elem());
556 
557  // What size data is being used in this file?
558  header_id_type data_size;
559 
560  // How many per-processor files are here?
561  largest_id_type input_n_procs;
562 
563  // We'll read a header file from processor 0 and broadcast.
564  if (this->processor_id() == 0)
565  {
566  {
567  std::ifstream in (name.c_str());
568 
569  if (!in.good())
570  libmesh_error_msg("ERROR: cannot locate header file:\n\t" << name);
571  }
572 
573  Xdr io (name, this->binary() ? DECODE : READ);
574 
575  // read the version, but don't care about it
576  std::string input_version;
577  io.data(input_version);
578 
579  // read the data type
580  io.data (data_size);
581  }
582 
583  this->comm().broadcast(data_size);
584 
585  switch (data_size) {
586  case 2:
587  input_n_procs = this->read_header<uint16_t>(name);
588  break;
589  case 4:
590  input_n_procs = this->read_header<uint32_t>(name);
591  break;
592  case 8:
593  input_n_procs = this->read_header<uint64_t>(name);
594  break;
595  default:
596  libmesh_error();
597  }
598 
599  bool input_parallel = input_n_procs;
600  if (!input_n_procs)
601  input_n_procs = 1;
602 
603  // If this is a serial read then we're going to only read the mesh
604  // on processor 0, then broadcast it
605  if ((input_parallel && !mesh.is_replicated()) || mesh.processor_id() == 0)
606  {
607  // If we're trying to read a parallel checkpoint file on a
608  // replicated mesh, we'll read every file on processor 0 so we
609  // can broadcast it later. If we're on a distributed mesh then
610  // we'll read every id to its own processor and we'll "wrap
611  // around" with any ids that exceed our processor count.
612  const processor_id_type begin_proc_id =
613  (input_parallel && !mesh.is_replicated()) ?
614  mesh.processor_id() : 0;
615  const processor_id_type stride =
616  (input_parallel && !mesh.is_replicated()) ?
617  mesh.n_processors() : 1;
618 
619  for (processor_id_type proc_id = begin_proc_id; proc_id < input_n_procs; proc_id += stride)
620  {
621  std::ostringstream file_name_stream;
622 
623  file_name_stream << name;
624 
625  file_name_stream << "-" << (input_parallel ? input_n_procs : 1) << "-" << proc_id;
626 
627  {
628  std::ifstream in (file_name_stream.str().c_str());
629 
630  if (!in.good())
631  libmesh_error_msg("ERROR: cannot locate specified file:\n\t" << file_name_stream.str());
632  }
633 
634  // Do we expect all our files' remote_elem entries to really
635  // be remote? Only if we're not reading multiple input
636  // files on the same processor.
637  const bool expect_all_remote =
638  (input_n_procs <= mesh.n_processors() &&
639  !mesh.is_replicated());
640 
641  Xdr io (file_name_stream.str(), this->binary() ? DECODE : READ);
642 
643  switch (data_size) {
644  case 2:
645  this->read_subfile<uint16_t>(io, expect_all_remote);
646  break;
647  case 4:
648  this->read_subfile<uint32_t>(io, expect_all_remote);
649  break;
650  case 8:
651  this->read_subfile<uint64_t>(io, expect_all_remote);
652  break;
653  default:
654  libmesh_error();
655  }
656 
657  io.close();
658  }
659  }
660 
661  // If the mesh was only read on processor 0 then we need to broadcast it
662  if (mesh.is_replicated())
663  MeshCommunication().broadcast(mesh);
664  // If the mesh is really distributed then we need to make sure it
665  // knows that
666  else if (mesh.n_processors() > 1)
667  mesh.set_distributed();
668 }
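A matching read-side sketch. The DistributedMesh argument, the basename, and the prepare_for_use() call are illustrative assumptions; only CheckpointIO::read() is defined above:

// Sketch only: reads the checkpoint written in the previous sketch.
#include "libmesh/checkpoint_io.h"
#include "libmesh/distributed_mesh.h"

void read_checkpoint_sketch (libMesh::DistributedMesh & mesh)
{
  // The binary flag must match the one used when writing.
  libMesh::CheckpointIO cpio (mesh, /* binary = */ true);

  // Reads the header, then this processor's subfile(s); ids from a file
  // set written with more processors than are now running "wrap around".
  cpio.read ("checkpoint_file.cpr");

  // The checkpoint stores no neighbor links, so the mesh is typically
  // re-prepared before use (not done by read() itself).
  mesh.prepare_for_use();
}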
669 
670 
671 
672 template <typename file_id_type>
673 file_id_type CheckpointIO::read_header (const std::string & name)
674 {
675  MeshBase & mesh = MeshInput<MeshBase>::mesh();
676 
677  // Hack for codes which don't look at all elem dimensions
678  uint16_t mesh_dimension;
679 
680  // Will this be a parallel input file? With how many processors? Stay tuned!
681  uint16_t input_parallel;
682  file_id_type input_n_procs;
683 
684  // We'll write a header file from processor 0 and broadcast.
685  if (this->processor_id() == 0)
686  {
687  Xdr io (name, this->binary() ? DECODE : READ);
688 
689  // read the version, but don't care about it
690  std::string input_version;
691  io.data(input_version);
692 
693  // read the data type, don't care about it this time
694  header_id_type data_size;
695  io.data (data_size);
696 
697  // read the dimension
698  io.data (mesh_dimension);
699 
700  // Read whether or not this is a parallel file
701  io.data(input_parallel);
702 
703  // With how many processors?
704  if (input_parallel)
705  io.data(input_n_procs);
706 
707  // read subdomain names
708  this->read_subdomain_names<file_id_type>(io);
709 
710  // read boundary names
711  BoundaryInfo & boundary_info = mesh.get_boundary_info();
712 
713  this->read_bc_names<file_id_type>(io, boundary_info, true); // sideset names
714  this->read_bc_names<file_id_type>(io, boundary_info, false); // nodeset names
715  }
716 
717  // broadcast data from processor 0, set values everywhere
718  this->comm().broadcast(mesh_dimension);
719  mesh.set_mesh_dimension(mesh_dimension);
720 
721  this->comm().broadcast(input_parallel);
722 
723  if (input_parallel)
724  this->comm().broadcast(input_n_procs);
725  else
726  input_n_procs = 1;
727 
728  std::map<subdomain_id_type, std::string> & subdomain_map =
729  mesh.set_subdomain_name_map();
730  this->comm().broadcast(subdomain_map);
731 
732  BoundaryInfo & boundary_info = mesh.get_boundary_info();
733  this->comm().broadcast(boundary_info.set_sideset_name_map());
734  this->comm().broadcast(boundary_info.set_nodeset_name_map());
735 
736  return input_parallel ? input_n_procs : 0;
737 }
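For reference, the header file that read_header() parses (and that write() produces above) is the following sequence of Xdr records; this summary is inferred from the code in this file rather than from separate format documentation:

// Header layout, written by processor 0 only:
//   std::string       version                ("# version")
//   header_id_type    integer size in bytes  ("# integer size"), i.e. sizeof(largest_id_type)
//   uint16_t          mesh dimension         ("# dimensions")
//   uint16_t          parallel flag          ("# parallel")
//   largest_id_type   n_procs                ("# n_procs"), only if the parallel flag is set
//   subdomain name map: count ("# subdomain id to name map"), then id and name vectors if nonzero
//   sideset name map:   count ("# sideset id to name map"),   then id and name vectors if nonzero
//   nodeset name map:   count ("# nodeset id to name map"),   then id and name vectors if nonzero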
738 
739 
740 
741 template <typename file_id_type>
742 void CheckpointIO::read_subfile (Xdr & io, bool expect_all_remote)
743 {
744  // read the nodal locations
745  this->read_nodes<file_id_type> (io);
746 
747  // read connectivity
748  this->read_connectivity<file_id_type> (io);
749 
750  // read remote_elem connectivity
751  this->read_remote_elem<file_id_type> (io, expect_all_remote);
752 
753  // read the boundary conditions
754  this->read_bcs<file_id_type> (io);
755 
756  // read the nodesets
757  this->read_nodesets<file_id_type> (io);
758 }
759 
760 
761 
762 template <typename file_id_type>
763 void CheckpointIO::read_subdomain_names(Xdr & io)
764 {
765  MeshBase & mesh = MeshInput<MeshBase>::mesh();
766 
767  std::map<subdomain_id_type, std::string> & subdomain_map =
768  mesh.set_subdomain_name_map();
769 
770  std::vector<file_id_type> subdomain_ids;
771  subdomain_ids.reserve(subdomain_map.size());
772 
773  std::vector<std::string> subdomain_names;
774  subdomain_names.reserve(subdomain_map.size());
775 
776  file_id_type n_subdomain_names = 0;
777  io.data(n_subdomain_names, "# subdomain id to name map");
778 
779  if (n_subdomain_names)
780  {
781  io.data(subdomain_ids);
782  io.data(subdomain_names);
783 
784  for (std::size_t i=0; i<subdomain_ids.size(); i++)
785  subdomain_map[cast_int<subdomain_id_type>(subdomain_ids[i])] =
786  subdomain_names[i];
787  }
788 }
789 
790 
791 
792 template <typename file_id_type>
793 void CheckpointIO::read_nodes (Xdr & io)
794 {
795  // convenient reference to our mesh
796  MeshBase & mesh = MeshInput<MeshBase>::mesh();
797 
798  file_id_type n_nodes_here;
799  io.data(n_nodes_here, "# n_nodes on proc");
800 
801  // Will hold the node id and pid
802  std::vector<file_id_type> id_pid(2);
803 
804  // For the coordinates
805  std::vector<Real> coords(LIBMESH_DIM);
806 
807  for (unsigned int i=0; i<n_nodes_here; i++)
808  {
809  io.data_stream(&id_pid[0], 2, 2);
810 
811 #ifdef LIBMESH_ENABLE_UNIQUE_ID
812  file_id_type unique_id = 0;
813  io.data(unique_id, "# unique id");
814 #endif
815 
816  io.data_stream(&coords[0], LIBMESH_DIM, LIBMESH_DIM);
817 
818  Point p;
819  p(0) = coords[0];
820 
821 #if LIBMESH_DIM > 1
822  p(1) = coords[1];
823 #endif
824 
825 #if LIBMESH_DIM > 2
826  p(2) = coords[2];
827 #endif
828 
829  const dof_id_type id = cast_int<dof_id_type>(id_pid[0]);
830 
831  // "Wrap around" if we see more processors than we're using.
832  processor_id_type pid =
833  cast_int<processor_id_type>(id_pid[1] % mesh.n_processors());
834 
835  // If we already have this node (e.g. from another file, when
836  // reading multiple distributed CheckpointIO files into a
837  // ReplicatedMesh) then we don't want to add it again (because
838  // ReplicatedMesh can't handle that) but we do want to assert
839  // consistency between what we're reading and what we have.
840  const Node * old_node = mesh.query_node_ptr(id);
841 
842  if (old_node)
843  {
844  libmesh_assert_equal_to(pid, old_node->processor_id());
845 #ifdef LIBMESH_ENABLE_UNIQUE_ID
846  libmesh_assert_equal_to(unique_id, old_node->unique_id());
847 #endif
848  }
849  else
850  {
851 #ifdef LIBMESH_ENABLE_UNIQUE_ID
852  Node * node =
853 #endif
854  mesh.add_point(p, id, pid);
855 
856 #ifdef LIBMESH_ENABLE_UNIQUE_ID
857  node->set_unique_id() = unique_id;
858 #endif
859  }
860  }
861 }
862 
863 
864 
865 template <typename file_id_type>
866 void CheckpointIO::read_connectivity (Xdr & io)
867 {
868  // convenient reference to our mesh
869  MeshBase & mesh = MeshInput<MeshBase>::mesh();
870 
871  file_id_type n_elems_here;
872  io.data(n_elems_here);
873 
874  // Keep track of the highest dimensional element we've added to the mesh
875  unsigned int highest_elem_dim = 1;
876 
877  for (unsigned int i=0; i<n_elems_here; i++)
878  {
879  // id type pid subdomain_id parent_id child_num
880  std::vector<file_id_type> elem_data(6);
881  io.data_stream
882  (&elem_data[0], cast_int<unsigned int>(elem_data.size()),
883  cast_int<unsigned int>(elem_data.size()));
884 
885 #ifdef LIBMESH_ENABLE_UNIQUE_ID
886  file_id_type unique_id = 0;
887  io.data(unique_id, "# unique id");
888 #endif
889 
890 #ifdef LIBMESH_ENABLE_AMR
891  uint16_t p_level = 0;
892  io.data(p_level, "# p_level");
893 
894  uint16_t rflag, pflag;
895  io.data(rflag, "# rflag");
896  io.data(pflag, "# pflag");
897 #endif
898 
899  unsigned int n_nodes = Elem::type_to_n_nodes_map[elem_data[1]];
900 
901  // Snag the node ids this element was connected to
902  std::vector<file_id_type> conn_data(n_nodes);
903  io.data_stream
904  (&conn_data[0], cast_int<unsigned int>(conn_data.size()),
905  cast_int<unsigned int>(conn_data.size()));
906 
907  const dof_id_type id =
908  cast_int<dof_id_type> (elem_data[0]);
909  const ElemType elem_type =
910  static_cast<ElemType> (elem_data[1]);
911  const processor_id_type proc_id =
912  cast_int<processor_id_type>
913  (elem_data[2] % mesh.n_processors());
914  const subdomain_id_type subdomain_id =
915  cast_int<subdomain_id_type>(elem_data[3]);
916  const dof_id_type parent_id =
917  cast_int<dof_id_type> (elem_data[4]);
918  const unsigned short int child_num =
919  cast_int<dof_id_type> (elem_data[5]);
920 
921  Elem * parent =
922  (parent_id == DofObject::invalid_processor_id) ?
923  libmesh_nullptr : mesh.elem_ptr(parent_id);
924 
925  if (!parent)
926  libmesh_assert_equal_to
927  (child_num, DofObject::invalid_processor_id);
928 
929  Elem * old_elem = mesh.query_elem_ptr(id);
930 
931  // If we already have this element (e.g. from another file,
932  // when reading multiple distributed CheckpointIO files into
933  // a ReplicatedMesh) then we don't want to add it again
934  // (because ReplicatedMesh can't handle that) but we do want
935  // to assert consistency between what we're reading and what
936  // we have.
937  if (old_elem)
938  {
939  libmesh_assert_equal_to(elem_type, old_elem->type());
940  libmesh_assert_equal_to(proc_id, old_elem->processor_id());
941  libmesh_assert_equal_to(subdomain_id, old_elem->subdomain_id());
942  if (parent)
943  libmesh_assert_equal_to(parent, old_elem->parent());
944  else
945  libmesh_assert(!old_elem->parent());
946 
947  libmesh_assert_equal_to(old_elem->n_nodes(), conn_data.size());
948 
949  for (std::size_t n=0; n != conn_data.size(); ++n)
950  libmesh_assert_equal_to
951  (old_elem->node_id(n),
952  cast_int<dof_id_type>(conn_data[n]));
953  }
954  else
955  {
956  // Create the element
957  Elem * elem = Elem::build(elem_type, parent).release();
958 
959 #ifdef LIBMESH_ENABLE_UNIQUE_ID
960  elem->set_unique_id() = unique_id;
961 #endif
962 
963  if (elem->dim() > highest_elem_dim)
964  highest_elem_dim = elem->dim();
965 
966  elem->set_id() = id;
967  elem->processor_id() = proc_id;
968  elem->subdomain_id() = subdomain_id;
969 
970 #ifdef LIBMESH_ENABLE_AMR
971  elem->hack_p_level(p_level);
972 
973  elem->set_refinement_flag (cast_int<Elem::RefinementState>(rflag));
974  elem->set_p_refinement_flag(cast_int<Elem::RefinementState>(pflag));
975 
976  // Set parent connections
977  if (parent)
978  {
979  // We must specify a child_num, because we will have
980  // skipped adding any preceding remote_elem children
981  parent->add_child(elem, child_num);
982  }
983 #endif
984 
985  libmesh_assert(elem->n_nodes() == conn_data.size());
986 
987  // Connect all the nodes to this element
988  for (std::size_t n=0; n<conn_data.size(); n++)
989  elem->set_node(n) =
990  mesh.node_ptr(cast_int<dof_id_type>(conn_data[n]));
991 
992  mesh.add_elem(elem);
993  }
994  }
995 
996  mesh.set_mesh_dimension(cast_int<unsigned char>(highest_elem_dim));
997 }
998 
999 
1000 template <typename file_id_type>
1001 void CheckpointIO::read_remote_elem (Xdr & io, bool libmesh_dbg_var(expect_all_remote))
1002 {
1003  // convenient reference to our mesh
1004  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1005 
1006  // Find the remote_elem neighbor links
1007  std::vector<file_id_type> elem_ids;
1008  std::vector<uint16_t> elem_sides;
1009 
1010  io.data(elem_ids, "# remote neighbor elem_ids");
1011  io.data(elem_sides, "# remote neighbor elem_sides");
1012 
1013  libmesh_assert_equal_to(elem_ids.size(), elem_sides.size());
1014 
1015  for (std::size_t i=0; i != elem_ids.size(); ++i)
1016  {
1017  Elem & elem = mesh.elem_ref(cast_int<dof_id_type>(elem_ids[i]));
1018  if (!elem.neighbor_ptr(elem_sides[i]))
1019  elem.set_neighbor(elem_sides[i],
1020  const_cast<RemoteElem *>(remote_elem));
1021  else
1022  libmesh_assert(!expect_all_remote);
1023  }
1024 
1025  // Find the remote_elem children links
1026  std::vector<file_id_type> parent_ids;
1027  std::vector<uint16_t> child_numbers;
1028 
1029  io.data(parent_ids, "# remote child parent_ids");
1030  io.data(child_numbers, "# remote child_numbers");
1031 
1032 #ifdef LIBMESH_ENABLE_AMR
1033  for (std::size_t i=0; i != parent_ids.size(); ++i)
1034  {
1035  Elem & elem = mesh.elem_ref(cast_int<dof_id_type>(parent_ids[i]));
1036 
1037  // We'd like to assert that no child pointer already exists to
1038  // be overwritten by remote_elem, but Elem doesn't actually have
1039  // an API that will return a child pointer without asserting
1040  // that it isn't NULL
1041  //
1042  const Elem * child = elem.raw_child_ptr(child_numbers[i]);
1043 
1044  if (!child)
1045  elem.add_child(const_cast<RemoteElem *>(remote_elem),
1046  child_numbers[i]);
1047  else
1048  libmesh_assert(!expect_all_remote);
1049  }
1050 #endif
1051 }
1052 
1053 
1054 
1055 template <typename file_id_type>
1056 void CheckpointIO::read_bcs (Xdr & io)
1057 {
1058  // convenient reference to our mesh
1059  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1060 
1061  // and our boundary info object
1062  BoundaryInfo & boundary_info = mesh.get_boundary_info();
1063 
1064  std::vector<file_id_type> element_id_list;
1065  std::vector<uint16_t> side_list;
1066  std::vector<file_id_type> bc_id_list;
1067 
1068  io.data(element_id_list, "# element ids for bcs");
1069  io.data(side_list, "# sides of elements for bcs");
1070  io.data(bc_id_list, "# bc ids");
1071 
1072  for (std::size_t i=0; i<element_id_list.size(); i++)
1073  boundary_info.add_side
1074  (cast_int<dof_id_type>(element_id_list[i]), side_list[i],
1075  cast_int<dof_id_type>(bc_id_list[i]));
1076 }
1077 
1078 
1079 
1080 template <typename file_id_type>
1081 void CheckpointIO::read_nodesets (Xdr & io)
1082 {
1083  // convenient reference to our mesh
1084  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1085 
1086  // and our boundary info object
1087  BoundaryInfo & boundary_info = mesh.get_boundary_info();
1088 
1089  std::vector<file_id_type> node_id_list;
1090  std::vector<file_id_type> bc_id_list;
1091 
1092  io.data(node_id_list, "# node id list");
1093  io.data(bc_id_list, "# nodeset bc id list");
1094 
1095  for (std::size_t i=0; i<node_id_list.size(); i++)
1096  boundary_info.add_node
1097  (cast_int<dof_id_type>(node_id_list[i]),
1098  cast_int<boundary_id_type>(bc_id_list[i]));
1099 }
1100 
1101 
1102 
1103 template <typename file_id_type>
1104 void CheckpointIO::read_bc_names(Xdr & io, BoundaryInfo & info, bool is_sideset)
1105 {
1106  std::map<boundary_id_type, std::string> & boundary_map = is_sideset ?
1107  info.set_sideset_name_map() : info.set_nodeset_name_map();
1108 
1109  std::vector<file_id_type> boundary_ids;
1110  std::vector<std::string> boundary_names;
1111 
1112  file_id_type n_boundary_names = 0;
1113 
1114  if (is_sideset)
1115  io.data(n_boundary_names, "# sideset id to name map");
1116  else
1117  io.data(n_boundary_names, "# nodeset id to name map");
1118 
1119  if (n_boundary_names)
1120  {
1121  io.data(boundary_ids);
1122  io.data(boundary_names);
1123  }
1124 
1125  // Add them back into the map
1126  for (std::size_t i=0; i<boundary_ids.size(); i++)
1127  boundary_map[cast_int<boundary_id_type>(boundary_ids[i])] =
1128  boundary_names[i];
1129 }
1130 
1131 
1132 unsigned int CheckpointIO::n_active_levels_in (MeshBase::const_element_iterator begin,
1133  MeshBase::const_element_iterator end) const
1134 {
1135  unsigned int max_level = 0;
1136 
1137  for (MeshBase::const_element_iterator it = begin;
1138  it != end; ++it)
1139  max_level = std::max((*it)->level(), max_level);
1140 
1141  return max_level + 1;
1142 }
1143 
1144 } // namespace libMesh