libMesh
xdr_io.C
// The libMesh Finite Element Library.
// Copyright (C) 2002-2017 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner

// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.

// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA



// C++ includes
#include <iostream>
#include <iomanip>
#include <cstdio>
#include <vector>
#include <string>
#include <sstream>   // std::stringstream, used in write_serialized_bcs_helper()
#include <algorithm> // std::sort, std::unique, std::equal_range, std::min

// Local includes
#include "libmesh/xdr_io.h"
#include "libmesh/xdr_cxx.h"
#include "libmesh/enum_xdr_mode.h"
#include "libmesh/mesh_base.h"
#include "libmesh/node.h"
#include "libmesh/elem.h"
#include "libmesh/boundary_info.h"
#include "libmesh/parallel.h"
#include "libmesh/mesh_tools.h"
#include "libmesh/partitioner.h"
#include "libmesh/libmesh_logging.h"

namespace libMesh
{

//-----------------------------------------------
// anonymous namespace for implementation details
namespace {
struct DofBCData
{
  dof_id_type        dof_id;
  unsigned short int side;
  boundary_id_type   bc_id;

  // Default constructor
  DofBCData (dof_id_type dof_id_in=0,
             unsigned short int side_in=0,
             boundary_id_type bc_id_in=0) :
    dof_id(dof_id_in),
    side(side_in),
    bc_id(bc_id_in)
  {}

  // comparison operator
  bool operator < (const DofBCData & other) const
  {
    if (this->dof_id == other.dof_id)
      return (this->side < other.side);

    return this->dof_id < other.dof_id;
  }
};

// comparison operators
bool operator < (const unsigned int & other_dof_id,
                 const DofBCData & dof_bc)
{
  return other_dof_id < dof_bc.dof_id;
}

bool operator < (const DofBCData & dof_bc,
                 const unsigned int & other_dof_id)
{
  return dof_bc.dof_id < other_dof_id;
}
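
// With the two asymmetric orderings above, std::equal_range can
// binary-search a sorted std::vector<DofBCData> using a bare dof id
// as the key.  A minimal usage sketch (the container name "bc_data"
// is hypothetical):
//
//   std::vector<DofBCData> bc_data;
//   // ... fill ...
//   std::sort (bc_data.begin(), bc_data.end());
//   std::pair<std::vector<DofBCData>::iterator,
//             std::vector<DofBCData>::iterator> bounds =
//     std::equal_range (bc_data.begin(), bc_data.end(), 42u);
//   // [bounds.first, bounds.second) now spans every record with dof_id == 42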


// For some reason SunStudio does not seem to accept the above
// comparison functions for use in
// std::equal_range (ElemBCData::iterator, ElemBCData::iterator, unsigned int);
#if defined(__SUNPRO_CC) || defined(__PGI)
struct CompareIntDofBCData
{
  bool operator()(const unsigned int & other_dof_id,
                  const DofBCData & dof_bc)
  {
    return other_dof_id < dof_bc.dof_id;
  }

  bool operator()(const DofBCData & dof_bc,
                  const unsigned int & other_dof_id)
  {
    return dof_bc.dof_id < other_dof_id;
  }
};
#endif

template <class T, class U>
struct libmesh_type_is_same {
  static const bool value = false;
};

template <class T>
struct libmesh_type_is_same<T, T> {
  static const bool value = true;
};
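
// libmesh_type_is_same is a minimal compile-time type-equality test
// (a C++03 stand-in for std::is_same).  It is used when reading
// pre-1.3.0 files to detect platforms where uint64_t and unsigned
// long are the same type: libmesh_type_is_same<uint64_t, unsigned
// long>::value is true on typical LP64 systems and false where
// unsigned long is only 32 bits.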

}



// ------------------------------------------------------------
// XdrIO static data
const std::size_t XdrIO::io_blksize = 128000;
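
// Nodes and elements are streamed through processor 0 in blocks of
// io_blksize ids at a time, which bounds the memory needed for the
// gather buffers independently of the total mesh size.  For example,
// with io_blksize = 128000, a mesh with 300000 nodes is handled in
// three blocks: ids [0,128000), [128000,256000) and [256000,300000).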



// ------------------------------------------------------------
// XdrIO members
XdrIO::XdrIO (MeshBase & mesh, const bool binary_in) :
  MeshInput<MeshBase> (mesh, /* is_parallel_format = */ true),
  MeshOutput<MeshBase>(mesh, /* is_parallel_format = */ true),
  ParallelObject      (mesh),
  _binary             (binary_in),
  _legacy             (false),
  _write_serial       (false),
  _write_parallel     (false),
#ifdef LIBMESH_ENABLE_UNIQUE_ID
  _write_unique_id    (true),
#else
  _write_unique_id    (false),
#endif
  _field_width        (4), // In 0.7.0, all fields are 4 bytes, in 0.9.2+ they can vary
  _version            ("libMesh-1.3.0"),
  _bc_file_name       ("n/a"),
  _partition_map_file ("n/a"),
  _subdomain_map_file ("n/a"),
  _p_level_file       ("n/a")
{
}



XdrIO::XdrIO (const MeshBase & mesh, const bool binary_in) :
  MeshOutput<MeshBase>(mesh, /* is_parallel_format = */ true),
  ParallelObject      (mesh),
  _binary             (binary_in)
{
}



XdrIO::~XdrIO ()
{
}



void XdrIO::write (const std::string & name)
{
  if (this->legacy())
    libmesh_error_msg("We don't support writing parallel files in the legacy format.");

  Xdr io ((this->processor_id() == 0) ? name : "", this->binary() ? ENCODE : WRITE);

  START_LOG("write()","XdrIO");

  // convenient reference to our mesh
  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();

  new_header_id_type n_elem  = mesh.n_elem();
  new_header_id_type n_nodes = mesh.n_nodes();

  libmesh_assert(n_elem == mesh.n_elem());
  libmesh_assert(n_nodes == mesh.n_nodes());

  new_header_id_type n_side_bcs      = mesh.get_boundary_info().n_boundary_conds();
  new_header_id_type n_edge_bcs      = mesh.get_boundary_info().n_edge_conds();
  new_header_id_type n_shellface_bcs = mesh.get_boundary_info().n_shellface_conds();
  new_header_id_type n_nodesets      = mesh.get_boundary_info().n_nodeset_conds();
  unsigned int n_p_levels = MeshTools::n_p_levels (mesh);

  bool write_parallel_files = this->write_parallel();

  //-------------------------------------------------------------
  // For all the optional files -- the default file name is "n/a".
  // However, the user may specify an optional external file.

  // If there are BCs and the user has not already provided a
  // file name then write to "."
  if ((n_side_bcs || n_edge_bcs || n_shellface_bcs || n_nodesets) &&
      this->boundary_condition_file_name() == "n/a")
    this->boundary_condition_file_name() = ".";

  // If there are any subdomains and the user has not specified an
  // external file then write the subdomain mapping to the default file "."
  if ((mesh.n_subdomains() > 0) &&
      (this->subdomain_map_file_name() == "n/a"))
    this->subdomain_map_file_name() = ".";

  // In general we don't write the partition information.

  // If we have p levels and the user has not already provided
  // a file name then write to "."
  if ((n_p_levels > 1) &&
      (this->polynomial_level_file_name() == "n/a"))
    this->polynomial_level_file_name() = ".";

  // write the header
  if (this->processor_id() == 0)
    {
      std::string full_ver = this->version() + (write_parallel_files ? " parallel" : "");
      io.data (full_ver);

      io.data (n_elem,  "# number of elements");
      io.data (n_nodes, "# number of nodes");

      io.data (this->boundary_condition_file_name(), "# boundary condition specification file");
      io.data (this->subdomain_map_file_name(),      "# subdomain id specification file");
      io.data (this->partition_map_file_name(),      "# processor id specification file");
      io.data (this->polynomial_level_file_name(),   "# p-level specification file");

      // Version 0.9.2+ introduces sizes for each type
      new_header_id_type write_size = sizeof(xdr_id_type), zero_size = 0;

      const bool
        write_p_level      = ("." == this->polynomial_level_file_name()),
        write_partitioning = ("." == this->partition_map_file_name()),
        write_subdomain_id = ("." == this->subdomain_map_file_name()),
        write_bcs          = ("." == this->boundary_condition_file_name());

      io.data (write_size, "# type size");
      io.data (_write_unique_id ? write_size : zero_size, "# uid size");
      io.data (write_partitioning ? write_size : zero_size, "# pid size");
      io.data (write_subdomain_id ? write_size : zero_size, "# sid size");
      io.data (write_p_level ? write_size : zero_size, "# p-level size");
      // Boundary Condition sizes
      io.data (write_bcs ? write_size : zero_size, "# eid size");  // elem id
      io.data (write_bcs ? write_size : zero_size, "# side size"); // side number
      io.data (write_bcs ? write_size : zero_size, "# bid size");  // boundary id
    }
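
  // In ASCII (non-binary) mode the header written above comes out as,
  // for example (the counts and sizes here are purely illustrative):
  //
  //   libMesh-1.3.0
  //   1024    # number of elements
  //   1089    # number of nodes
  //   .       # boundary condition specification file
  //   .       # subdomain id specification file
  //   n/a     # processor id specification file
  //   n/a     # p-level specification file
  //   8       # type size
  //   8       # uid size
  //   ...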

  if (write_parallel_files)
    {
      // Parallel xdr mesh files aren't implemented yet; until they
      // are we'll just warn the user and write a serial file.
      libMesh::out << "Warning!  Parallel xda/xdr is not yet implemented.\n";
      libMesh::out << "Writing a serialized file instead." << std::endl;

      // write subdomain names
      this->write_serialized_subdomain_names(io);

      // write connectivity
      this->write_serialized_connectivity (io, n_elem);

      // write the nodal locations
      this->write_serialized_nodes (io, n_nodes);

      // write the side boundary condition information
      this->write_serialized_side_bcs (io, n_side_bcs);

      // write the nodeset information
      this->write_serialized_nodesets (io, n_nodesets);

      // write the edge boundary condition information
      this->write_serialized_edge_bcs (io, n_edge_bcs);

      // write the "shell face" boundary condition information
      this->write_serialized_shellface_bcs (io, n_shellface_bcs);
    }
  else
    {
      // write subdomain names
      this->write_serialized_subdomain_names(io);

      // write connectivity
      this->write_serialized_connectivity (io, n_elem);

      // write the nodal locations
      this->write_serialized_nodes (io, n_nodes);

      // write the side boundary condition information
      this->write_serialized_side_bcs (io, n_side_bcs);

      // write the nodeset information
      this->write_serialized_nodesets (io, n_nodesets);

      // write the edge boundary condition information
      this->write_serialized_edge_bcs (io, n_edge_bcs);

      // write the "shell face" boundary condition information
      this->write_serialized_shellface_bcs (io, n_shellface_bcs);
    }

  STOP_LOG("write()","XdrIO");

  // pause all processes until the writing ends -- this will
  // protect against the pathological case where a write is
  // followed immediately by a read.  The write must be
  // guaranteed to complete first.
  io.close();
  this->comm().barrier();
}



void XdrIO::write_serialized_subdomain_names (Xdr & io) const
{
  if (this->processor_id() == 0)
    {
      const MeshBase & mesh = MeshOutput<MeshBase>::mesh();

      const std::map<subdomain_id_type, std::string> & subdomain_map = mesh.get_subdomain_name_map();

      std::vector<new_header_id_type> subdomain_ids;
      subdomain_ids.reserve(subdomain_map.size());

      std::vector<std::string> subdomain_names;
      subdomain_names.reserve(subdomain_map.size());

      // We need to loop over the map and make sure that there aren't any invalid entries.  Since we
      // return writable references in mesh_base, it's possible for the user to leave some entity names
      // blank.  We can't write those to the XDA file.
      new_header_id_type n_subdomain_names = 0;
      std::map<subdomain_id_type, std::string>::const_iterator it_end = subdomain_map.end();
      for (std::map<subdomain_id_type, std::string>::const_iterator it = subdomain_map.begin(); it != it_end; ++it)
        {
          if (!it->second.empty())
            {
              n_subdomain_names++;
              subdomain_ids.push_back(it->first);
              subdomain_names.push_back(it->second);
            }
        }

      io.data(n_subdomain_names, "# subdomain id to name map");
      // Write out the ids and names in two vectors
      if (n_subdomain_names)
        {
          io.data(subdomain_ids);
          io.data(subdomain_names);
        }
    }
}



void XdrIO::write_serialized_connectivity (Xdr & io, const dof_id_type n_elem) const
{
  libmesh_assert (io.writing());

  const bool
    write_p_level      = ("." == this->polynomial_level_file_name()),
    write_partitioning = ("." == this->partition_map_file_name()),
    write_subdomain_id = ("." == this->subdomain_map_file_name());

  // convenient reference to our mesh
  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
  libmesh_assert_equal_to (n_elem, mesh.n_elem());

  // We will only write active elements and their parents.
  const unsigned int n_active_levels = MeshTools::n_active_levels (mesh);
  std::vector<xdr_id_type> n_global_elem_at_level(n_active_levels);

  MeshBase::const_element_iterator
    it  = mesh.local_level_elements_begin(0),
    end = mesh.local_level_elements_end(0);

  // Find the number of local and global elements at each level
#ifndef NDEBUG
  xdr_id_type tot_n_elem = 0;
#endif
  for (unsigned int level=0; level<n_active_levels; level++)
    {
      it  = mesh.local_level_elements_begin(level);
      end = mesh.local_level_elements_end(level);

      n_global_elem_at_level[level] = MeshTools::n_elem(it, end);

      this->comm().sum(n_global_elem_at_level[level]);
#ifndef NDEBUG
      tot_n_elem += n_global_elem_at_level[level];
#endif
      libmesh_assert_less_equal (n_global_elem_at_level[level], n_elem);
      libmesh_assert_less_equal (tot_n_elem, n_elem);
    }

  std::vector<xdr_id_type>
    xfer_conn, recv_conn;
  std::vector<dof_id_type>
    n_elem_on_proc(this->n_processors()), processor_offsets(this->n_processors());
  std::vector<xdr_id_type> output_buffer;
  std::vector<std::size_t>
    xfer_buf_sizes(this->n_processors());

#ifdef LIBMESH_ENABLE_AMR
  typedef std::map<dof_id_type, std::pair<processor_id_type, dof_id_type>> id_map_type;
  id_map_type parent_id_map, child_id_map;
#endif

  dof_id_type my_next_elem=0, next_global_elem=0;

  //-------------------------------------------
  // First write the level-0 elements directly.
  it  = mesh.local_level_elements_begin(0);
  end = mesh.local_level_elements_end(0);
  for (; it != end; ++it, ++my_next_elem)
    {
      pack_element (xfer_conn, *it);
#ifdef LIBMESH_ENABLE_AMR
      parent_id_map[(*it)->id()] = std::make_pair(this->processor_id(),
                                                  my_next_elem);
#endif
    }
  xfer_conn.push_back(my_next_elem); // toss in the number of elements transferred.

  std::size_t my_size = xfer_conn.size();
  this->comm().gather (0, my_next_elem, n_elem_on_proc);
  this->comm().gather (0, my_size,      xfer_buf_sizes);

  processor_offsets[0] = 0;
  for (unsigned int pid=1; pid<this->n_processors(); pid++)
    processor_offsets[pid] = processor_offsets[pid-1] + n_elem_on_proc[pid-1];
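
  // processor_offsets[pid] is the global id of the first level-0
  // element written by processor pid.  For example, if three
  // processors own 10, 7 and 5 level-0 elements respectively, the
  // offsets are {0, 10, 17}, so processor 1's local element k becomes
  // global element 10+k in the file.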

  // All processors send their xfer buffers to processor 0.
  // Processor 0 will receive the data and write out the elements.
  if (this->processor_id() == 0)
    {
      // Write the number of elements at this level.
      {
        std::string comment = "# n_elem at level 0", legend = ", [ type ";
        if (_write_unique_id)
          legend += "uid ";
        if (write_partitioning)
          legend += "pid ";
        if (write_subdomain_id)
          legend += "sid ";
        if (write_p_level)
          legend += "p_level ";
        legend += "(n0 ... nN-1) ]";
        comment += legend;
        io.data (n_global_elem_at_level[0], comment.c_str());
      }

      for (unsigned int pid=0; pid<this->n_processors(); pid++)
        {
          recv_conn.resize(xfer_buf_sizes[pid]);
          if (pid == 0)
            recv_conn = xfer_conn;
          else
            this->comm().receive (pid, recv_conn);

          // at a minimum, the buffer should contain the number of elements,
          // which could be 0.
          libmesh_assert (!recv_conn.empty());

          {
            const xdr_id_type n_elem_received = recv_conn.back();
            std::vector<xdr_id_type>::const_iterator recv_conn_iter = recv_conn.begin();

            for (xdr_id_type elem=0; elem<n_elem_received; elem++, next_global_elem++)
              {
                output_buffer.clear();

                // n. nodes
                const xdr_id_type n_nodes = *recv_conn_iter;
                ++recv_conn_iter;

                // type
                output_buffer.push_back(*recv_conn_iter);
                ++recv_conn_iter;

                // unique_id
                if (_write_unique_id)
                  output_buffer.push_back(*recv_conn_iter);
                ++recv_conn_iter;

                // processor id
                if (write_partitioning)
                  output_buffer.push_back(*recv_conn_iter);
                ++recv_conn_iter;

                // subdomain id
                if (write_subdomain_id)
                  output_buffer.push_back(*recv_conn_iter);
                ++recv_conn_iter;

#ifdef LIBMESH_ENABLE_AMR
                // p level
                if (write_p_level)
                  output_buffer.push_back(*recv_conn_iter);
                ++recv_conn_iter;
#endif
                for (dof_id_type node=0; node<n_nodes; node++, ++recv_conn_iter)
                  output_buffer.push_back(*recv_conn_iter);

                io.data_stream
                  (&output_buffer[0],
                   cast_int<unsigned int>(output_buffer.size()),
                   cast_int<unsigned int>(output_buffer.size()));
              }
          }
        }
    }
  else
    this->comm().send (0, xfer_conn);

#ifdef LIBMESH_ENABLE_AMR
  //--------------------------------------------------------------------
  // Next write the remaining elements indirectly through their parents.
  // This will ensure that the children are written in the proper order
  // so they can be reconstructed properly.
  for (unsigned int level=1; level<n_active_levels; level++)
    {
      xfer_conn.clear();

      it  = mesh.local_level_elements_begin(level-1);
      end = mesh.local_level_elements_end  (level-1);

      dof_id_type my_n_elem_written_at_level = 0;
      for (; it != end; ++it)
        if (!(*it)->active()) // we only want the parent elements at this level, and
          {                   // there is no direct iterator for this obscure use
            const Elem * parent = *it;
            id_map_type::iterator pos = parent_id_map.find(parent->id());
            libmesh_assert (pos != parent_id_map.end());
            const processor_id_type parent_pid = pos->second.first;
            const dof_id_type       parent_id  = pos->second.second;
            parent_id_map.erase(pos);

            for (auto & child : parent->child_ref_range())
              {
                pack_element (xfer_conn, &child, parent_id, parent_pid);

                // this approach introduces the possibility that we write
                // non-local elements.  These elements may well be parents
                // at the next step
                child_id_map[child.id()] = std::make_pair (child.processor_id(),
                                                           my_n_elem_written_at_level++);
                my_next_elem++;
              }
          }
      xfer_conn.push_back(my_n_elem_written_at_level);
      my_size = xfer_conn.size();
      this->comm().gather (0, my_size, xfer_buf_sizes);

      // Processor 0 will receive the data and write the elements.
      if (this->processor_id() == 0)
        {
          // Write the number of elements at this level.
          {
            char buf[80];
            std::sprintf(buf, "# n_elem at level %u", level);
            std::string comment(buf), legend = ", [ type ";

            if (_write_unique_id)
              legend += "uid ";
            legend += "parent ";
            if (write_partitioning)
              legend += "pid ";
            if (write_subdomain_id)
              legend += "sid ";
            if (write_p_level)
              legend += "p_level ";
            legend += "(n0 ... nN-1) ]";
            comment += legend;
            io.data (n_global_elem_at_level[level], comment.c_str());
          }

          for (unsigned int pid=0; pid<this->n_processors(); pid++)
            {
              recv_conn.resize(xfer_buf_sizes[pid]);
              if (pid == 0)
                recv_conn = xfer_conn;
              else
                this->comm().receive (pid, recv_conn);

              // at a minimum, the buffer should contain the number of elements,
              // which could be 0.
              libmesh_assert (!recv_conn.empty());

              {
                const xdr_id_type n_elem_received = recv_conn.back();
                std::vector<xdr_id_type>::const_iterator recv_conn_iter = recv_conn.begin();

                for (xdr_id_type elem=0; elem<n_elem_received; elem++, next_global_elem++)
                  {
                    output_buffer.clear();

                    // n. nodes
                    const xdr_id_type n_nodes = *recv_conn_iter;
                    ++recv_conn_iter;

                    // type
                    output_buffer.push_back(*recv_conn_iter);
                    ++recv_conn_iter;

                    // unique_id
                    if (_write_unique_id)
                      output_buffer.push_back(*recv_conn_iter);
                    ++recv_conn_iter;

                    // parent local id
                    const xdr_id_type parent_local_id = *recv_conn_iter;
                    ++recv_conn_iter;

                    // parent processor id
                    const xdr_id_type parent_pid = *recv_conn_iter;
                    ++recv_conn_iter;

                    output_buffer.push_back (parent_local_id+processor_offsets[parent_pid]);

                    // processor id
                    if (write_partitioning)
                      output_buffer.push_back(*recv_conn_iter);
                    ++recv_conn_iter;

                    // subdomain id
                    if (write_subdomain_id)
                      output_buffer.push_back(*recv_conn_iter);
                    ++recv_conn_iter;

                    // p level
                    if (write_p_level)
                      output_buffer.push_back(*recv_conn_iter);
                    ++recv_conn_iter;

                    for (xdr_id_type node=0; node<n_nodes; node++, ++recv_conn_iter)
                      output_buffer.push_back(*recv_conn_iter);

                    io.data_stream
                      (&output_buffer[0],
                       cast_int<unsigned int>(output_buffer.size()),
                       cast_int<unsigned int>(output_buffer.size()));
                  }
              }
            }
        }
      else
        this->comm().send (0, xfer_conn);

      // update the processor_offsets
      processor_offsets[0] = processor_offsets.back() + n_elem_on_proc.back();
      this->comm().gather (0, my_n_elem_written_at_level, n_elem_on_proc);
      for (unsigned int pid=1; pid<this->n_processors(); pid++)
        processor_offsets[pid] = processor_offsets[pid-1] + n_elem_on_proc[pid-1];

      // Now, at the next level we will again iterate over local parents.  However,
      // those parents may have been written by other processors (at this step),
      // so we need to gather them into our *_id_maps.
      {
        std::vector<std::vector<dof_id_type>> requested_ids(this->n_processors());
        std::vector<dof_id_type> request_to_fill;

        it  = mesh.local_level_elements_begin(level);
        end = mesh.local_level_elements_end(level);

        for (; it!=end; ++it)
          if (!child_id_map.count((*it)->id()))
            {
              libmesh_assert_not_equal_to ((*it)->parent()->processor_id(), this->processor_id());
              requested_ids[(*it)->parent()->processor_id()].push_back((*it)->id());
            }
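
        // The exchange below walks a standard "ring" pattern: at step p
        // every processor sends to (rank + p) % n_procs and receives
        // from (rank - p) % n_procs, so all processor pairs are covered
        // in n_procs - 1 steps without any rank needing two partners at
        // once.  For example, with 4 processors, processor 1 at step
        // p=1 sends its requests to processor 2 while filling the
        // requests arriving from processor 0.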
        // Next set the child_ids
        for (unsigned int p=1; p != this->n_processors(); ++p)
          {
            // Trade my requests with processors procup and procdown
            unsigned int procup = (this->processor_id() + p) %
                                   this->n_processors();
            unsigned int procdown = (this->n_processors() +
                                     this->processor_id() - p) %
                                     this->n_processors();

            this->comm().send_receive(procup,   requested_ids[procup],
                                      procdown, request_to_fill);

            // Fill those requests by overwriting the requested ids
            for (std::size_t i=0; i<request_to_fill.size(); i++)
              {
                libmesh_assert (child_id_map.count(request_to_fill[i]));
                libmesh_assert_equal_to (child_id_map[request_to_fill[i]].first, procdown);

                request_to_fill[i] = child_id_map[request_to_fill[i]].second;
              }

            // Trade back the results
            std::vector<dof_id_type> filled_request;
            this->comm().send_receive(procdown, request_to_fill,
                                      procup,   filled_request);

            libmesh_assert_equal_to (filled_request.size(), requested_ids[procup].size());

            for (std::size_t i=0; i<filled_request.size(); i++)
              child_id_map[requested_ids[procup][i]] =
                std::make_pair (procup,
                                filled_request[i]);
          }
        // overwrite the parent_id_map with the child_id_map, but
        // use std::map::swap() for efficiency.
        parent_id_map.swap(child_id_map);
        child_id_map.clear();
      }
    }
#endif // LIBMESH_ENABLE_AMR

  if (this->processor_id() == 0)
    libmesh_assert_equal_to (next_global_elem, n_elem);
}



void XdrIO::write_serialized_nodes (Xdr & io, const dof_id_type n_nodes) const
{
  // convenient reference to our mesh
  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
  libmesh_assert_equal_to (n_nodes, mesh.n_nodes());

  std::vector<dof_id_type> xfer_ids;
  std::vector<Real>        xfer_coords;
  std::vector<Real> &      coords=xfer_coords;

  std::vector<std::vector<dof_id_type>> recv_ids   (this->n_processors());
  std::vector<std::vector<Real>>        recv_coords(this->n_processors());

#ifdef LIBMESH_ENABLE_UNIQUE_ID
  std::vector<xdr_id_type> xfer_unique_ids;
  std::vector<xdr_id_type> & unique_ids=xfer_unique_ids;
  std::vector<std::vector<xdr_id_type>> recv_unique_ids (this->n_processors());
#endif // LIBMESH_ENABLE_UNIQUE_ID

  std::size_t n_written=0;

  for (std::size_t blk=0, last_node=0; last_node<n_nodes; blk++)
    {
      const std::size_t first_node = blk*io_blksize;
      last_node = std::min((blk+1)*io_blksize, std::size_t(n_nodes));

      // Build up the xfer buffers on each processor
      xfer_ids.clear();
      xfer_coords.clear();
#ifdef LIBMESH_ENABLE_UNIQUE_ID
      xfer_unique_ids.clear();
#endif // LIBMESH_ENABLE_UNIQUE_ID

      for (const auto & node : mesh.local_node_ptr_range())
        if ((node->id() >= first_node) && // node in [first_node, last_node)
            (node->id() <  last_node))
          {
            xfer_ids.push_back(node->id());
#ifdef LIBMESH_ENABLE_UNIQUE_ID
            xfer_unique_ids.push_back(node->unique_id());
#endif // LIBMESH_ENABLE_UNIQUE_ID
            xfer_coords.push_back((*node)(0));
#if LIBMESH_DIM > 1
            xfer_coords.push_back((*node)(1));
#endif
#if LIBMESH_DIM > 2
            xfer_coords.push_back((*node)(2));
#endif
          }

      //-------------------------------------
      // Send the xfer buffers to processor 0
      std::vector<std::size_t> ids_size;

      const std::size_t my_ids_size = xfer_ids.size();

      // explicitly gather ids_size
      this->comm().gather (0, my_ids_size, ids_size);

      // We will have lots of simultaneous receives if we are
      // processor 0, so let's use nonblocking receives.
      std::vector<Parallel::Request>
#ifdef LIBMESH_ENABLE_UNIQUE_ID
        unique_id_request_handles(this->n_processors()-1),
#endif // LIBMESH_ENABLE_UNIQUE_ID
        id_request_handles(this->n_processors()-1),
        coord_request_handles(this->n_processors()-1);

      const Parallel::MessageTag
#ifdef LIBMESH_ENABLE_UNIQUE_ID
        unique_id_tag = mesh.comm().get_unique_tag(1233),
#endif // LIBMESH_ENABLE_UNIQUE_ID
        id_tag    = mesh.comm().get_unique_tag(1234),
        coord_tag = mesh.comm().get_unique_tag(1235);
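
      // Using distinct tags for the id, coordinate and unique-id
      // streams keeps messages from the different buffers (and from
      // successive blocks of this loop) from being matched to the
      // wrong receive on processor 0.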

      // Post the receives -- do this on processor 0 only.
      if (this->processor_id() == 0)
        {
          for (unsigned int pid=0; pid<this->n_processors(); pid++)
            {
              recv_ids[pid].resize(ids_size[pid]);
              recv_coords[pid].resize(ids_size[pid]*LIBMESH_DIM);
#ifdef LIBMESH_ENABLE_UNIQUE_ID
              recv_unique_ids[pid].resize(ids_size[pid]);
#endif // LIBMESH_ENABLE_UNIQUE_ID

              if (pid == 0)
                {
                  recv_ids[0]    = xfer_ids;
                  recv_coords[0] = xfer_coords;
#ifdef LIBMESH_ENABLE_UNIQUE_ID
                  recv_unique_ids[0] = xfer_unique_ids;
#endif // LIBMESH_ENABLE_UNIQUE_ID
                }
              else
                {
                  this->comm().receive (pid, recv_ids[pid],
                                        id_request_handles[pid-1],
                                        id_tag);
                  this->comm().receive (pid, recv_coords[pid],
                                        coord_request_handles[pid-1],
                                        coord_tag);
#ifdef LIBMESH_ENABLE_UNIQUE_ID
                  this->comm().receive (pid, recv_unique_ids[pid],
                                        unique_id_request_handles[pid-1],
                                        unique_id_tag);
#endif // LIBMESH_ENABLE_UNIQUE_ID
                }
            }
        }
      else
        {
          // Send -- do this on all other processors.
          this->comm().send(0, xfer_ids,    id_tag);
          this->comm().send(0, xfer_coords, coord_tag);
#ifdef LIBMESH_ENABLE_UNIQUE_ID
          this->comm().send(0, xfer_unique_ids, unique_id_tag);
#endif // LIBMESH_ENABLE_UNIQUE_ID
        }

      // -------------------------------------------------------
      // Receive the messages and write the output on processor 0.
      if (this->processor_id() == 0)
        {
          // Wait for all the receives to complete.  We have no
          // need for the statuses since we already know the
          // buffer sizes.
          Parallel::wait (id_request_handles);
          Parallel::wait (coord_request_handles);
#ifdef LIBMESH_ENABLE_UNIQUE_ID
          Parallel::wait (unique_id_request_handles);
#endif // LIBMESH_ENABLE_UNIQUE_ID

          // Write the coordinates in this block.
          std::size_t tot_id_size=0;

          for (unsigned int pid=0; pid<this->n_processors(); pid++)
            {
              tot_id_size += recv_ids[pid].size();
              libmesh_assert_equal_to(recv_coords[pid].size(),
                                      recv_ids[pid].size()*LIBMESH_DIM);
#ifdef LIBMESH_ENABLE_UNIQUE_ID
              libmesh_assert_equal_to
                (recv_ids[pid].size(), recv_unique_ids[pid].size());
#endif // LIBMESH_ENABLE_UNIQUE_ID
            }

          libmesh_assert_less_equal
            (tot_id_size, std::min(io_blksize, std::size_t(n_nodes)));

          coords.resize (3*tot_id_size);
#ifdef LIBMESH_ENABLE_UNIQUE_ID
          unique_ids.resize(tot_id_size);
#endif

          for (unsigned int pid=0; pid<this->n_processors(); pid++)
            for (std::size_t idx=0; idx<recv_ids[pid].size(); idx++)
              {
                const std::size_t local_idx = recv_ids[pid][idx] - first_node;

#ifdef LIBMESH_ENABLE_UNIQUE_ID
                libmesh_assert_less (local_idx, unique_ids.size());

                unique_ids[local_idx] = recv_unique_ids[pid][idx];
#endif

                libmesh_assert_less ((3*local_idx+2), coords.size());
                libmesh_assert_less ((LIBMESH_DIM*idx+LIBMESH_DIM-1), recv_coords[pid].size());

                coords[3*local_idx+0] = recv_coords[pid][LIBMESH_DIM*idx+0];
#if LIBMESH_DIM > 1
                coords[3*local_idx+1] = recv_coords[pid][LIBMESH_DIM*idx+1];
#else
                coords[3*local_idx+1] = 0.;
#endif
#if LIBMESH_DIM > 2
                coords[3*local_idx+2] = recv_coords[pid][LIBMESH_DIM*idx+2];
#else
                coords[3*local_idx+2] = 0.;
#endif

                n_written++;
              }

          io.data_stream (coords.empty() ? libmesh_nullptr : &coords[0],
                          cast_int<unsigned int>(coords.size()), 3);
        }
    }

  if (this->processor_id() == 0)
    libmesh_assert_equal_to (n_written, n_nodes);

#ifdef LIBMESH_ENABLE_UNIQUE_ID
  // XDR unsigned char doesn't work as anticipated
  unsigned short write_unique_ids = 1;
#else
  unsigned short write_unique_ids = 0;
#endif
  if (this->processor_id() == 0)
    io.data (write_unique_ids, "# presence of unique ids");

#ifdef LIBMESH_ENABLE_UNIQUE_ID
  n_written = 0;

  for (std::size_t blk=0, last_node=0; last_node<n_nodes; blk++)
    {
      const std::size_t first_node = blk*io_blksize;
      last_node = std::min((blk+1)*io_blksize, std::size_t(n_nodes));

      // Build up the xfer buffers on each processor
      xfer_ids.clear();
      xfer_unique_ids.clear();

      for (const auto & node : mesh.local_node_ptr_range())
        if ((node->id() >= first_node) && // node in [first_node, last_node)
            (node->id() <  last_node))
          {
            xfer_ids.push_back(node->id());
            xfer_unique_ids.push_back(node->unique_id());
          }

      //-------------------------------------
      // Send the xfer buffers to processor 0
      std::vector<std::size_t> ids_size;

      const std::size_t my_ids_size = xfer_ids.size();

      // explicitly gather ids_size
      this->comm().gather (0, my_ids_size, ids_size);

      // We will have lots of simultaneous receives if we are
      // processor 0, so let's use nonblocking receives.
      std::vector<Parallel::Request>
        unique_id_request_handles(this->n_processors()-1),
        id_request_handles(this->n_processors()-1);

      const Parallel::MessageTag
        unique_id_tag = mesh.comm().get_unique_tag(1236),
        id_tag        = mesh.comm().get_unique_tag(1237);

      // Post the receives -- do this on processor 0 only.
      if (this->processor_id() == 0)
        {
          for (unsigned int pid=0; pid<this->n_processors(); pid++)
            {
              recv_ids[pid].resize(ids_size[pid]);
              recv_unique_ids[pid].resize(ids_size[pid]);

              if (pid == 0)
                {
                  recv_ids[0]        = xfer_ids;
                  recv_unique_ids[0] = xfer_unique_ids;
                }
              else
                {
                  this->comm().receive (pid, recv_ids[pid],
                                        id_request_handles[pid-1],
                                        id_tag);
                  this->comm().receive (pid, recv_unique_ids[pid],
                                        unique_id_request_handles[pid-1],
                                        unique_id_tag);
                }
            }
        }
      else
        {
          // Send -- do this on all other processors.
          this->comm().send(0, xfer_ids,        id_tag);
          this->comm().send(0, xfer_unique_ids, unique_id_tag);
        }

      // -------------------------------------------------------
      // Receive the messages and write the output on processor 0.
      if (this->processor_id() == 0)
        {
          // Wait for all the receives to complete.  We have no
          // need for the statuses since we already know the
          // buffer sizes.
          Parallel::wait (id_request_handles);
          Parallel::wait (unique_id_request_handles);

          // Write the unique ids in this block.
          std::size_t tot_id_size=0;

          for (unsigned int pid=0; pid<this->n_processors(); pid++)
            {
              tot_id_size += recv_ids[pid].size();
              libmesh_assert_equal_to
                (recv_ids[pid].size(), recv_unique_ids[pid].size());
            }

          libmesh_assert_less_equal
            (tot_id_size, std::min(io_blksize, std::size_t(n_nodes)));

          unique_ids.resize(tot_id_size);

          for (unsigned int pid=0; pid<this->n_processors(); pid++)
            for (std::size_t idx=0; idx<recv_ids[pid].size(); idx++)
              {
                const std::size_t local_idx = recv_ids[pid][idx] - first_node;

                libmesh_assert_less (local_idx, unique_ids.size());

                unique_ids[local_idx] = recv_unique_ids[pid][idx];

                n_written++;
              }

          io.data_stream (unique_ids.empty() ? libmesh_nullptr : &unique_ids[0],
                          cast_int<unsigned int>(unique_ids.size()), 1);
        }
    }

  if (this->processor_id() == 0)
    libmesh_assert_equal_to (n_written, n_nodes);

#endif // LIBMESH_ENABLE_UNIQUE_ID
}



void XdrIO::write_serialized_bcs_helper (Xdr & io, const new_header_id_type n_bcs, const std::string bc_type) const
{
  libmesh_assert (io.writing());

  // convenient reference to our mesh
  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();

  // and our boundary info object
  const BoundaryInfo & boundary_info = mesh.get_boundary_info();

  // Version 0.9.2+ introduces entity names
  write_serialized_bc_names(io, boundary_info, true); // sideset names

  new_header_id_type n_bcs_out = n_bcs;
  if (this->processor_id() == 0)
    {
      std::stringstream comment_string;
      comment_string << "# number of " << bc_type << " boundary conditions";
      io.data (n_bcs_out, comment_string.str().c_str());
    }
  n_bcs_out = 0;

  if (!n_bcs) return;

  std::vector<xdr_id_type> xfer_bcs, recv_bcs;
  std::vector<std::size_t> bc_sizes(this->n_processors());

  // Boundary conditions are only specified for level-0 elements
  MeshBase::const_element_iterator
    it  = mesh.local_level_elements_begin(0),
    end = mesh.local_level_elements_end(0);

  // Container to catch boundary IDs handed back by BoundaryInfo
  std::vector<boundary_id_type> bc_ids;

  dof_id_type n_local_level_0_elem=0;
  for (; it!=end; ++it, n_local_level_0_elem++)
    {
      const Elem * elem = *it;

      if (bc_type == "side")
        {
          for (auto s : elem->side_index_range())
            {
              boundary_info.boundary_ids (elem, s, bc_ids);
              for (std::vector<boundary_id_type>::const_iterator id_it=bc_ids.begin(); id_it!=bc_ids.end(); ++id_it)
                {
                  const boundary_id_type bc_id = *id_it;
                  if (bc_id != BoundaryInfo::invalid_id)
                    {
                      xfer_bcs.push_back (n_local_level_0_elem);
                      xfer_bcs.push_back (s);
                      xfer_bcs.push_back (bc_id);
                    }
                }
            }
        }
      else if (bc_type == "edge")
        {
          for (auto e : elem->edge_index_range())
            {
              boundary_info.edge_boundary_ids (elem, e, bc_ids);
              for (std::vector<boundary_id_type>::const_iterator id_it=bc_ids.begin(); id_it!=bc_ids.end(); ++id_it)
                {
                  const boundary_id_type bc_id = *id_it;
                  if (bc_id != BoundaryInfo::invalid_id)
                    {
                      xfer_bcs.push_back (n_local_level_0_elem);
                      xfer_bcs.push_back (e);
                      xfer_bcs.push_back (bc_id);
                    }
                }
            }
        }
      else if (bc_type == "shellface")
        {
          for (unsigned short sf=0; sf<2; sf++)
            {
              boundary_info.shellface_boundary_ids (elem, sf, bc_ids);
              for (std::vector<boundary_id_type>::const_iterator id_it=bc_ids.begin(); id_it!=bc_ids.end(); ++id_it)
                {
                  const boundary_id_type bc_id = *id_it;
                  if (bc_id != BoundaryInfo::invalid_id)
                    {
                      xfer_bcs.push_back (n_local_level_0_elem);
                      xfer_bcs.push_back (sf);
                      xfer_bcs.push_back (bc_id);
                    }
                }
            }
        }
      else
        {
          libmesh_error_msg("bc_type not recognized: " + bc_type);
        }
    }

  xfer_bcs.push_back(n_local_level_0_elem);
  std::size_t my_size = xfer_bcs.size();
  this->comm().gather (0, my_size, bc_sizes);

  // All processors send their xfer buffers to processor 0
  // Processor 0 will receive all buffers and write out the bcs
  if (this->processor_id() == 0)
    {
      dof_id_type elem_offset = 0;
      for (unsigned int pid=0; pid<this->n_processors(); pid++)
        {
          recv_bcs.resize(bc_sizes[pid]);
          if (pid == 0)
            recv_bcs = xfer_bcs;
          else
            this->comm().receive (pid, recv_bcs);

          const dof_id_type my_n_local_level_0_elem
            = cast_int<dof_id_type>(recv_bcs.back());
          recv_bcs.pop_back();

          for (std::size_t idx=0; idx<recv_bcs.size(); idx += 3, n_bcs_out++)
            recv_bcs[idx+0] += elem_offset;

          io.data_stream (recv_bcs.empty() ? libmesh_nullptr : &recv_bcs[0],
                          cast_int<unsigned int>(recv_bcs.size()), 3);
          elem_offset += my_n_local_level_0_elem;
        }
      libmesh_assert_equal_to (n_bcs, n_bcs_out);
    }
  else
    this->comm().send (0, xfer_bcs);
}



void XdrIO::write_serialized_side_bcs (Xdr & io, const new_header_id_type n_side_bcs) const
{
  write_serialized_bcs_helper(io, n_side_bcs, "side");
}



void XdrIO::write_serialized_edge_bcs (Xdr & io, const new_header_id_type n_edge_bcs) const
{
  write_serialized_bcs_helper(io, n_edge_bcs, "edge");
}



void XdrIO::write_serialized_shellface_bcs (Xdr & io, const new_header_id_type n_shellface_bcs) const
{
  write_serialized_bcs_helper(io, n_shellface_bcs, "shellface");
}



void XdrIO::write_serialized_nodesets (Xdr & io, const new_header_id_type n_nodesets) const
{
  libmesh_assert (io.writing());

  // convenient reference to our mesh
  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();

  // and our boundary info object
  const BoundaryInfo & boundary_info = mesh.get_boundary_info();

  // Version 0.9.2+ introduces entity names
  write_serialized_bc_names(io, boundary_info, false); // nodeset names

  new_header_id_type n_nodesets_out = n_nodesets;
  if (this->processor_id() == 0)
    io.data (n_nodesets_out, "# number of nodesets");
  n_nodesets_out = 0;

  if (!n_nodesets) return;

  std::vector<xdr_id_type> xfer_bcs, recv_bcs;
  std::vector<std::size_t> bc_sizes(this->n_processors());

  // Container to catch boundary IDs handed back by BoundaryInfo
  std::vector<boundary_id_type> nodeset_ids;

  dof_id_type n_node=0;
  for (const auto & node : mesh.local_node_ptr_range())
    {
      boundary_info.boundary_ids (node, nodeset_ids);
      for (std::vector<boundary_id_type>::const_iterator id_it=nodeset_ids.begin(); id_it!=nodeset_ids.end(); ++id_it)
        {
          const boundary_id_type bc_id = *id_it;
          if (bc_id != BoundaryInfo::invalid_id)
            {
              xfer_bcs.push_back (node->id());
              xfer_bcs.push_back (bc_id);
            }
        }
    }

  xfer_bcs.push_back(n_node);
  std::size_t my_size = xfer_bcs.size();
  this->comm().gather (0, my_size, bc_sizes);

  // All processors send their xfer buffers to processor 0
  // Processor 0 will receive all buffers and write out the bcs
  if (this->processor_id() == 0)
    {
      dof_id_type node_offset = 0;
      for (unsigned int pid=0; pid<this->n_processors(); pid++)
        {
          recv_bcs.resize(bc_sizes[pid]);
          if (pid == 0)
            recv_bcs = xfer_bcs;
          else
            this->comm().receive (pid, recv_bcs);

          const dof_id_type my_n_node =
            cast_int<dof_id_type>(recv_bcs.back());
          recv_bcs.pop_back();

          for (std::size_t idx=0; idx<recv_bcs.size(); idx += 2, n_nodesets_out++)
            recv_bcs[idx+0] += node_offset;

          io.data_stream (recv_bcs.empty() ? libmesh_nullptr : &recv_bcs[0],
                          cast_int<unsigned int>(recv_bcs.size()), 2);
          node_offset += my_n_node;
        }
      libmesh_assert_equal_to (n_nodesets, n_nodesets_out);
    }
  else
    this->comm().send (0, xfer_bcs);
}



void XdrIO::write_serialized_bc_names (Xdr & io, const BoundaryInfo & info, bool is_sideset) const
{
  if (this->processor_id() == 0)
    {
      const std::map<boundary_id_type, std::string> & boundary_map = is_sideset ?
        info.get_sideset_name_map() : info.get_nodeset_name_map();

      std::vector<new_header_id_type> boundary_ids;
      boundary_ids.reserve(boundary_map.size());

      std::vector<std::string> boundary_names;
      boundary_names.reserve(boundary_map.size());

      // We need to loop over the map and make sure that there aren't any invalid entries.  Since we
      // return writable references in boundary_info, it's possible for the user to leave some entity names
      // blank.  We can't write those to the XDA file.
      new_header_id_type n_boundary_names = 0;
      std::map<boundary_id_type, std::string>::const_iterator it_end = boundary_map.end();
      for (std::map<boundary_id_type, std::string>::const_iterator it = boundary_map.begin(); it != it_end; ++it)
        {
          if (!it->second.empty())
            {
              n_boundary_names++;
              boundary_ids.push_back(it->first);
              boundary_names.push_back(it->second);
            }
        }

      if (is_sideset)
        io.data(n_boundary_names, "# sideset id to name map");
      else
        io.data(n_boundary_names, "# nodeset id to name map");
      // Write out the ids and names in two vectors
      if (n_boundary_names)
        {
          io.data(boundary_ids);
          io.data(boundary_names);
        }
    }
}



void XdrIO::read (const std::string & name)
{
  LOG_SCOPE("read()","XdrIO");

  // Only open the file on processor 0 -- this is especially important because
  // there may be an underlying bzip/bunzip going on, and multiple simultaneous
  // calls will produce a race condition.
  Xdr io (this->processor_id() == 0 ? name : "", this->binary() ? DECODE : READ);

  // convenient reference to our mesh
  MeshBase & mesh = MeshInput<MeshBase>::mesh();

  // get the version string.
  if (this->processor_id() == 0)
    io.data (this->version());
  this->comm().broadcast (this->version());

  // note that for "legacy" files the first entry is an
  // integer -- not a string at all.
  this->legacy() = !(this->version().find("libMesh") < this->version().size());
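
  // std::string::find() returns npos (which is larger than size())
  // when the substring is absent, so the test above is true exactly
  // when the header does contain "libMesh" somewhere -- e.g. the
  // string "libMesh-1.3.0 parallel" is recognized as non-legacy.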

  // Check for a legacy version format.
  if (this->legacy())
    libmesh_error_msg("We no longer support reading files in the legacy format.");

  // Read headers with the old id type if they're pre-1.3.0, or with
  // the new id type if they're post-1.3.0
  std::vector<new_header_id_type> meta_data(10, sizeof(xdr_id_type));
  if (this->version_at_least_1_3_0())
    {
      this->read_header(io, meta_data);
    }
  else
    {
      std::vector<old_header_id_type> old_data(10, sizeof(xdr_id_type));

      this->read_header(io, old_data);

      meta_data.assign(old_data.begin(), old_data.end());
    }

  const new_header_id_type & n_elem  = meta_data[0];
  const new_header_id_type & n_nodes = meta_data[1];

  // Version 0.9.2+ introduces type sizes into the header; pick up
  // the stored field width if it is present.
  if (version_at_least_0_9_2())
    _field_width = meta_data[2];

  // On systems where uint64_t==unsigned long, we were previously
  // writing 64-bit unsigned integers via xdr_u_long(), a function
  // which is literally less suited for that task than abort() would
  // have been, because at least abort() would have *known* it
  // couldn't write rather than truncating writes to 32 bits.
  //
  // If we have files with version < 1.3.0, then we'll continue to use
  // 32 bit field width, regardless of whether the file thinks we
  // should, whenever we're on a system where the problem would have
  // occurred.
  if ((_field_width == 4) ||
      (!version_at_least_1_3_0() &&
       libmesh_type_is_same<uint64_t, unsigned long>::value))
    {
      uint32_t type_size = 0;

      // read subdomain names
      this->read_serialized_subdomain_names(io);

      // read connectivity
      this->read_serialized_connectivity (io, n_elem, meta_data, type_size);

      // read the nodal locations
      this->read_serialized_nodes (io, n_nodes);

      // read the side boundary conditions
      this->read_serialized_side_bcs (io, type_size);

      if (version_at_least_0_9_2())
        // read the nodesets
        this->read_serialized_nodesets (io, type_size);

      if (version_at_least_1_1_0())
        {
          // read the edge boundary conditions
          this->read_serialized_edge_bcs (io, type_size);

          // read the "shell face" boundary conditions
          this->read_serialized_shellface_bcs (io, type_size);
        }
    }
  else if (_field_width == 8)
    {
      uint64_t type_size = 0;

      // read subdomain names
      this->read_serialized_subdomain_names(io);

      // read connectivity
      this->read_serialized_connectivity (io, n_elem, meta_data, type_size);

      // read the nodal locations
      this->read_serialized_nodes (io, n_nodes);

      // read the boundary conditions
      this->read_serialized_side_bcs (io, type_size);

      if (version_at_least_0_9_2())
        // read the nodesets
        this->read_serialized_nodesets (io, type_size);

      if (version_at_least_1_1_0())
        {
          // read the edge boundary conditions
          this->read_serialized_edge_bcs (io, type_size);

          // read the "shell face" boundary conditions
          this->read_serialized_shellface_bcs (io, type_size);
        }
    }

  // set the node processor ids
  Partitioner::set_node_processor_ids(mesh);
}



template <typename T>
void XdrIO::read_header (Xdr & io, std::vector<T> & meta_data)
{
  LOG_SCOPE("read_header()","XdrIO");

  // convenient reference to our mesh
  MeshBase & mesh = MeshInput<MeshBase>::mesh();

  if (this->processor_id() == 0)
    {
      unsigned int pos=0;

      io.data (meta_data[pos++]);
      io.data (meta_data[pos++]);
      io.data (this->boundary_condition_file_name()); // libMesh::out << "bc_file="  << this->boundary_condition_file_name() << std::endl;
      io.data (this->subdomain_map_file_name());      // libMesh::out << "sid_file=" << this->subdomain_map_file_name()      << std::endl;
      io.data (this->partition_map_file_name());      // libMesh::out << "pid_file=" << this->partition_map_file_name()      << std::endl;
      io.data (this->polynomial_level_file_name());   // libMesh::out << "pl_file="  << this->polynomial_level_file_name()   << std::endl;

      if (version_at_least_0_9_2())
        {
          io.data (meta_data[pos++], "# type size");
          io.data (meta_data[pos++], "# uid size");
          io.data (meta_data[pos++], "# pid size");
          io.data (meta_data[pos++], "# sid size");
          io.data (meta_data[pos++], "# p-level size");
          // Boundary Condition sizes
          io.data (meta_data[pos++], "# eid size");  // elem id
          io.data (meta_data[pos++], "# side size"); // side number
          io.data (meta_data[pos++], "# bid size");  // boundary id
        }
    }

  // broadcast the n_elems, n_nodes, and size information
  this->comm().broadcast (meta_data);

  this->comm().broadcast (this->boundary_condition_file_name());
  this->comm().broadcast (this->subdomain_map_file_name());
  this->comm().broadcast (this->partition_map_file_name());
  this->comm().broadcast (this->polynomial_level_file_name());

  // Tell the mesh how many nodes/elements to expect.  Depending on the mesh type,
  // this may allow for efficient adding of nodes/elements.
  const T & n_elem  = meta_data[0];
  const T & n_nodes = meta_data[1];

  mesh.reserve_elem(n_elem);
  mesh.reserve_nodes(n_nodes);

  // Our mesh is pre-partitioned as it's created
  this->set_n_partitions(this->n_processors());

  // Version 0.9.2+ introduces type sizes into the header; pick up
  // the stored field width if it is present.
  if (version_at_least_0_9_2())
    _field_width = meta_data[2];
}



void XdrIO::read_serialized_subdomain_names (Xdr & io)
{
  const bool read_entity_info = version_at_least_0_9_2();
  const bool use_new_header_type (this->version_at_least_1_3_0());
  if (read_entity_info)
    {
      MeshBase & mesh = MeshInput<MeshBase>::mesh();

      new_header_id_type n_subdomain_names = 0;
      std::vector<new_header_id_type> subdomain_ids;
      std::vector<std::string> subdomain_names;

      // Read the subdomain names
      if (this->processor_id() == 0)
        {
          if (use_new_header_type)
            io.data(n_subdomain_names);
          else
            {
              old_header_id_type temp;
              io.data(temp);
              n_subdomain_names = temp;
            }

          subdomain_ids.resize(n_subdomain_names);
          subdomain_names.resize(n_subdomain_names);

          if (n_subdomain_names)
            {
              if (use_new_header_type)
                io.data(subdomain_ids);
              else
                {
                  std::vector<old_header_id_type> temp;
                  io.data(temp);
                  subdomain_ids.assign(temp.begin(), temp.end());
                }

              io.data(subdomain_names);
            }
        }

      // Broadcast the subdomain names to all processors
      this->comm().broadcast(n_subdomain_names);
      if (n_subdomain_names == 0)
        return;

      subdomain_ids.resize(n_subdomain_names);
      subdomain_names.resize(n_subdomain_names);
      this->comm().broadcast(subdomain_ids);
      this->comm().broadcast(subdomain_names);

      // Reassemble the named subdomain information
      std::map<subdomain_id_type, std::string> & subdomain_map = mesh.set_subdomain_name_map();

      for (unsigned int i=0; i<n_subdomain_names; ++i)
        subdomain_map.insert(std::make_pair(subdomain_ids[i], subdomain_names[i]));
    }
}


template <typename T>
void XdrIO::read_serialized_connectivity (Xdr & io, const dof_id_type n_elem, std::vector<new_header_id_type> & sizes, T)
{
  libmesh_assert (io.reading());

  if (!n_elem) return;

  const bool
    read_p_level      = ("." == this->polynomial_level_file_name()),
    read_partitioning = ("." == this->partition_map_file_name()),
    read_subdomain_id = ("." == this->subdomain_map_file_name());

  // convenient reference to our mesh
  MeshBase & mesh = MeshInput<MeshBase>::mesh();

  // Keep track of what kinds of elements this file contains
  elems_of_dimension.clear();
  elems_of_dimension.resize(4, false);

  std::vector<T> conn, input_buffer(100 /* oversized ! */);

  int level=-1;

  // Version 0.9.2+ introduces unique ids
  const size_t unique_id_size_index = 3;

  const bool read_unique_id =
    (version_at_least_0_9_2()) &&
    sizes[unique_id_size_index];
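
  // Each element record read below is laid out as
  //
  //   [ type (uid) (parent) (pid) (sid) (p_level) n0 ... nN-1 ]
  //
  // where the parenthesized entries are only present when the file
  // version/header declares them, and N is fixed by the element type.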

  T n_elem_at_level=0, n_processed_at_level=0;
  for (dof_id_type blk=0, first_elem=0, last_elem=0;
       last_elem<n_elem; blk++)
    {
      first_elem = cast_int<dof_id_type>(blk*io_blksize);
      last_elem  = cast_int<dof_id_type>(std::min(cast_int<std::size_t>((blk+1)*io_blksize),
                                                  cast_int<std::size_t>(n_elem)));

      conn.clear();

      if (this->processor_id() == 0)
        for (dof_id_type e=first_elem; e<last_elem; e++, n_processed_at_level++)
          {
            if (n_processed_at_level == n_elem_at_level)
              {
                // get the number of elements to read at this level
                io.data (n_elem_at_level);
                n_processed_at_level = 0;
                level++;
              }

            unsigned int pos = 0;
            // get the element type,
            io.data_stream (&input_buffer[pos++], 1);

            if (read_unique_id)
              io.data_stream (&input_buffer[pos++], 1);
            // Older versions won't have this field at all (no increment on pos)

            // maybe the parent
            if (level)
              io.data_stream (&input_buffer[pos++], 1);
            else
              // We can't always fit DofObject::invalid_id in an
              // xdr_id_type
              input_buffer[pos++] = static_cast<T>(-1);

            // maybe the processor id
            if (read_partitioning)
              io.data_stream (&input_buffer[pos++], 1);
            else
              input_buffer[pos++] = 0;

            // maybe the subdomain id
            if (read_subdomain_id)
              io.data_stream (&input_buffer[pos++], 1);
            else
              input_buffer[pos++] = 0;

            // maybe the p level
            if (read_p_level)
              io.data_stream (&input_buffer[pos++], 1);
            else
              input_buffer[pos++] = 0;

            // and all the nodes
            libmesh_assert_less (pos+Elem::type_to_n_nodes_map[input_buffer[0]], input_buffer.size());
            io.data_stream (&input_buffer[pos], Elem::type_to_n_nodes_map[input_buffer[0]]);
            conn.insert (conn.end(),
                         input_buffer.begin(),
                         input_buffer.begin() + pos + Elem::type_to_n_nodes_map[input_buffer[0]]);
          }

      std::size_t conn_size = conn.size();
      this->comm().broadcast(conn_size);
      conn.resize (conn_size);
      this->comm().broadcast (conn);

      // All processors now have the connectivity for this block.
      typename std::vector<T>::const_iterator it = conn.begin();
      for (dof_id_type e=first_elem; e<last_elem; e++)
        {
          const ElemType elem_type = static_cast<ElemType>(*it); ++it;
#ifdef LIBMESH_ENABLE_UNIQUE_ID
          // We are on all processors here, so we can easily assign
          // consistent unique ids if the file doesn't specify them
          // later.
          unique_id_type unique_id = e;
#endif
          if (read_unique_id)
            {
#ifdef LIBMESH_ENABLE_UNIQUE_ID
              unique_id = cast_int<unique_id_type>(*it);
#endif
              ++it;
            }
          const dof_id_type parent_id =
            (*it == static_cast<T>(-1)) ?
            DofObject::invalid_id :
            cast_int<dof_id_type>(*it);
          ++it;
          const processor_id_type proc_id =
            cast_int<processor_id_type>(*it);
          ++it;
          const subdomain_id_type subdomain_id =
            cast_int<subdomain_id_type>(*it);
          ++it;
#ifdef LIBMESH_ENABLE_AMR
          const unsigned int p_level =
            cast_int<unsigned int>(*it);
#endif
          ++it;

          Elem * parent = (parent_id == DofObject::invalid_id) ?
            libmesh_nullptr : mesh.elem_ptr(parent_id);

          Elem * elem = Elem::build (elem_type, parent).release();

          elem->set_id() = e;
#ifdef LIBMESH_ENABLE_UNIQUE_ID
          elem->set_unique_id() = unique_id;
#endif
          elem->processor_id() = proc_id;
          elem->subdomain_id() = subdomain_id;
#ifdef LIBMESH_ENABLE_AMR
          elem->hack_p_level(p_level);

          if (parent)
            {
              parent->add_child(elem);
              parent->set_refinement_flag (Elem::INACTIVE);
              elem->set_refinement_flag   (Elem::JUST_REFINED);
            }
#endif

          for (unsigned int n=0, n_n = elem->n_nodes(); n != n_n;
               n++, ++it)
            {
              const dof_id_type global_node_number =
                cast_int<dof_id_type>(*it);

              elem->set_node(n) =
                mesh.add_point (Point(), global_node_number);
            }

          elems_of_dimension[elem->dim()] = true;
          mesh.add_elem(elem);
        }
    }

  // Set the mesh dimension to the largest encountered for an element
  for (unsigned char i=0; i!=4; ++i)
    if (elems_of_dimension[i])
      mesh.set_mesh_dimension(i);

#if LIBMESH_DIM < 3
  if (mesh.mesh_dimension() > LIBMESH_DIM)
    libmesh_error_msg("Cannot open dimension "
                      << mesh.mesh_dimension()
                      << " mesh file when configured without "
                      << mesh.mesh_dimension()
                      << "D support.");
#endif
}



void XdrIO::read_serialized_nodes (Xdr & io, const dof_id_type n_nodes)
{
  libmesh_assert (io.reading());

  // convenient reference to our mesh
  MeshBase & mesh = MeshInput<MeshBase>::mesh();

  if (!mesh.n_nodes()) return;

  // At this point the elements have been read from file and placeholder nodes
  // have been assigned.  These nodes, however, do not have the proper (x,y,z)
  // locations or unique_id values.  This method will read all the
  // nodes from disk, and each processor can then grab the individual
  // values it needs.

  // If the file includes unique ids for nodes (as indicated by a
  // flag in 0.9.6+ files), those will be read next.

  // build up a list of the nodes contained in our local mesh.  These are the nodes
  // stored on the local processor whose (x,y,z) and unique_id values
  // need to be corrected.
  std::vector<dof_id_type> needed_nodes;
  needed_nodes.reserve (mesh.n_nodes());
  {
    for (auto & node : mesh.node_ptr_range())
      needed_nodes.push_back(node->id());

    std::sort (needed_nodes.begin(), needed_nodes.end());

    // We should not have any duplicate node->id()s
    libmesh_assert (std::unique(needed_nodes.begin(), needed_nodes.end()) == needed_nodes.end());
  }

  // Get the nodes in blocks.
  std::vector<Real> coords;
  std::pair<std::vector<dof_id_type>::iterator,
            std::vector<dof_id_type>::iterator> pos;
  pos.first = needed_nodes.begin();
1791 
1792  // Broadcast node coordinates
1793  for (std::size_t blk=0, first_node=0, last_node=0; last_node<n_nodes; blk++)
1794  {
1795  first_node = blk*io_blksize;
1796  last_node = std::min((blk+1)*io_blksize, std::size_t(n_nodes));
1797 
1798  coords.resize(3*(last_node - first_node));
1799 
1800  if (this->processor_id() == 0)
1801  io.data_stream (coords.empty() ? libmesh_nullptr : &coords[0],
1802  cast_int<unsigned int>(coords.size()));
1803 
1804  // For large numbers of processors the majority of processors at any given
1805  // block may not actually need these data. It may be worth profiling this,
1806  // although it is expected that disk IO will be the bottleneck.
1807  this->comm().broadcast (coords);
1808 
1809  for (std::size_t n=first_node, idx=0; n<last_node; n++, idx+=3)
1810  {
1811  // First see if we need this node.  Use pos.first as a smart lower
1812  // bound; this ensures that the size of the searched range
1813  // decreases as we match nodes.
1814  pos = std::equal_range (pos.first, needed_nodes.end(), n);
1815 
1816  if (pos.first != pos.second) // we need this node.
1817  {
1818  libmesh_assert_equal_to (*pos.first, n);
1819  mesh.node_ref(cast_int<dof_id_type>(n)) =
1820  Point (coords[idx+0],
1821  coords[idx+1],
1822  coords[idx+2]);
1823 
1824  }
1825  }
1826  }
1827 
1828  if (version_at_least_0_9_6())
1829  {
1830  // Check for node unique ids
1831  unsigned short read_unique_ids;
1832 
1833  if (this->processor_id() == 0)
1834  io.data (read_unique_ids);
1835 
1836  this->comm().broadcast (read_unique_ids);
1837 
1838  // If no unique ids are in the file, we're done.
1839  if (!read_unique_ids)
1840  return;
1841 
1842  std::vector<uint32_t> unique_32;
1843  std::vector<uint64_t> unique_64;
1844 
1845  // We're starting over from node 0 again
1846  pos.first = needed_nodes.begin();
1847 
1848  for (std::size_t blk=0, first_node=0, last_node=0; last_node<n_nodes; blk++)
1849  {
1850  first_node = blk*io_blksize;
1851  last_node = std::min((blk+1)*io_blksize, std::size_t(n_nodes));
1852 
1853  libmesh_assert((_field_width == 8) || (_field_width == 4));
1854 
1855  if (_field_width == 8)
1856  unique_64.resize(last_node - first_node);
1857  else
1858  unique_32.resize(last_node - first_node);
1859 
1860  if (this->processor_id() == 0)
1861  {
1862  if (_field_width == 8)
1863  io.data_stream (unique_64.empty() ? libmesh_nullptr : &unique_64[0],
1864  cast_int<unsigned int>(unique_64.size()));
1865  else
1866  io.data_stream (unique_32.empty() ? libmesh_nullptr : &unique_32[0],
1867  cast_int<unsigned int>(unique_32.size()));
1868  }
1869 
1870 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1871  if (_field_width == 8)
1872  this->comm().broadcast (unique_64);
1873  else
1874  this->comm().broadcast (unique_32);
1875 
1876  for (std::size_t n=first_node, idx=0; n<last_node; n++, idx++)
1877  {
1878  // First see if we need this node.  Use pos.first as a smart lower
1879  // bound; this ensures that the size of the searched range
1880  // decreases as we match nodes.
1881  pos = std::equal_range (pos.first, needed_nodes.end(), n);
1882 
1883  if (pos.first != pos.second) // we need this node.
1884  {
1885  libmesh_assert_equal_to (*pos.first, n);
1886  if (_field_width == 8)
1887  mesh.node_ref(cast_int<dof_id_type>(n)).set_unique_id()
1888  = unique_64[idx];
1889  else
1890  mesh.node_ref(cast_int<dof_id_type>(n)).set_unique_id()
1891  = unique_32[idx];
1892  }
1893  }
1894 #endif // LIBMESH_ENABLE_UNIQUE_ID
1895  }
1896  }
1897 }
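
// Illustrative sketch (hypothetical helper, not part of the library):
// the blocked loops above partition [0, n_items) into chunks of at
// most io_blksize entries, so that only one chunk is resident and
// broadcast at a time.
static std::vector<std::pair<std::size_t, std::size_t> >
example_io_blocks (std::size_t n_items, std::size_t blksize)
{
  std::vector<std::pair<std::size_t, std::size_t> > blocks;
  for (std::size_t first = 0; first < n_items; first += blksize)
    blocks.push_back (std::make_pair (first, std::min (first + blksize, n_items)));
  return blocks;
}
// e.g. blksize = 128000 (io_blksize) and n_items = 300000 gives the
// blocks [0,128000), [128000,256000) and [256000,300000).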
1898 
1899 
1900 
1901 template <typename T>
1902 void XdrIO::read_serialized_bcs_helper (Xdr & io, T, const std::string bc_type)
1903 {
1904  if (this->boundary_condition_file_name() == "n/a") return;
1905 
1906  libmesh_assert (io.reading());
1907 
1908  // convenient reference to our mesh
1910  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1911  // and our boundary info object
1912  BoundaryInfo & boundary_info = mesh.get_boundary_info();
1913 
1914  // Version 0.9.2+ stores boundary entity names
1915  read_serialized_bc_names(io, boundary_info, true); // sideset names
1916 
1917  std::vector<DofBCData> dof_bc_data;
1918  std::vector<T> input_buffer;
1919 
1920  new_header_id_type n_bcs=0;
1921  if (this->processor_id() == 0)
1922  {
1923  if (this->version_at_least_1_3_0())
1924  io.data (n_bcs);
1925  else
1926  {
1927  old_header_id_type temp;
1928  io.data (temp);
1929  n_bcs = temp;
1930  }
1931  }
1932  this->comm().broadcast (n_bcs);
1933 
1934  for (std::size_t blk=0, first_bc=0, last_bc=0; last_bc<n_bcs; blk++)
1935  {
1936  first_bc = blk*io_blksize;
1937  last_bc = std::min((blk+1)*io_blksize, std::size_t(n_bcs));
1938 
1939  input_buffer.resize (3*(last_bc - first_bc));
1940 
1941  if (this->processor_id() == 0)
1942  io.data_stream (input_buffer.empty() ? libmesh_nullptr : &input_buffer[0],
1943  cast_int<unsigned int>(input_buffer.size()));
1944 
1945  this->comm().broadcast (input_buffer);
1946  dof_bc_data.clear();
1947  dof_bc_data.reserve (input_buffer.size()/3);
1948 
1949  // convert the input_buffer to DofBCData to facilitate searching
1950  for (std::size_t idx=0; idx<input_buffer.size(); idx+=3)
1951  dof_bc_data.push_back
1952  (DofBCData(cast_int<dof_id_type>(input_buffer[idx+0]),
1953  cast_int<unsigned short>(input_buffer[idx+1]),
1954  cast_int<boundary_id_type>(input_buffer[idx+2])));
1955  input_buffer.clear();
1956  // note that while the files *we* write should already be sorted by
1957  // element id, this is not necessarily guaranteed.
1958  std::sort (dof_bc_data.begin(), dof_bc_data.end());
1959 
1960  MeshBase::const_element_iterator
1961  it = mesh.level_elements_begin(0),
1962  end = mesh.level_elements_end(0);
1963 
1964  // Look for BCs in this block for all the level-0 elements we have
1965  // (not just local ones). Do this by finding all the entries
1966  // in dof_bc_data whose elem_id match the ID of the current element.
1967  // We cannot rely on libmesh_nullptr neighbors at this point since the neighbor
1968  // data structure has not been initialized.
1969  for (; it!=end; ++it)
1970  {
1971  std::pair<std::vector<DofBCData>::iterator,
1972  std::vector<DofBCData>::iterator> bounds =
1973  std::equal_range (dof_bc_data.begin(),
1974  dof_bc_data.end(),
1975  (*it)->id()
1976 #if defined(__SUNPRO_CC) || defined(__PGI)
1977  , CompareIntDofBCData()
1978 #endif
1979  );
1980 
1981  for (const auto & data : as_range(bounds))
1982  {
1983  libmesh_assert_equal_to (data.dof_id, (*it)->id());
1984 
1985  if (bc_type == "side")
1986  {
1987  libmesh_assert_less (data.side, (*it)->n_sides());
1988  boundary_info.add_side (*it, data.side, data.bc_id);
1989  }
1990  else if (bc_type == "edge")
1991  {
1992  libmesh_assert_less (data.side, (*it)->n_edges());
1993  boundary_info.add_edge (*it, data.side, data.bc_id);
1994  }
1995  else if (bc_type == "shellface")
1996  {
1997  // Shell face IDs can only be 0 or 1.
1998  libmesh_assert_less(data.side, 2);
1999 
2000  boundary_info.add_shellface (*it, data.side, data.bc_id);
2001  }
2002  else
2003  {
2004  libmesh_error_msg("bc_type not recognized: " + bc_type);
2005  }
2006  }
2007  }
2008  }
2009 }
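
// Illustrative sketch (hypothetical helper, not part of the library):
// the "smart lower bound" idiom used above.  Because the queried ids
// and the stored records are both sorted, each std::equal_range can
// restart from the previous match, so the searched range only shrinks.
static std::size_t
example_count_matches (const std::vector<unsigned int> & sorted_records,
                       const std::vector<unsigned int> & sorted_queries)
{
  std::size_t n_matches = 0;
  std::vector<unsigned int>::const_iterator lower = sorted_records.begin();
  for (std::size_t q = 0; q != sorted_queries.size(); ++q)
    {
      std::pair<std::vector<unsigned int>::const_iterator,
                std::vector<unsigned int>::const_iterator> bounds =
        std::equal_range (lower, sorted_records.end(), sorted_queries[q]);

      n_matches += static_cast<std::size_t>(bounds.second - bounds.first);
      lower = bounds.first; // never search below a previous match again
    }
  return n_matches;
}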
2010 
2011 
2012 
2013 template <typename T>
2014 void XdrIO::read_serialized_side_bcs (Xdr & io, T value)
2015 {
2016  read_serialized_bcs_helper(io, value, "side");
2017 }
2018 
2019 
2020 
2021 template <typename T>
2022 void XdrIO::read_serialized_edge_bcs (Xdr & io, T value)
2023 {
2024  read_serialized_bcs_helper(io, value, "edge");
2025 }
2026 
2027 
2028 
2029 template <typename T>
2030 void XdrIO::read_serialized_shellface_bcs (Xdr & io, T value)
2031 {
2032  read_serialized_bcs_helper(io, value, "shellface");
2033 }
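
// Illustrative sketch (hypothetical helper, not part of the library):
// the unnamed dummy argument in the three wrappers above exists only
// to select the on-disk integer width T at the call site (uint32_t
// for 4-byte files, uint64_t for 8-byte files).
template <typename T>
static std::size_t
example_bc_record_bytes (T /* width tag */, std::size_t n_records)
{
  // each (element id, side, boundary id) triple is written as three
  // T-wide integers
  return 3 * sizeof(T) * n_records;
}
// e.g. example_bc_record_bytes (uint32_t(0), 10) == 120, while
// example_bc_record_bytes (uint64_t(0), 10) == 240.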
2034 
2035 
2036 
2037 template <typename T>
2038 void XdrIO::read_serialized_nodesets (Xdr & io, T value)
2039 {
2040  if (this->boundary_condition_file_name() == "n/a") return;
2041 
2042  libmesh_assert (io.reading());
2043 
2044  // convenient reference to our mesh
2046  MeshBase & mesh = MeshInput<MeshBase>::mesh();
2047  // and our boundary info object
2048  BoundaryInfo & boundary_info = mesh.get_boundary_info();
2049 
2050  // Version 0.9.2+ stores boundary entity names
2051  read_serialized_bc_names(io, boundary_info, false); // nodeset names
2052 
2053  // TODO: Make a data object that works with both the element and nodal bcs
2054  std::vector<DofBCData> node_bc_data;
2055  std::vector<T> input_buffer;
2056 
2057  new_header_id_type n_nodesets=0;
2058  if (this->processor_id() == 0)
2059  {
2060  if (this->version_at_least_1_3_0())
2061  io.data (n_nodesets);
2062  else
2063  {
2064  old_header_id_type temp;
2065  io.data (temp);
2066  n_nodesets = temp;
2067  }
2068  }
2069  this->comm().broadcast (n_nodesets);
2070 
2071  for (std::size_t blk=0, first_bc=0, last_bc=0; last_bc<n_nodesets; blk++)
2072  {
2073  first_bc = blk*io_blksize;
2074  last_bc = std::min((blk+1)*io_blksize, std::size_t(n_nodesets));
2075 
2076  input_buffer.resize (2*(last_bc - first_bc));
2077 
2078  if (this->processor_id() == 0)
2079  io.data_stream (input_buffer.empty() ? libmesh_nullptr : &input_buffer[0],
2080  cast_int<unsigned int>(input_buffer.size()));
2081 
2082  this->comm().broadcast (input_buffer);
2083  node_bc_data.clear();
2084  node_bc_data.reserve (input_buffer.size()/2);
2085 
2086  // convert the input_buffer to DofBCData to facilitate searching
2087  for (std::size_t idx=0; idx<input_buffer.size(); idx+=2)
2088  node_bc_data.push_back
2089  (DofBCData(cast_int<dof_id_type>(input_buffer[idx+0]),
2090  0,
2091  cast_int<boundary_id_type>(input_buffer[idx+1])));
2092  input_buffer.clear();
2093  // note that while the files *we* write should already be sorted by
2094  // node id, this is not necessarily guaranteed.
2095  std::sort (node_bc_data.begin(), node_bc_data.end());
2096 
2097  // Look for BCs in this block for all nodes we have
2098  // (not just local ones). Do this by finding all the entries
2099  // in node_bc_data whose dof_id (used here to hold a node id) matches the ID of the current node.
2100  for (auto & node : mesh.node_ptr_range())
2101  {
2102  std::pair<std::vector<DofBCData>::iterator,
2103  std::vector<DofBCData>::iterator> bounds =
2104  std::equal_range (node_bc_data.begin(),
2105  node_bc_data.end(),
2106  node->id()
2107 #if defined(__SUNPRO_CC) || defined(__PGI)
2108  , CompareIntDofBCData()
2109 #endif
2110  );
2111 
2112  for (const auto & data : as_range(bounds))
2113  {
2114  // Note: dof_id from DofBCData is being used to hold node_id here
2115  libmesh_assert_equal_to (data.dof_id, node->id());
2116 
2117  boundary_info.add_node (node, data.bc_id);
2118  }
2119  }
2120  }
2121 }
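
// Illustrative sketch (hypothetical helper, not part of the library):
// unlike the 3-entry side/edge/shellface records, nodeset records are
// flat (node id, boundary id) pairs, which is why the buffer above is
// sized 2*(last_bc - first_bc) and decoded in steps of two.
static std::vector<std::pair<unsigned int, int> >
example_decode_nodeset_pairs (const std::vector<unsigned int> & buffer)
{
  std::vector<std::pair<unsigned int, int> > pairs;
  pairs.reserve (buffer.size()/2);
  for (std::size_t idx = 0; idx + 1 < buffer.size(); idx += 2)
    pairs.push_back (std::make_pair (buffer[idx],                       // node id
                                     static_cast<int>(buffer[idx+1]))); // boundary id
  return pairs;
}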
2122 
2123 
2124 
2125 void XdrIO::read_serialized_bc_names(Xdr & io, BoundaryInfo & info, bool is_sideset)
2126 {
2127  const bool read_entity_info = version_at_least_0_9_2();
2128  const bool use_new_header_type (this->version_at_least_1_3_0());
2129  if (read_entity_info)
2130  {
2131  new_header_id_type n_boundary_names = 0;
2132  std::vector<new_header_id_type> boundary_ids;
2133  std::vector<std::string> boundary_names;
2134 
2135  // Read the boundary (sideset or nodeset) names
2136  if (this->processor_id() == 0)
2137  {
2138  if (use_new_header_type)
2139  io.data(n_boundary_names);
2140  else
2141  {
2142  old_header_id_type temp;
2143  io.data(temp);
2144  n_boundary_names = temp;
2145  }
2146 
2147  boundary_names.resize(n_boundary_names);
2148 
2149  if (n_boundary_names)
2150  {
2151  if (use_new_header_type)
2152  io.data(boundary_ids);
2153  else
2154  {
2155  std::vector<old_header_id_type> temp(n_boundary_names);
2156  io.data(temp);
2157  boundary_ids.assign(temp.begin(), temp.end());
2158  }
2159  io.data(boundary_names);
2160  }
2161  }
2162 
2163  // Broadcast the boundary names to all processors
2164  this->comm().broadcast(n_boundary_names);
2165  if (n_boundary_names == 0)
2166  return;
2167 
2168  boundary_ids.resize(n_boundary_names);
2169  boundary_names.resize(n_boundary_names);
2170  this->comm().broadcast(boundary_ids);
2171  this->comm().broadcast(boundary_names);
2172 
2173  // Reassemble the named boundary information
2174  std::map<boundary_id_type, std::string> & boundary_map = is_sideset ?
2175  info.set_sideset_name_map() : info.set_nodeset_name_map();
2176 
2177  for (unsigned int i=0; i<n_boundary_names; ++i)
2178  boundary_map.insert(std::make_pair(cast_int<boundary_id_type>(boundary_ids[i]), boundary_names[i]));
2179  }
2180 }
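
// Illustrative sketch (hypothetical helper, not part of the library):
// after the broadcasts above, the parallel id and name vectors are
// zipped back into a single map, exactly as the insert loop over
// boundary_map does.
static std::map<int, std::string>
example_zip_name_map (const std::vector<int> & ids,
                      const std::vector<std::string> & names)
{
  std::map<int, std::string> name_map;
  const std::size_t n = std::min (ids.size(), names.size());
  for (std::size_t i = 0; i != n; ++i)
    name_map.insert (std::make_pair (ids[i], names[i]));
  return name_map;
}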
2181 
2182 
2183 
2184 void XdrIO::pack_element (std::vector<xdr_id_type> & conn, const Elem * elem,
2185  const dof_id_type parent_id, const dof_id_type parent_pid) const
2186 {
2187  libmesh_assert(elem);
2188  libmesh_assert_equal_to (elem->n_nodes(), Elem::type_to_n_nodes_map[elem->type()]);
2189 
2190  conn.push_back(elem->n_nodes());
2191 
2192  conn.push_back (elem->type());
2193 
2194  // In version 0.7.0+ "id" is stored but not used. In version 0.9.2+
2195  // we store unique_id instead, so there is no need to
2196  // check for the older version when writing the unique_id.
2197  conn.push_back (elem->unique_id());
2198 
2199  if (parent_id != DofObject::invalid_id)
2200  {
2201  conn.push_back (parent_id);
2202  libmesh_assert_not_equal_to (parent_pid, DofObject::invalid_id);
2203  conn.push_back (parent_pid);
2204  }
2205 
2206  conn.push_back (elem->processor_id());
2207  conn.push_back (elem->subdomain_id());
2208 
2209 #ifdef LIBMESH_ENABLE_AMR
2210  conn.push_back (elem->p_level());
2211 #endif
2212 
2213  for (auto n : elem->node_index_range())
2214  conn.push_back (elem->node_id(n));
2215 }
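
// Illustrative sketch (hypothetical reader, not part of the library):
// the record emitted by pack_element() is, in order,
//   [n_nodes, type, unique_id, (parent_id, parent_pid if present),
//    processor_id, subdomain_id, (p_level in AMR builds), node ids...]
// so decoding the node ids of a level-0 element in a non-AMR build
// looks like:
static std::size_t
example_unpack_level0_nodes (const std::vector<XdrIO::xdr_id_type> & conn,
                             std::vector<XdrIO::xdr_id_type> & node_ids)
{
  std::size_t pos = 0;
  const std::size_t n_nodes = static_cast<std::size_t>(conn[pos++]); // node count
  pos++; // ElemType
  pos++; // unique_id
  pos++; // processor_id
  pos++; // subdomain_id
  // (a p_level entry would follow here in AMR-enabled builds)
  node_ids.assign (conn.begin() + pos, conn.begin() + pos + n_nodes);
  return pos + n_nodes; // offset of the next packed record
}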
2216 
2217 bool XdrIO::version_at_least_0_9_2() const
2218 {
2219  return
2220  (this->version().find("0.9.2") != std::string::npos) ||
2221  (this->version().find("0.9.6") != std::string::npos) ||
2222  (this->version().find("1.1.0") != std::string::npos) ||
2223  (this->version().find("1.3.0") != std::string::npos);
2224 }
2225 
2226 bool XdrIO::version_at_least_0_9_6() const
2227 {
2228  return
2229  (this->version().find("0.9.6") != std::string::npos) ||
2230  (this->version().find("1.1.0") != std::string::npos) ||
2231  (this->version().find("1.3.0") != std::string::npos);
2232 }
2233 
2234 bool XdrIO::version_at_least_1_1_0() const
2235 {
2236  return
2237  (this->version().find("1.1.0") != std::string::npos) ||
2238  (this->version().find("1.3.0") != std::string::npos);
2239 }
2240 
2241 bool XdrIO::version_at_least_1_3_0() const
2242 {
2243  return
2244  (this->version().find("1.3.0") != std::string::npos);
2245 }
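
// Illustrative sketch (hypothetical helper, not part of the library):
// each predicate above simply asks whether the version string contains
// one of the known release tags.
static bool
example_version_contains_any (const std::string & version,
                              const std::vector<std::string> & tags)
{
  for (std::size_t i = 0; i != tags.size(); ++i)
    if (version.find (tags[i]) != std::string::npos)
      return true;
  return false;
}
// e.g. version_at_least_1_1_0() is equivalent to calling this with
// the tags {"1.1.0", "1.3.0"}.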
2246 
2247 } // namespace libMesh