libMesh
nemesis_io.C
Go to the documentation of this file.
1 // The libMesh Finite Element Library.
2 // Copyright (C) 2002-2017 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner
3 
4 // This library is free software; you can redistribute it and/or
5 // modify it under the terms of the GNU Lesser General Public
6 // License as published by the Free Software Foundation; either
7 // version 2.1 of the License, or (at your option) any later version.
8 
9 // This library is distributed in the hope that it will be useful,
10 // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 // Lesser General Public License for more details.
13 
14 // You should have received a copy of the GNU Lesser General Public
15 // License along with this library; if not, write to the Free Software
16 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 
18 
19 // C++ includes
20 #include <numeric> // std::accumulate
21 
22 // LibMesh includes
23 #include "libmesh/distributed_mesh.h"
24 #include "libmesh/elem.h"
25 #include "libmesh/exodusII_io.h"
26 #include "libmesh/libmesh_logging.h"
27 #include "libmesh/nemesis_io.h"
28 #include "libmesh/nemesis_io_helper.h"
29 #include "libmesh/node.h"
30 #include "libmesh/parallel.h"
31 #include "libmesh/utility.h" // is_sorted, deallocate
32 #include "libmesh/boundary_info.h"
33 #include "libmesh/mesh_communication.h"
34 
35 namespace libMesh
36 {
37 
38 
39 //-----------------------------------------------
40 // anonymous namespace for implementation details
41 namespace {
struct CompareGlobalIdxMappings
{
  // Strict weak ordering for (old_idx -> new_idx) pairs.  Each key maps
  // to exactly one value, so ordering on the key alone is sufficient.
  bool operator()(const std::pair<unsigned int, unsigned int> & lhs,
                  const std::pair<unsigned int, unsigned int> & rhs) const
  {
    return lhs.first < rhs.first;
  }

  // Heterogeneous overload: lookups are performed with a bare integer
  // key (e.g. via std::lower_bound), hence this comparison against an
  // unsigned int on the right-hand side.
  bool operator()(const std::pair<unsigned int, unsigned int> & lhs,
                  const unsigned int rhs) const
  {
    return lhs.first < rhs;
  }
};
56 
// Nemesis & ExodusII use int for all integer values, even the ones which
// should never be negative.  We prefer unsigned types as a force of habit,
// so this trivial helper performs the conversion and, in debug builds,
// verifies the conversion is lossless (i.e. the value was non-negative
// and representable).
template <typename T>
inline unsigned int to_uint ( const T & t )
{
  const unsigned int converted = static_cast<unsigned int>(t);

  // Round-tripping back to T must reproduce the original value.
  libmesh_assert_equal_to (t, static_cast<T>(converted));

  return converted;
}
68 
// Equality test for (old_idx -> new_idx) pairs.  Each key maps to exactly
// one value, so comparing the keys alone suffices.  Only needed by debug
// assertions, hence the !defined(NDEBUG) in the guard below.
#if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API) && !defined(NDEBUG)
inline bool global_idx_mapping_equality (const std::pair<unsigned int, unsigned int> & a,
                                         const std::pair<unsigned int, unsigned int> & b)
{
  return b.first == a.first;
}
#endif
78 
79 }
80 
81 
82 
83 // ------------------------------------------------------------
84 // Nemesis_IO class members
86 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
87  bool single_precision
88 #else
89  bool
90 #endif
91  ) :
92  MeshInput<MeshBase> (mesh, /*is_parallel_format=*/true),
93  MeshOutput<MeshBase> (mesh, /*is_parallel_format=*/true),
94  ParallelObject (mesh),
95 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
96  nemhelper(new Nemesis_IO_Helper(*this, false, single_precision)),
97  _timestep(1),
98 #endif
99  _verbose (false),
100  _append(false)
101 {
102 }
103 
104 
105 
106 // Destructor. Defined in the C file so we can be sure to get away
107 // with a forward declaration of Nemesis_IO_Helper in the header file.
109 {
110 }
111 
112 
113 
114 void Nemesis_IO::verbose (bool set_verbosity)
115 {
116  _verbose = set_verbosity;
117 
118 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
119  // Set the verbose flag in the helper object
120  // as well.
121  nemhelper->verbose = _verbose;
122 #endif
123 }
124 
125 
126 
127 void Nemesis_IO::append(bool val)
128 {
129  _append = val;
130 }
131 
132 
133 
134 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
135 void Nemesis_IO::read (const std::string & base_filename)
136 {
137  // On one processor, Nemesis and ExodusII should be equivalent, so
138  // let's cowardly defer to that implementation...
139  if (this->n_processors() == 1)
140  {
141  // We can do this in one line but if the verbose flag was set in this
142  // object, it will no longer be set... thus no extra print-outs for serial runs.
143  // ExodusII_IO(this->mesh()).read (base_filename); // ambiguous when Nemesis_IO is multiply-inherited
144 
146  ExodusII_IO(mesh).read (base_filename);
147  return;
148  }
149 
150  LOG_SCOPE ("read()","Nemesis_IO");
151 
152  // This function must be run on all processors at once
153  parallel_object_only();
154 
155  if (_verbose)
156  {
157  libMesh::out << "[" << this->processor_id() << "] ";
158  libMesh::out << "Reading Nemesis file on processor: " << this->processor_id() << std::endl;
159  }
160 
161  // Construct the Nemesis filename based on the number of processors and the
162  // current processor ID.
163  std::string nemesis_filename = nemhelper->construct_nemesis_filename(base_filename);
164 
165  if (_verbose)
166  libMesh::out << "Opening file: " << nemesis_filename << std::endl;
167 
168  // Open the Exodus file in EX_READ mode
169  nemhelper->open(nemesis_filename.c_str(), /*read_only=*/true);
170 
171  // Get a reference to the mesh. We need to be specific
172  // since Nemesis_IO is multiply-inherited
173  // MeshBase & mesh = this->mesh();
175 
176  // We're reading a file on each processor, so our mesh is
177  // partitioned into that many parts as it's created
178  this->set_n_partitions(this->n_processors());
179 
180  // Local information: Read the following information from the standard Exodus header
181  // title[0]
182  // num_dim
183  // num_nodes
184  // num_elem
185  // num_elem_blk
186  // num_node_sets
187  // num_side_sets
188  nemhelper->read_header();
189  nemhelper->print_header();
190 
191  // Get global information: number of nodes, elems, blocks, nodesets and sidesets
192  nemhelper->get_init_global();
193 
194  // Get "load balance" information. This includes the number of internal & border
195  // nodes and elements as well as the number of communication maps.
196  nemhelper->get_loadbal_param();
197 
198  // Do some error checking
199  if (nemhelper->num_external_nodes)
200  libmesh_error_msg("ERROR: there should be no external nodes in an element-based partitioning!");
201 
202  libmesh_assert_equal_to (nemhelper->num_nodes,
203  (nemhelper->num_internal_nodes +
204  nemhelper->num_border_nodes));
205 
206  libmesh_assert_equal_to (nemhelper->num_elem,
207  (nemhelper->num_internal_elems +
208  nemhelper->num_border_elems));
209 
210  libmesh_assert_less_equal (nemhelper->num_nodes, nemhelper->num_nodes_global);
211  libmesh_assert_less_equal (nemhelper->num_elem, nemhelper->num_elems_global);
212 
213  // Read nodes from the exodus file: this fills the nemhelper->x,y,z arrays.
214  nemhelper->read_nodes();
215 
216  // Reads the nemhelper->node_num_map array, node_num_map[i] is the global node number for
217  // local node number i.
218  nemhelper->read_node_num_map();
219 
220  // The get_cmap_params() function reads in the:
221  // node_cmap_ids[],
222  // node_cmap_node_cnts[],
223  // elem_cmap_ids[],
224  // elem_cmap_elem_cnts[],
225  nemhelper->get_cmap_params();
226 
227  // Read the IDs of the interior, boundary, and external nodes. This function
228  // fills the vectors:
229  // node_mapi[],
230  // node_mapb[],
231  // node_mape[]
232  nemhelper->get_node_map();
233 
234  // Read each node communication map for this processor. This function
235  // fills the vectors of vectors named:
236  // node_cmap_node_ids[][]
237  // node_cmap_proc_ids[][]
238  nemhelper->get_node_cmap();
239 
240  libmesh_assert_equal_to (to_uint(nemhelper->num_node_cmaps), nemhelper->node_cmap_node_cnts.size());
241  libmesh_assert_equal_to (to_uint(nemhelper->num_node_cmaps), nemhelper->node_cmap_node_ids.size());
242  libmesh_assert_equal_to (to_uint(nemhelper->num_node_cmaps), nemhelper->node_cmap_proc_ids.size());
243 
244 #ifndef NDEBUG
245  // We expect the communication maps to be symmetric - e.g. if processor i thinks it
246  // communicates with processor j, then processor j should also be expecting to
247  // communicate with i. We can assert that here easily enough with an alltoall,
248  // but let's only do it when not in optimized mode to limit unnecessary communication.
249  {
250  std::vector<unsigned char> pid_send_partner (this->n_processors(), 0);
251 
252  // strictly speaking, we should expect to communicate with ourself...
253  pid_send_partner[this->processor_id()] = 1;
254 
255  // mark each processor id we reference with a node cmap
256  for (unsigned int cmap=0; cmap<to_uint(nemhelper->num_node_cmaps); cmap++)
257  {
258  libmesh_assert_less (to_uint(nemhelper->node_cmap_ids[cmap]), this->n_processors());
259 
260  pid_send_partner[nemhelper->node_cmap_ids[cmap]] = 1;
261  }
262 
263  // Copy the send pairing so we can catch the receive paring and
264  // test for equality
265  const std::vector<unsigned char> pid_recv_partner (pid_send_partner);
266 
267  this->comm().alltoall (pid_send_partner);
268 
269  libmesh_assert (pid_send_partner == pid_recv_partner);
270  }
271 #endif
272 
273  // We now have enough information to infer node ownership. We start by assuming
274  // we own all the nodes on this processor. We will then interrogate the
275  // node cmaps and see if a lower-rank processor is associated with any of
276  // our nodes. If so, then that processor owns the node, not us...
277  std::vector<processor_id_type> node_ownership (nemhelper->num_internal_nodes +
278  nemhelper->num_border_nodes,
279  this->processor_id());
280 
281  // a map from processor id to cmap number, to be used later
282  std::map<unsigned int, unsigned int> pid_to_cmap_map;
283 
284  // For each node_cmap...
285  for (unsigned int cmap=0; cmap<to_uint(nemhelper->num_node_cmaps); cmap++)
286  {
287  // Good time for error checking...
288  libmesh_assert_equal_to (to_uint(nemhelper->node_cmap_node_cnts[cmap]),
289  nemhelper->node_cmap_node_ids[cmap].size());
290 
291  libmesh_assert_equal_to (to_uint(nemhelper->node_cmap_node_cnts[cmap]),
292  nemhelper->node_cmap_proc_ids[cmap].size());
293 
294  // In all the samples I have seen, node_cmap_ids[cmap] is the processor
295  // rank of the remote processor...
296  const int adjcnt_pid_idx = nemhelper->node_cmap_ids[cmap];
297 
298  libmesh_assert_less (adjcnt_pid_idx, this->n_processors());
299  libmesh_assert_not_equal_to (adjcnt_pid_idx, this->processor_id());
300 
301  // We only expect one cmap per adjacent processor
302  libmesh_assert (!pid_to_cmap_map.count(adjcnt_pid_idx));
303 
304  pid_to_cmap_map[adjcnt_pid_idx] = cmap;
305 
306  // ...and each node in that cmap...
307  for (unsigned int idx=0; idx<to_uint(nemhelper->node_cmap_node_cnts[cmap]); idx++)
308  {
309  // Are the node_cmap_ids and node_cmap_proc_ids really redundant?
310  libmesh_assert_equal_to (adjcnt_pid_idx, nemhelper->node_cmap_proc_ids[cmap][idx]);
311 
312  // we are expecting the exodus node numbering to be 1-based...
313  const unsigned int local_node_idx = nemhelper->node_cmap_node_ids[cmap][idx]-1;
314 
315  libmesh_assert_less (local_node_idx, node_ownership.size());
316 
317  // if the adjacent processor is lower rank than the current
318  // owner for this node, then it will get the node...
319  node_ownership[local_node_idx] =
320  std::min(node_ownership[local_node_idx],
321  cast_int<processor_id_type>(adjcnt_pid_idx));
322  }
323  } // We now should have established proper node ownership.
324 
325  // now that ownership is established, we can figure out how many nodes we
326  // will be responsible for numbering.
327  unsigned int num_nodes_i_must_number = 0;
328 
329  for (std::size_t idx=0; idx<node_ownership.size(); idx++)
330  if (node_ownership[idx] == this->processor_id())
331  num_nodes_i_must_number++;
332 
333  // more error checking...
334  libmesh_assert_greater_equal (num_nodes_i_must_number, to_uint(nemhelper->num_internal_nodes));
335  libmesh_assert (num_nodes_i_must_number <= to_uint(nemhelper->num_internal_nodes +
336  nemhelper->num_border_nodes));
337  if (_verbose)
338  libMesh::out << "[" << this->processor_id() << "] "
339  << "num_nodes_i_must_number="
340  << num_nodes_i_must_number
341  << std::endl;
342 
343  // The call to get_loadbal_param() gets 7 pieces of information. We allgather
344  // these now across all processors to determine some global numberings. We should
345  // also gather the number of nodes each processor thinks it will number so that
346  // we can (i) determine our offset, and (ii) do some error checking.
347  std::vector<int> all_loadbal_data ( 8 );
348  all_loadbal_data[0] = nemhelper->num_internal_nodes;
349  all_loadbal_data[1] = nemhelper->num_border_nodes;
350  all_loadbal_data[2] = nemhelper->num_external_nodes;
351  all_loadbal_data[3] = nemhelper->num_internal_elems;
352  all_loadbal_data[4] = nemhelper->num_border_elems;
353  all_loadbal_data[5] = nemhelper->num_node_cmaps;
354  all_loadbal_data[6] = nemhelper->num_elem_cmaps;
355  all_loadbal_data[7] = num_nodes_i_must_number;
356 
357  this->comm().allgather (all_loadbal_data, /* identical_buffer_sizes = */ true);
358 
359  // OK, we are now in a position to request new global indices for all the nodes
360  // we do not own
361 
362  // Let's get a unique message tag to use for send()/receive()
363  Parallel::MessageTag nodes_tag = mesh.comm().get_unique_tag(12345);
364 
365  std::vector<std::vector<int>>
366  needed_node_idxs (nemhelper->num_node_cmaps); // the indices we will ask for
367 
368  std::vector<Parallel::Request>
369  needed_nodes_requests (nemhelper->num_node_cmaps);
370 
371  for (unsigned int cmap=0; cmap<to_uint(nemhelper->num_node_cmaps); cmap++)
372  {
373  // We know we will need no more indices than there are nodes
374  // in this cmap, but that number is an upper bound in general
375  // since the neighboring processor associated with the cmap
376  // may not actually own it
377  needed_node_idxs[cmap].reserve (nemhelper->node_cmap_node_cnts[cmap]);
378 
379  const unsigned int adjcnt_pid_idx = nemhelper->node_cmap_ids[cmap];
380 
381  // ...and each node in that cmap...
382  for (unsigned int idx=0; idx<to_uint(nemhelper->node_cmap_node_cnts[cmap]); idx++)
383  {
384  const unsigned int
385  local_node_idx = nemhelper->node_cmap_node_ids[cmap][idx]-1,
386  owning_pid_idx = node_ownership[local_node_idx];
387 
388  // add it to the request list for its owning processor.
389  if (owning_pid_idx == adjcnt_pid_idx)
390  {
391  const unsigned int
392  global_node_idx = nemhelper->node_num_map[local_node_idx]-1;
393  needed_node_idxs[cmap].push_back(global_node_idx);
394  }
395  }
396  // now post the send for this cmap
397  this->comm().send (adjcnt_pid_idx, // destination
398  needed_node_idxs[cmap], // send buffer
399  needed_nodes_requests[cmap], // request
400  nodes_tag);
401  } // all communication requests for getting updated global indices for border
402  // nodes have been initiated
403 
404  // Figure out how many nodes each processor thinks it will number and make sure
405  // that it adds up to the global number of nodes. Also, set up global node
406  // index offsets for each processor.
407  std::vector<unsigned int>
408  all_num_nodes_i_must_number (this->n_processors());
409 
410  for (unsigned int pid=0; pid<this->n_processors(); pid++)
411  all_num_nodes_i_must_number[pid] = all_loadbal_data[8*pid + 7];
412 
413  // The sum of all the entries in this vector should sum to the number of global nodes
414  libmesh_assert (std::accumulate(all_num_nodes_i_must_number.begin(),
415  all_num_nodes_i_must_number.end(),
416  0) == nemhelper->num_nodes_global);
417 
418  unsigned int my_next_node = 0;
419  for (unsigned int pid=0; pid<this->processor_id(); pid++)
420  my_next_node += all_num_nodes_i_must_number[pid];
421 
422  const unsigned int my_node_offset = my_next_node;
423 
424  if (_verbose)
425  libMesh::out << "[" << this->processor_id() << "] "
426  << "my_node_offset="
427  << my_node_offset
428  << std::endl;
429 
430  // Add internal nodes to the DistributedMesh, using the node ID offset we
431  // computed and the current processor's ID.
432  for (unsigned int i=0; i<to_uint(nemhelper->num_internal_nodes); ++i)
433  {
434  const unsigned int local_node_idx = nemhelper->node_mapi[i]-1;
435 #ifndef NDEBUG
436  const unsigned int owning_pid_idx = node_ownership[local_node_idx];
437 #endif
438 
439  // an internal node we do not own? huh??
440  libmesh_assert_equal_to (owning_pid_idx, this->processor_id());
441  libmesh_assert_less (my_next_node, to_uint(nemhelper->num_nodes_global));
442 
443  // "Catch" the node pointer after addition, make sure the
444  // ID matches the requested value.
445  Node * added_node =
446  mesh.add_point (Point(nemhelper->x[local_node_idx],
447  nemhelper->y[local_node_idx],
448  nemhelper->z[local_node_idx]),
449  my_next_node,
450  this->processor_id());
451 
452  // Make sure the node we added has the ID we thought it would
453  if (added_node->id() != my_next_node)
454  {
455  libMesh::err << "Error, node added with ID " << added_node->id()
456  << ", but we wanted ID " << my_next_node << std::endl;
457  }
458 
459  // update the local->global index map, when we are done
460  // it will be 0-based.
461  nemhelper->node_num_map[local_node_idx] = my_next_node++;
462  }
463 
464  // Now, for the boundary nodes... We may very well own some of them,
465  // but there may be others for which we have requested the new global
466  // id. We expect to be asked for the ids of the ones we own, so
467  // we need to create a map from the old global id to the new one
468  // we are about to create.
469  typedef std::vector<std::pair<unsigned int, unsigned int>> global_idx_mapping_type;
470  global_idx_mapping_type old_global_to_new_global_map;
471  old_global_to_new_global_map.reserve (num_nodes_i_must_number // total # i will have
472  - (my_next_node // amount i have thus far
473  - my_node_offset)); // this should be exact!
474  CompareGlobalIdxMappings global_idx_mapping_comp;
475 
476  for (unsigned int i=0; i<to_uint(nemhelper->num_border_nodes); ++i)
477  {
478  const unsigned int
479  local_node_idx = nemhelper->node_mapb[i]-1,
480  owning_pid_idx = node_ownership[local_node_idx];
481 
482  // if we own it...
483  if (owning_pid_idx == this->processor_id())
484  {
485  const unsigned int
486  global_node_idx = nemhelper->node_num_map[local_node_idx]-1;
487 
488  // we will number it, and create a mapping from its old global index to
489  // the new global index, for lookup purposes when neighbors come calling
490  old_global_to_new_global_map.push_back(std::make_pair(global_node_idx,
491  my_next_node));
492 
493  // "Catch" the node pointer after addition, make sure the
494  // ID matches the requested value.
495  Node * added_node =
496  mesh.add_point (Point(nemhelper->x[local_node_idx],
497  nemhelper->y[local_node_idx],
498  nemhelper->z[local_node_idx]),
499  my_next_node,
500  this->processor_id());
501 
502  // Make sure the node we added has the ID we thought it would
503  if (added_node->id() != my_next_node)
504  {
505  libMesh::err << "Error, node added with ID " << added_node->id()
506  << ", but we wanted ID " << my_next_node << std::endl;
507  }
508 
509  // update the local->global index map, when we are done
510  // it will be 0-based.
511  nemhelper->node_num_map[local_node_idx] = my_next_node++;
512  }
513  }
514  // That should cover numbering all the nodes which belong to us...
515  libmesh_assert_equal_to (num_nodes_i_must_number, (my_next_node - my_node_offset));
516 
517  // Let's sort the mapping so we can efficiently answer requests
518  std::sort (old_global_to_new_global_map.begin(),
519  old_global_to_new_global_map.end(),
520  global_idx_mapping_comp);
521 
522  // and it had better be unique...
523  libmesh_assert (std::unique (old_global_to_new_global_map.begin(),
524  old_global_to_new_global_map.end(),
525  global_idx_mapping_equality) ==
526  old_global_to_new_global_map.end());
527 
528  // We can now catch incoming requests and process them. for efficiency
529  // let's do whatever is available next
530  std::map<unsigned int, std::vector<int>> requested_node_idxs; // the indices asked of us
531 
532  std::vector<Parallel::Request> requested_nodes_requests(nemhelper->num_node_cmaps);
533 
534  // We know we will receive the request from a given processor before
535  // we receive its reply to our request. However, we may receive
536  // a request and a response from one processor before getting
537  // a request from another processor. So what we are doing here
538  // is processing whatever message comes next, while recognizing
539  // we will receive a request from a processor before receiving
540  // its reply
541  std::vector<bool> processed_cmap (nemhelper->num_node_cmaps, false);
542 
543  for (unsigned int comm_step=0; comm_step<2*to_uint(nemhelper->num_node_cmaps); comm_step++)
544  {
545  // query the first message which is available
546  const Parallel::Status
548  nodes_tag));
549  const unsigned int
550  requesting_pid_idx = status.source(),
551  source_pid_idx = status.source();
552 
553  // this had better be from a processor we are expecting...
554  libmesh_assert (pid_to_cmap_map.count(requesting_pid_idx));
555 
556  // the local cmap which corresponds to the source processor
557  const unsigned int cmap = pid_to_cmap_map[source_pid_idx];
558 
559  if (!processed_cmap[cmap])
560  {
561  processed_cmap[cmap] = true;
562 
563  // we should only get one request per paired processor
564  libmesh_assert (!requested_node_idxs.count(requesting_pid_idx));
565 
566  // get a reference to the request buffer for this processor to
567  // avoid repeated map lookups
568  std::vector<int> & xfer_buf (requested_node_idxs[requesting_pid_idx]);
569 
570  // actually receive the message.
571  this->comm().receive (requesting_pid_idx, xfer_buf, nodes_tag);
572 
573  // Fill the request
574  for (std::size_t i=0; i<xfer_buf.size(); i++)
575  {
576  // the requested old global node index, *now 0-based*
577  const unsigned int old_global_node_idx = xfer_buf[i];
578 
579  // find the new global node index for the requested node -
580  // note that requesting_pid_idx thinks we own this node,
581  // so we better!
582  const global_idx_mapping_type::const_iterator it =
583  std::lower_bound (old_global_to_new_global_map.begin(),
584  old_global_to_new_global_map.end(),
585  old_global_node_idx,
586  global_idx_mapping_comp);
587 
588  libmesh_assert (it != old_global_to_new_global_map.end());
589  libmesh_assert_equal_to (it->first, old_global_node_idx);
590  libmesh_assert_greater_equal (it->second, my_node_offset);
591  libmesh_assert_less (it->second, my_next_node);
592 
593  // overwrite the requested old global node index with the new global index
594  xfer_buf[i] = it->second;
595  }
596 
597  // and send the new global indices back to the processor which asked for them
598  this->comm().send (requesting_pid_idx,
599  xfer_buf,
600  requested_nodes_requests[cmap],
601  nodes_tag);
602  } // done processing the request
603 
604  // this is the second time we have heard from this processor,
605  // so it must be its reply to our request
606  else
607  {
608  // a long time ago, we sent off our own requests. now it is time to catch the
609  // replies and get the new global node numbering. note that for any reply
610  // we receive, the corresponding nonblocking send from above *must* have been
611  // completed, since the reply is in response to that request!!
612 
613  // if we have received a reply, our send *must* have completed
614  // (note we never actually need to wait on the request)
615  libmesh_assert (needed_nodes_requests[cmap].test());
616  libmesh_assert_equal_to (to_uint(nemhelper->node_cmap_ids[cmap]), source_pid_idx);
617 
618  // now post the receive for this cmap
619  this->comm().receive (source_pid_idx,
620  needed_node_idxs[cmap],
621  nodes_tag);
622 
623  libmesh_assert_less_equal (needed_node_idxs[cmap].size(),
624  nemhelper->node_cmap_node_ids[cmap].size());
625 
626  for (std::size_t i=0, j=0; i<nemhelper->node_cmap_node_ids[cmap].size(); i++)
627  {
628  const unsigned int
629  local_node_idx = nemhelper->node_cmap_node_ids[cmap][i]-1,
630  owning_pid_idx = node_ownership[local_node_idx];
631 
632  // if this node is owned by source_pid_idx, its new global id
633  // is in the buffer we just received
634  if (owning_pid_idx == source_pid_idx)
635  {
636  libmesh_assert_less (j, needed_node_idxs[cmap].size());
637 
638  const unsigned int // now 0-based!
639  global_node_idx = needed_node_idxs[cmap][j++];
640 
641  // "Catch" the node pointer after addition, make sure the
642  // ID matches the requested value.
643  Node * added_node =
644  mesh.add_point (Point(nemhelper->x[local_node_idx],
645  nemhelper->y[local_node_idx],
646  nemhelper->z[local_node_idx]),
647  cast_int<dof_id_type>(global_node_idx),
648  cast_int<processor_id_type>(source_pid_idx));
649 
650  // Make sure the node we added has the ID we thought it would
651  if (added_node->id() != global_node_idx)
652  {
653  libMesh::err << "Error, node added with ID " << added_node->id()
654  << ", but we wanted ID " << global_node_idx << std::endl;
655  }
656 
657  // update the local->global index map, when we are done
658  // it will be 0-based.
659  nemhelper->node_num_map[local_node_idx] = global_node_idx;
660 
661  // we are not really going to use my_next_node again, but we can
662  // keep incrementing it to track how many nodes we have added
663  // to the mesh
664  my_next_node++;
665  }
666  }
667  }
668  } // end of node index communication loop
669 
670  // we had better have added all the nodes we need to!
671  libmesh_assert_equal_to ((my_next_node - my_node_offset), to_uint(nemhelper->num_nodes));
672 
673  // After all that, we should be done with all node-related arrays *except* the
674  // node_num_map, which we have transformed to use our new numbering...
675  // So let's clean up the arrays we are done with.
676  {
677  Utility::deallocate (nemhelper->node_mapi);
678  Utility::deallocate (nemhelper->node_mapb);
679  Utility::deallocate (nemhelper->node_mape);
680  Utility::deallocate (nemhelper->node_cmap_ids);
681  Utility::deallocate (nemhelper->node_cmap_node_cnts);
682  Utility::deallocate (nemhelper->node_cmap_node_ids);
683  Utility::deallocate (nemhelper->node_cmap_proc_ids);
687  Utility::deallocate (needed_node_idxs);
688  Utility::deallocate (node_ownership);
689  }
690 
691  Parallel::wait (requested_nodes_requests);
692  requested_node_idxs.clear();
693 
694  // See what the node count is up to now.
695  if (_verbose)
696  {
697  // Report the number of nodes which have been added locally
698  libMesh::out << "[" << this->processor_id() << "] ";
699  libMesh::out << "mesh.n_nodes()=" << mesh.n_nodes() << std::endl;
700 
701  // Reports the number of nodes that have been added in total.
702  libMesh::out << "[" << this->processor_id() << "] ";
703  libMesh::out << "mesh.parallel_n_nodes()=" << mesh.parallel_n_nodes() << std::endl;
704  }
705 
706 
707 
708  // --------------------------------------------------------------------------------
709  // --------------------------------------------------------------------------------
710  // --------------------------------------------------------------------------------
711 
712 
713  // We can now read in the elements...Exodus stores them in blocks in which all
714  // elements have the same geometric type. This code is adapted directly from exodusII_io.C
715 
716  // Assertion: The sum of the border and internal elements on all processors
717  // should equal nemhelper->num_elems_global
718 #ifndef NDEBUG
719  {
720  int sum_internal_elems=0, sum_border_elems=0;
721  for (unsigned int j=3,c=0; c<this->n_processors(); j+=8,++c)
722  sum_internal_elems += all_loadbal_data[j];
723 
724  for (unsigned int j=4,c=0; c<this->n_processors(); j+=8,++c)
725  sum_border_elems += all_loadbal_data[j];
726 
727  if (_verbose)
728  {
729  libMesh::out << "[" << this->processor_id() << "] ";
730  libMesh::out << "sum_internal_elems=" << sum_internal_elems << std::endl;
731 
732  libMesh::out << "[" << this->processor_id() << "] ";
733  libMesh::out << "sum_border_elems=" << sum_border_elems << std::endl;
734  }
735 
736  libmesh_assert_equal_to (sum_internal_elems+sum_border_elems, nemhelper->num_elems_global);
737  }
738 #endif
739 
740  // We need to set the mesh dimension, but the following...
741  // mesh.set_mesh_dimension(static_cast<unsigned int>(nemhelper->num_dim));
742 
743  // ... is not sufficient since some codes report num_dim==3 for two dimensional
744  // meshes living in 3D, even though all the elements are of 2D type. Therefore,
745  // we instead use the dimension of the highest element found for the Mesh dimension,
746  // similar to what is done by the Exodus reader, except here it requires a
747  // parallel communication.
748  elems_of_dimension.resize(4, false); // will use 1-based
749 
750  // Compute my_elem_offset, the amount by which to offset the local elem numbering
751  // on my processor.
752  unsigned int my_next_elem = 0;
753  for (unsigned int pid=0; pid<this->processor_id(); ++pid)
754  my_next_elem += (all_loadbal_data[8*pid + 3]+ // num_internal_elems, proc pid
755  all_loadbal_data[8*pid + 4]); // num_border_elems, proc pid
756  const unsigned int my_elem_offset = my_next_elem;
757 
758  if (_verbose)
759  libMesh::out << "[" << this->processor_id() << "] "
760  << "my_elem_offset=" << my_elem_offset << std::endl;
761 
762 
763  // Fills in the:
764  // global_elem_blk_ids[] and
765  // global_elem_blk_cnts[] arrays.
766  nemhelper->get_eb_info_global();
767 
768  // // Fills in the vectors
769  // // elem_mapi[num_internal_elems]
770  // // elem_mapb[num_border_elems ]
771  // // These tell which of the (locally-numbered) elements are internal and which are border elements.
772  // // In our test example these arrays are sorted (but non-contiguous), which makes it possible to
773  // // binary search for each element ID... however I don't think we need to distinguish between the
774  // // two types, since either can have nodes the boundary!
775  // nemhelper->get_elem_map();
776 
777  // Fills in the vectors of vectors:
778  // elem_cmap_elem_ids[][]
779  // elem_cmap_side_ids[][]
780  // elem_cmap_proc_ids[][]
781  // These arrays are of size num_elem_cmaps * elem_cmap_elem_cnts[i], i = 0..num_elem_cmaps
782  nemhelper->get_elem_cmap();
783 
784  // Get information about the element blocks:
785  // (read in the array nemhelper->block_ids[])
786  nemhelper->read_block_info();
787 
788  // Reads the nemhelper->elem_num_map array, elem_num_map[i] is the global element number for
789  // local element number i.
790  nemhelper->read_elem_num_map();
791 
792  // Instantiate the ElementMaps interface. This is what translates LibMesh's
793  // element numbering scheme to Exodus's.
795 
796  // Read in the element connectivity for each block by
797  // looping over all the blocks.
798  for (unsigned int i=0; i<to_uint(nemhelper->num_elem_blk); i++)
799  {
800  // Read the information for block i: For nemhelper->block_ids[i], reads
801  // elem_type
802  // num_elem_this_blk
803  // num_nodes_per_elem
804  // num_attr
805  // connect <-- the nodal connectivity array for each element in the block.
806  nemhelper->read_elem_in_block(i);
807 
808  // Note that with parallel files it is possible we have no elements in
809  // this block!
810  if (!nemhelper->num_elem_this_blk) continue;
811 
812  // Set subdomain ID based on the block ID.
813  subdomain_id_type subdomain_id =
814  cast_int<subdomain_id_type>(nemhelper->block_ids[i]);
815 
816  // Create a type string (this uses the null-terminated string ctor).
817  const std::string type_str ( &(nemhelper->elem_type[0]) );
818 
819  // Set any relevant node/edge maps for this element
820  const ExodusII_IO_Helper::Conversion conv = em.assign_conversion(type_str);
821 
822  if (_verbose)
823  libMesh::out << "Reading a block of " << type_str << " elements." << std::endl;
824 
825  // Loop over all the elements in this block
826  for (unsigned int j=0; j<to_uint(nemhelper->num_elem_this_blk); j++)
827  {
828  Elem * elem = Elem::build (conv.get_canonical_type()).release();
829  libmesh_assert (elem);
830 
831  // Assign subdomain and processor ID to the newly-created Elem.
832  // Assigning the processor ID beforehand ensures that the Elem is
833  // not added as an "unpartitioned" element. Note that the element
834  // numbering in Exodus is also 1-based.
835  elem->subdomain_id() = subdomain_id;
836  elem->processor_id() = this->processor_id();
837  elem->set_id() = my_next_elem++;
838 #ifdef LIBMESH_ENABLE_UNIQUE_ID
839  elem->set_unique_id() = elem->id();
840 #endif
841 
842  // Mark that we have seen an element of the current element's
843  // dimension.
844  elems_of_dimension[elem->dim()] = true;
845 
846  // Add the created Elem to the Mesh, catch the Elem
847  // pointer that the Mesh throws back.
848  elem = mesh.add_elem (elem);
849 
850  // We are expecting the element "thrown back" by libmesh to have the ID we specified for it...
851  // Check to see that really is the case. Note that my_next_elem was post-incremented, so
852  // subtract 1 when performing the check.
853  if (elem->id() != my_next_elem-1)
854  libmesh_error_msg("Unexpected ID " \
855  << elem->id() \
856  << " set by parallel mesh. (expecting " \
857  << my_next_elem-1 \
858  << ").");
859 
860  // Set all the nodes for this element
861  if (_verbose)
862  libMesh::out << "[" << this->processor_id() << "] "
863  << "Setting nodes for Elem " << elem->id() << std::endl;
864 
865  for (unsigned int k=0; k<to_uint(nemhelper->num_nodes_per_elem); k++)
866  {
867  const unsigned int
868  gi = (j*nemhelper->num_nodes_per_elem + // index into connectivity array
869  conv.get_node_map(k)),
870  local_node_idx = nemhelper->connect[gi]-1, // local node index
871  global_node_idx = nemhelper->node_num_map[local_node_idx]; // new global node index
872 
873  // Set node number
874  elem->set_node(k) = mesh.node_ptr(global_node_idx);
875  }
876  } // for (unsigned int j=0; j<nemhelper->num_elem_this_blk; j++)
877  } // end for (unsigned int i=0; i<nemhelper->num_elem_blk; i++)
878 
879  libmesh_assert_equal_to ((my_next_elem - my_elem_offset), to_uint(nemhelper->num_elem));
880 
881  if (_verbose)
882  {
883  // Print local elems_of_dimension information
884  for (std::size_t i=1; i<elems_of_dimension.size(); ++i)
885  libMesh::out << "[" << this->processor_id() << "] "
886  << "elems_of_dimension[" << i << "]=" << elems_of_dimension[i] << std::endl;
887  }
888 
889  // Get the max dimension seen on the current processor
890  unsigned char max_dim_seen = 0;
891  for (std::size_t i=1; i<elems_of_dimension.size(); ++i)
892  if (elems_of_dimension[i])
893  max_dim_seen = static_cast<unsigned char>(i);
894 
895  // Do a global max to determine the max dimension seen by all processors.
896  // It should match -- I don't think we even support calculations on meshes
897  // with elements of different dimension...
898  this->comm().max(max_dim_seen);
899 
900  if (_verbose)
901  {
902  // Print the max element dimension from all processors
903  libMesh::out << "[" << this->processor_id() << "] "
904  << "max_dim_seen=" << +max_dim_seen << std::endl;
905  }
906 
907  // Set the mesh dimension to the largest encountered for an element
908  mesh.set_mesh_dimension(max_dim_seen);
909 
910 #if LIBMESH_DIM < 3
911  if (mesh.mesh_dimension() > LIBMESH_DIM)
912  libmesh_error_msg("Cannot open dimension " \
913  << mesh.mesh_dimension() \
914  << " mesh file when configured without " \
915  << mesh.mesh_dimension() \
916  << "D support." );
917 #endif
918 
919 
920  // Global sideset information, they are distributed as well, not sure if they will require communication...?
921  nemhelper->get_ss_param_global();
922 
923  if (_verbose)
924  {
925  libMesh::out << "[" << this->processor_id() << "] "
926  << "Read global sideset parameter information." << std::endl;
927 
928  // These global values should be the same on all processors...
929  libMesh::out << "[" << this->processor_id() << "] "
930  << "Number of global sideset IDs: " << nemhelper->global_sideset_ids.size() << std::endl;
931  }
932 
933  // Read *local* sideset info the same way it is done in
934  // exodusII_io_helper. May be called any time after
935  // nem_helper->read_header(); This sets num_side_sets and resizes
936  // elem_list, side_list, and id_list to num_elem_all_sidesets. Note
937  // that there appears to be the same number of sidesets in each file
938  // but they all have different numbers of entries (some are empty).
939  // Note that the sum of "nemhelper->num_elem_all_sidesets" over all
940  // processors should equal the sum of the entries in the "num_global_side_counts" array
941  // filled up by nemhelper->get_ss_param_global()
942  nemhelper->read_sideset_info();
943 
944  if (_verbose)
945  {
946  libMesh::out << "[" << this->processor_id() << "] "
947  << "nemhelper->num_side_sets = " << nemhelper->num_side_sets << std::endl;
948 
949  libMesh::out << "[" << this->processor_id() << "] "
950  << "nemhelper->num_elem_all_sidesets = " << nemhelper->num_elem_all_sidesets << std::endl;
951 
952  if (nemhelper->num_side_sets > 0)
953  {
954  libMesh::out << "Sideset names are: ";
955  std::map<int, std::string>::iterator
956  it = nemhelper->id_to_ss_names.begin(),
957  end = nemhelper->id_to_ss_names.end();
958  for (; it != end; ++it)
959  libMesh::out << "(" << it->first << "," << it->second << ") ";
960  libMesh::out << std::endl;
961  }
962  }
963 
964 #ifdef DEBUG
965  {
966  // In DEBUG mode, check that the global number of sidesets reported
967  // in each nemesis file matches the sum of all local sideset counts
968  // from each processor. This requires a small communication, so only
969  // do it in DEBUG mode.
970  int sum_num_global_side_counts = std::accumulate(nemhelper->num_global_side_counts.begin(),
971  nemhelper->num_global_side_counts.end(),
972  0);
973 
974  // MPI sum up the local files contributions
975  int sum_num_elem_all_sidesets = nemhelper->num_elem_all_sidesets;
976  this->comm().sum(sum_num_elem_all_sidesets);
977 
978  if (sum_num_global_side_counts != sum_num_elem_all_sidesets)
979  libmesh_error_msg("Error! global side count reported by Nemesis does not " \
980  << "match the side count reported by the individual files!");
981  }
982 #endif
983 
984  // Note that exodus stores sidesets in separate vectors but we want to pack
985  // them all into a single vector. So when we call read_sideset(), we pass an offset
986  // into the single vector of all IDs
987  for (int offset=0, i=0; i<nemhelper->num_side_sets; i++)
988  {
989  offset += (i > 0 ? nemhelper->num_sides_per_set[i-1] : 0); // Compute new offset
990  nemhelper->read_sideset (i, offset);
991  }
992 
993  // Now that we have the lists of elements, sides, and IDs, we are ready to set them
994  // in the BoundaryInfo object of our Mesh object. This is slightly different in parallel...
995  // For example, I think the IDs in each of the split Exodus files are numbered locally,
996  // and we have to know the appropriate ID for this processor to be able to set the
997  // entry in BoundaryInfo. This offset should be given by my_elem_offset determined in
998  // this function...
999 
1000  // Debugging:
1001  // Print entries of elem_list
1002  // libMesh::out << "[" << this->processor_id() << "] "
1003  // << "elem_list = ";
1004  // for (std::size_t e=0; e<nemhelper->elem_list.size(); e++)
1005  // {
1006  // libMesh::out << nemhelper->elem_list[e] << ", ";
1007  // }
1008  // libMesh::out << std::endl;
1009 
1010  // Print entries of side_list
1011  // libMesh::out << "[" << this->processor_id() << "] "
1012  // << "side_list = ";
1013  // for (std::size_t e=0; e<nemhelper->side_list.size(); e++)
1014  // {
1015  // libMesh::out << nemhelper->side_list[e] << ", ";
1016  // }
1017  // libMesh::out << std::endl;
1018 
1019 
1020  // Loop over the entries of the elem_list, get their pointers from the
1021  // Mesh data structure, and assign the appropriate side to the BoundaryInfo object.
1022  for (std::size_t e=0; e<nemhelper->elem_list.size(); e++)
1023  {
1024  // Calling mesh.elem_ptr() is an error if no element with that
1025  // id exists on this processor...
1026  //
1027  // Perhaps we should iterate over elements and look them up in
1028  // the elem list instead? Note that the IDs in elem_list are
1029  // not necessarily in order, so if we did instead loop over the
1030  // mesh, we would have to search the (unsorted) elem_list vector
1031  // for each entry! We'll settle for doing some error checking instead.
1032  Elem * elem = mesh.elem_ptr
1033  (my_elem_offset +
1034  (nemhelper->elem_list[e]-1)/*Exodus numbering is 1-based!*/);
1035 
1036  // The side numberings in libmesh and exodus are not 1:1, so we need to map
1037  // whatever side number is stored in Exodus into a libmesh side number using
1038  // a conv object...
1039  const ExodusII_IO_Helper::Conversion conv =
1040  em.assign_conversion(elem->type());
1041 
1042  // Finally, we are ready to add the element and its side to the BoundaryInfo object.
1043  // Call the version of add_side which takes a pointer, since we have already gone to
1044  // the trouble of getting said pointer...
1045  mesh.get_boundary_info().add_side(elem,
1046  cast_int<unsigned short>(conv.get_side_map(nemhelper->side_list[e]-1)), // Exodus numbering is 1-based
1047  cast_int<boundary_id_type>(nemhelper->id_list[e]));
1048  }
1049 
1050  // Debugging: make sure there are as many boundary conditions in the
1051  // boundary ID object as expected. Note that, at this point, the
1052  // mesh still thinks it's serial, so n_boundary_conds() returns the
1053  // local number of boundary conditions (and is therefore cheap)
1054  // which should match nemhelper->elem_list.size().
1055  {
1056  std::size_t nbcs = mesh.get_boundary_info().n_boundary_conds();
1057  if (nbcs != nemhelper->elem_list.size())
1058  libmesh_error_msg("[" << this->processor_id() << "] " \
1059  << "BoundaryInfo contains " \
1060  << nbcs \
1061  << " boundary conditions, while the Exodus file had " \
1062  << nemhelper->elem_list.size());
1063  }
1064 
1065  // Read global nodeset parameters? We might be able to use this to verify
1066  // something about the local files, but I haven't figured out what yet...
1067  nemhelper->get_ns_param_global();
1068 
1069  // Read local nodeset info
1070  nemhelper->read_nodeset_info();
1071 
1072  if (_verbose)
1073  {
1074  libMesh::out << "[" << this->processor_id() << "] ";
1075  libMesh::out << "nemhelper->num_node_sets=" << nemhelper->num_node_sets << std::endl;
1076  if (nemhelper->num_node_sets > 0)
1077  {
1078  libMesh::out << "Nodeset names are: ";
1079  std::map<int, std::string>::iterator
1080  it = nemhelper->id_to_ns_names.begin(),
1081  end = nemhelper->id_to_ns_names.end();
1082  for (; it != end; ++it)
1083  libMesh::out << "(" << it->first << "," << it->second << ") ";
1084  libMesh::out << std::endl;
1085  }
1086  }
1087 
1088  // // Debugging, what is currently in nemhelper->node_num_map anyway?
1089  // libMesh::out << "[" << this->processor_id() << "] "
1090  // << "nemhelper->node_num_map = ";
1091  //
1092  // for (std::size_t i=0; i<nemhelper->node_num_map.size(); ++i)
1093  // libMesh::out << nemhelper->node_num_map[i] << ", ";
1094  // libMesh::out << std::endl;
1095 
1096  // For each nodeset,
1097  for (int nodeset=0; nodeset<nemhelper->num_node_sets; nodeset++)
1098  {
1099  // Get the user-defined ID associated with the nodeset
1100  int nodeset_id = nemhelper->nodeset_ids[nodeset];
1101 
1102  if (_verbose)
1103  {
1104  libMesh::out << "[" << this->processor_id() << "] ";
1105  libMesh::out << "nemhelper->nodeset_ids[" << nodeset << "]=" << nodeset_id << std::endl;
1106  }
1107 
1108  // Read the nodeset from file, store them in a vector
1109  nemhelper->read_nodeset(nodeset);
1110 
1111  // Add nodes from the node_list to the BoundaryInfo object
1112  for (std::size_t node=0; node<nemhelper->node_list.size(); node++)
1113  {
1114  // Don't run past the end of our node map!
1115  if (to_uint(nemhelper->node_list[node]-1) >= nemhelper->node_num_map.size())
1116  libmesh_error_msg("Error, index is past the end of node_num_map array!");
1117 
1118  // We should be able to use the node_num_map data structure set up previously to determine
1119  // the proper global node index.
1120  unsigned global_node_id = nemhelper->node_num_map[ nemhelper->node_list[node]-1 /*Exodus is 1-based!*/ ];
1121 
1122  if (_verbose)
1123  {
1124  libMesh::out << "[" << this->processor_id() << "] "
1125  << "nodeset " << nodeset
1126  << ", local node number: " << nemhelper->node_list[node]-1
1127  << ", global node id: " << global_node_id
1128  << std::endl;
1129  }
1130 
1131  // Add the node to the BoundaryInfo object with the proper nodeset_id
1133  (cast_int<dof_id_type>(global_node_id),
1134  cast_int<boundary_id_type>(nodeset_id));
1135  }
1136  }
1137 
1138  // See what the elem count is up to now.
1139  if (_verbose)
1140  {
1141  // Report the number of elements which have been added locally
1142  libMesh::out << "[" << this->processor_id() << "] ";
1143  libMesh::out << "mesh.n_elem()=" << mesh.n_elem() << std::endl;
1144 
1145  // Reports the number of elements that have been added in total.
1146  libMesh::out << "[" << this->processor_id() << "] ";
1147  libMesh::out << "mesh.parallel_n_elem()=" << mesh.parallel_n_elem() << std::endl;
1148  }
1149 
1150  // For DistributedMesh, it seems that _is_serial is true by default. A hack to
1151  // make the Mesh think it's parallel might be to call:
1152  mesh.update_post_partitioning();
1154  mesh.delete_remote_elements();
1155 
1156  // And if that didn't work, then we're actually reading into a
1157  // ReplicatedMesh, so forget about gathering neighboring elements
1158  if (mesh.is_serial())
1159  return;
1160 
1161  // Gather neighboring elements so that the mesh has the proper "ghost" neighbor information.
1162  MeshCommunication().gather_neighboring_elements(cast_ref<DistributedMesh &>(mesh));
1163 }
1164 
1165 #else
1166 
// Stub used when libMesh is configured without the Nemesis/Exodus
// APIs: reading Nemesis files is unavailable, so fail loudly rather
// than silently producing an empty mesh.
void Nemesis_IO::read (const std::string &)
{
  libmesh_error_msg("ERROR, Nemesis API is not defined!");
}
1171 
1172 #endif // #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1173 
1174 
1175 
1176 
1177 
1178 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1179 
1180 void Nemesis_IO::write (const std::string & base_filename)
1181 {
1182  // Get a constant reference to the mesh for writing
1183  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
1184 
1185  // Create the filename for this processor given the base_filename passed in.
1186  std::string nemesis_filename = nemhelper->construct_nemesis_filename(base_filename);
1187 
1188  // If the user has set the append flag here, it doesn't really make
1189  // sense: the intent of this function is to write a Mesh with no
1190  // data, while "appending" is really intended to add data to an
1191  // existing file. If we're verbose, print a message to this effect.
1192  if (_append && _verbose)
1193  libMesh::out << "Warning: Appending in Nemesis_IO::write() does not make sense.\n"
1194  << "Creating a new file instead!"
1195  << std::endl;
1196 
1197  nemhelper->create(nemesis_filename);
1198 
1199  // Initialize data structures and write some global Nemesis-specific data, such as
1200  // communication maps, to file.
1201  nemhelper->initialize(nemesis_filename,mesh);
1202 
1203  // Call the Nemesis-specialized version of write_nodal_coordinates() to write
1204  // the nodal coordinates.
1205  nemhelper->write_nodal_coordinates(mesh);
1206 
1207  // Call the Nemesis-specialized version of write_elements() to write
1208  // the elements. Note: Must write a zero if a given global block ID has no
1209  // elements...
1210  nemhelper->write_elements(mesh);
1211 
1212  // Call our specialized function to write the nodesets
1213  nemhelper->write_nodesets(mesh);
1214 
1215  // Call our specialized write_sidesets() function to write the sidesets to file
1216  nemhelper->write_sidesets(mesh);
1217 
1218  // Not sure if this is really necessary, but go ahead and flush the file
1219  // once we have written all this stuff.
1220  nemhelper->ex_err = exII::ex_update(nemhelper->ex_id);
1221 
1222  if ((mesh.get_boundary_info().n_edge_conds() > 0) && _verbose)
1223  {
1224  libMesh::out << "Warning: Mesh contains edge boundary IDs, but these "
1225  << "are not supported by the Nemesis format."
1226  << std::endl;
1227  }
1228 }
1229 
1230 #else
1231 
// Stub used when libMesh is configured without the Nemesis/Exodus
// APIs: writing Nemesis files is unavailable.
void Nemesis_IO::write (const std::string & )
{
  libmesh_error_msg("ERROR, Nemesis API is not defined!");
}
1236 
1237 #endif // #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1238 
1239 
1240 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1241 
// Write one timestep's worth of the solution in es, tagged with the
// given timestep index and time value.
void Nemesis_IO::write_timestep (const std::string & fname,
                                 const EquationSystems & es,
                                 const int timestep,
                                 const Real time)
{
  // Record the timestep index first -- write_equation_systems()
  // presumably ends up in our write_nodal_data() overrides, which
  // pass _timestep through to the helper.
  _timestep=timestep;
  write_equation_systems(fname,es);

  // Associate the time value with this timestep in the file.
  nemhelper->write_timestep(timestep, time);
}
1252 
1253 #else
1254 
// Stub used when libMesh is configured without the Nemesis/Exodus
// APIs: timestep output is unavailable.
void Nemesis_IO::write_timestep (const std::string &,
                                 const EquationSystems &,
                                 const int,
                                 const Real)
{
  libmesh_error_msg("ERROR, Nemesis API is not defined!");
}
1262 
1263 #endif // #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1264 
1265 
1266 
1267 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1268 
1269 void Nemesis_IO::prepare_to_write_nodal_data (const std::string & fname,
1270  const std::vector<std::string> & names)
1271 {
1272  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
1273 
1274  std::string nemesis_filename = nemhelper->construct_nemesis_filename(fname);
1275 
1276  if (!nemhelper->opened_for_writing)
1277  {
1278  // If we're appending, open() the file with read_only=false,
1279  // otherwise create() it and write the contents of the mesh to
1280  // it.
1281  if (_append)
1282  {
1283  nemhelper->open(nemesis_filename.c_str(), /*read_only=*/false);
1284  // After opening the file, read the header so that certain
1285  // fields, such as the number of nodes and the number of
1286  // elements, are correctly initialized for the subsequent
1287  // call to write the nodal solution.
1288  nemhelper->read_header();
1289  }
1290  else
1291  {
1292  nemhelper->create(nemesis_filename);
1293  nemhelper->initialize(nemesis_filename,mesh);
1294  nemhelper->write_nodal_coordinates(mesh);
1295  nemhelper->write_elements(mesh);
1296  nemhelper->write_nodesets(mesh);
1297  nemhelper->write_sidesets(mesh);
1298 
1299  if ((mesh.get_boundary_info().n_edge_conds() > 0) && _verbose)
1300  {
1301  libMesh::out << "Warning: Mesh contains edge boundary IDs, but these "
1302  << "are not supported by the ExodusII format."
1303  << std::endl;
1304  }
1305 
1306  // If we don't have any nodes written out on this processor,
1307  // Exodus seems to like us better if we don't try to write out any
1308  // variable names too...
1309 #ifdef LIBMESH_USE_COMPLEX_NUMBERS
1310 
1311  std::vector<std::string> complex_names = nemhelper->get_complex_names(names);
1312 
1313  nemhelper->initialize_nodal_variables(complex_names);
1314 #else
1315  nemhelper->initialize_nodal_variables(names);
1316 #endif
1317  }
1318  }
1319 }
1320 
1321 #else
1322 
// Stub used when libMesh is configured without the Nemesis/Exodus
// APIs.
void Nemesis_IO::prepare_to_write_nodal_data (const std::string &,
                                              const std::vector<std::string> &)
{
  libmesh_error_msg("ERROR, Nemesis API is not defined.");
}
1328 
1329 #endif
1330 
1331 
1332 
1333 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1334 
// Output a nodal solution supplied as a (distributed) NumericVector.
void Nemesis_IO::write_nodal_data (const std::string & base_filename,
                                   const NumericVector<Number> & parallel_soln,
                                   const std::vector<std::string> & names)
{
  LOG_SCOPE("write_nodal_data(parallel)", "Nemesis_IO");

  // Make sure the file is open/created and the variable names are
  // registered before writing values.
  this->prepare_to_write_nodal_data(base_filename, names);

  // Call the new version of write_nodal_solution() that takes a
  // NumericVector directly without localizing.
  nemhelper->write_nodal_solution(parallel_soln, names, _timestep);
}
1347 
1348 #else
1349 
// Stub used when libMesh is configured without the Nemesis/Exodus
// APIs.
void Nemesis_IO::write_nodal_data (const std::string &,
                                   const NumericVector<Number> &,
                                   const std::vector<std::string> &)
{
  libmesh_error_msg("ERROR, Nemesis API is not defined.");
}
1356 
1357 #endif
1358 
1359 
1360 
1361 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1362 
// Output a nodal solution supplied as a serialized std::vector.
void Nemesis_IO::write_nodal_data (const std::string & base_filename,
                                   const std::vector<Number> & soln,
                                   const std::vector<std::string> & names)
{
  LOG_SCOPE("write_nodal_data(serialized)", "Nemesis_IO");

  // Make sure the file is open/created and the variable names are
  // registered before writing values.
  this->prepare_to_write_nodal_data(base_filename, names);

  nemhelper->write_nodal_solution(soln, names, _timestep);
}
1373 
1374 #else
1375 
// Stub used when libMesh is configured without the Nemesis/Exodus
// APIs.
void Nemesis_IO::write_nodal_data (const std::string &,
                                   const std::vector<Number> &,
                                   const std::vector<std::string> &)
{
  libmesh_error_msg("ERROR, Nemesis API is not defined.");
}
1382 
1383 #endif
1384 
1385 
1386 
1387 
1388 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1389 
1390 void Nemesis_IO::write_global_data (const std::vector<Number> & soln,
1391  const std::vector<std::string> & names)
1392 {
1393  if (!nemhelper->opened_for_writing)
1394  libmesh_error_msg("ERROR, Nemesis file must be initialized before outputting global variables.");
1395 
1396 #ifdef LIBMESH_USE_COMPLEX_NUMBERS
1397 
1398  std::vector<std::string> complex_names = nemhelper->get_complex_names(names);
1399 
1400  nemhelper->initialize_global_variables(complex_names);
1401 
1402  unsigned int num_values = soln.size();
1403  unsigned int num_vars = names.size();
1404  unsigned int num_elems = num_values / num_vars;
1405 
1406  // This will contain the real and imaginary parts and the magnitude
1407  // of the values in soln
1408  std::vector<Real> complex_soln(3*num_values);
1409 
1410  for (unsigned i=0; i<num_vars; ++i)
1411  {
1412  for (unsigned int j=0; j<num_elems; ++j)
1413  {
1414  Number value = soln[i*num_vars + j];
1415  complex_soln[3*i*num_elems + j] = value.real();
1416  }
1417  for (unsigned int j=0; j<num_elems; ++j)
1418  {
1419  Number value = soln[i*num_vars + j];
1420  complex_soln[3*i*num_elems + num_elems +j] = value.imag();
1421  }
1422  for (unsigned int j=0; j<num_elems; ++j)
1423  {
1424  Number value = soln[i*num_vars + j];
1425  complex_soln[3*i*num_elems + 2*num_elems + j] = std::abs(value);
1426  }
1427  }
1428 
1429  nemhelper->write_global_values(complex_soln, _timestep);
1430 
1431 #else
1432 
1433  // Call the Exodus writer implementation
1434  nemhelper->initialize_global_variables( names );
1435  nemhelper->write_global_values( soln, _timestep);
1436 
1437 #endif
1438 
1439 }
1440 
1441 #else
1442 
// Stub used when libMesh is configured without the Nemesis/Exodus
// APIs.
void Nemesis_IO::write_global_data (const std::vector<Number> &,
                                    const std::vector<std::string> &)
{
  libmesh_error_msg("ERROR, Nemesis API is not defined.");
}
1448 
1449 #endif // #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1450 
1451 
1452 
1453 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1454 
// Write free-form information records (text strings) into the file.
// The file must already have been created/initialized by a prior
// write() or prepare_to_write_nodal_data() call.
void Nemesis_IO::write_information_records (const std::vector<std::string> & records)
{
  if (!nemhelper->opened_for_writing)
    libmesh_error_msg("ERROR, Nemesis file must be initialized before outputting information records.");

  // Call the Exodus writer implementation
  nemhelper->write_information_records( records );
}
1463 
1464 
1465 #else
1466 
// Stub used when libMesh is configured without the Nemesis/Exodus
// APIs.
void Nemesis_IO::write_information_records ( const std::vector<std::string> & )
{
  libmesh_error_msg("ERROR, Nemesis API is not defined.");
}
1471 
1472 #endif // #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1473 
1474 } // namespace libMesh
OStreamProxy err
const BoundaryInfo & get_boundary_info() const
The information about boundary ids on the mesh.
Definition: mesh_base.h:117
double abs(double a)
unique_id_type & set_unique_id()
Definition: dof_object.h:662
This is the EquationSystems class.
Status wait(Request &r)
Wait for a non-blocking send or receive to finish.
Definition: parallel.h:565
void make_node_unique_ids_parallel_consistent(MeshBase &)
Assuming all unique_ids on local nodes are globally unique, and assuming all processor ids are parall...
virtual Node *& set_node(const unsigned int i)
Definition: elem.h:1941
A Node is like a Point, but with more information.
Definition: node.h:52
virtual bool is_serial() const
Definition: mesh_base.h:140
static UniquePtr< Elem > build(const ElemType type, Elem *p=libmesh_nullptr)
Definition: elem.C:238
virtual ElemType type() const =0
const unsigned int any_source
Accept from any source.
Definition: parallel.h:204
Encapsulates the MPI_Status struct.
Definition: parallel.h:461
void max(T &r) const
Take a local variable and replace it with the maximum of it&#39;s values on all processors.
The ExodusII_IO class implements reading meshes in the ExodusII file format from Sandia National Labs...
Definition: exodusII_io.h:52
std::vector< bool > elems_of_dimension
A vector of bools describing what dimension elements have been encountered when reading a mesh...
Definition: mesh_input.h:97
void deallocate(std::vector< T > &vec)
A convenient method to truly empty a vector using the "swap trick".
Definition: utility.h:244
std::size_t n_edge_conds() const
virtual void write_nodal_data(const std::string &fname, const std::vector< Number > &soln, const std::vector< std::string > &names) libmesh_override
Output a nodal solution.
Definition: nemesis_io.C:1363
processor_id_type n_processors() const
This is the base class from which all geometric element types are derived.
Definition: elem.h:89
MeshBase & mesh
ExodusII_IO_Helper::Conversion assign_conversion(std::string type_str)
void write_information_records(const std::vector< std::string > &)
Write out information records.
Definition: nemesis_io.C:1455
virtual const Node * node_ptr(const dof_id_type i) const =0
std::size_t n_boundary_conds() const
IterBase * end
Also have a polymorphic pointer to the end object, this prevents iterating past the end...
const MT & mesh() const
Definition: mesh_output.h:216
This class defines an abstract interface for Mesh output.
Definition: mesh_output.h:53
The libMesh namespace provides an interface to certain functionality in the library.
void gather_neighboring_elements(DistributedMesh &) const
virtual Node * add_point(const Point &p, const dof_id_type id=DofObject::invalid_id, const processor_id_type proc_id=DofObject::invalid_processor_id)=0
Add a new Node at Point p to the end of the vertex array, with processor_id procid.
bool _verbose
Controls whether extra debugging information is printed to the screen or not.
Definition: nemesis_io.h:140
This is the MeshBase class.
Definition: mesh_base.h:68
dof_id_type & set_id()
Definition: dof_object.h:641
virtual void write(const std::string &base_filename) libmesh_override
This method implements writing a mesh to a specified file.
Definition: nemesis_io.C:1180
libmesh_assert(j)
This is the Nemesis_IO_Helper class.
virtual ~Nemesis_IO()
Destructor.
Definition: nemesis_io.C:108
void add_node(const Node *node, const boundary_id_type id)
Add Node node with boundary id id to the boundary information data structures.
void write_timestep(const std::string &fname, const EquationSystems &es, const int timestep, const Real time)
Write one timestep&#39;s worth of the solution.
Definition: nemesis_io.C:1242
This is the MeshCommunication class.
void prepare_to_write_nodal_data(const std::string &fname, const std::vector< std::string > &names)
Helper function containing code shared between the two different versions of write_nodal_data which t...
Definition: nemesis_io.C:1269
This class defines an abstract interface for Mesh input.
Definition: mesh_base.h:50
virtual Elem * add_elem(Elem *e)=0
Add elem e to the end of the element array.
Encapsulates the MPI tag integers.
Definition: parallel.h:227
virtual dof_id_type parallel_n_elem() const =0
bool _append
Default false.
Definition: nemesis_io.h:146
MPI_Status status
Status object for querying messages.
Definition: parallel.h:176
void send(const unsigned int dest_processor_id, const T &buf, const MessageTag &tag=no_tag) const
Blocking-send to one processor with data-defined type.
subdomain_id_type subdomain_id() const
Definition: elem.h:1951
void set_mesh_dimension(unsigned char d)
Resets the logical dimension of the mesh.
Definition: mesh_base.h:199
This class forms the base class for all other classes that are expected to be implemented in parallel...
virtual dof_id_type parallel_n_nodes() const =0
virtual void write_equation_systems(const std::string &, const EquationSystems &, const std::set< std::string > *system_names=libmesh_nullptr)
This method implements writing a mesh with data to a specified file where the data is taken from the ...
int _timestep
Keeps track of the current timestep index being written.
Definition: nemesis_io.h:134
DIE A HORRIBLE DEATH HERE typedef LIBMESH_DEFAULT_SCALAR_TYPE Real
virtual void read(const std::string &name) libmesh_override
This method implements reading a mesh from a specified file.
Definition: exodusII_io.C:116
void write_global_data(const std::vector< Number > &, const std::vector< std::string > &)
Write out global variables.
Definition: nemesis_io.C:1390
const Parallel::Communicator & comm() const
OStreamProxy out
unsigned int mesh_dimension() const
Definition: mesh_base.C:148
virtual void read(const std::string &base_filename) libmesh_override
Implements reading the mesh from several different files.
Definition: nemesis_io.C:135
void alltoall(std::vector< T > &r) const
Effectively transposes the input vector across all processors.
void add_side(const dof_id_type elem, const unsigned short int side, const boundary_id_type id)
Add side side of element number elem with boundary id id to the boundary information data structure...
static const bool value
Definition: xdr_io.C:108
void set_n_partitions(unsigned int n_parts)
Sets the number of partitions in the mesh.
Definition: mesh_input.h:91
virtual unsigned int dim() const =0
Status receive(const unsigned int dest_processor_id, T &buf, const MessageTag &tag=any_tag) const
Blocking-receive from one processor with data-defined type.
void append(bool val)
If true, this flag will cause the Nemesis_IO object to attempt to open an existing file for writing...
Definition: nemesis_io.C:127
UniquePtr< Nemesis_IO_Helper > nemhelper
Definition: nemesis_io.h:128
virtual void delete_remote_elements()
When supported, deletes all nonlocal elements of the mesh except for "ghosts" which touch a local ele...
Definition: mesh_base.h:182
if(!eq) SETERRQ2(((PetscObject) dm) -> comm, PETSC_ERR_ARG_WRONG,"DM of type %s, not of type %s",((PetscObject) dm) ->type, DMLIBMESH)
virtual void update_post_partitioning()
Recalculate any cached data after elements and nodes have been repartitioned.
Definition: mesh_base.h:741
dof_id_type id() const
Definition: dof_object.h:632
virtual dof_id_type n_nodes() const =0
virtual const Elem * elem_ptr(const dof_id_type i) const =0
virtual dof_id_type n_elem() const =0
Nemesis_IO(MeshBase &mesh, bool single_precision=false)
Constructor.
Definition: nemesis_io.C:85
void sum(T &r) const
Take a local variable and replace it with the sum of it&#39;s values on all processors.
long double min(long double a, double b)
A Point defines a location in LIBMESH_DIM dimensional Real space.
Definition: point.h:38
void verbose(bool set_verbosity)
Set the flag indicating if we should be verbose.
Definition: nemesis_io.C:114
MessageTag get_unique_tag(int tagvalue) const
Get a tag that is unique to this Communicator.
status probe(const unsigned int src_processor_id, const MessageTag &tag=any_tag, const Communicator &comm=Communicator_World)
unsigned int idx(const ElemType type, const unsigned int nx, const unsigned int i, const unsigned int j)
A useful inline function which replaces the macros used previously.
processor_id_type processor_id() const
processor_id_type processor_id() const
Definition: dof_object.h:694
void allgather(const T &send, std::vector< T > &recv) const
Take a vector of length this->size(), and fill in recv[processor_id] = the value of send on that proc...