nemesis_io_helper.C
1 // The libMesh Finite Element Library.
2 // Copyright (C) 2002-2017 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner
3 
4 // This library is free software; you can redistribute it and/or
5 // modify it under the terms of the GNU Lesser General Public
6 // License as published by the Free Software Foundation; either
7 // version 2.1 of the License, or (at your option) any later version.
8 
9 // This library is distributed in the hope that it will be useful,
10 // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 // Lesser General Public License for more details.
13 
14 // You should have received a copy of the GNU Lesser General Public
15 // License along with this library; if not, write to the Free Software
16 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 
18 
19 // C++ headers
20 #include <iomanip>
21 #include <set>
22 #include <sstream>
23 
24 // Libmesh headers
25 #include "libmesh/nemesis_io_helper.h"
26 #include "libmesh/node.h"
27 #include "libmesh/elem.h"
28 #include "libmesh/boundary_info.h"
29 #include "libmesh/parallel.h"
30 #include "libmesh/mesh_base.h"
31 #include "libmesh/numeric_vector.h"
32 
33 #if defined(LIBMESH_HAVE_NEMESIS_API) && defined(LIBMESH_HAVE_EXODUS_API)
34 
35 namespace libMesh
36 {
37 
38 
39 // Initialize the various integer members to zero. We can check
40 // these later to see if they've been properly initialized...
41 // The parent ExodusII_IO_Helper is created with the run_only_on_proc0
42 // flag set to false, so that we can make use of its functionality
43 // on multiple processors.
44 Nemesis_IO_Helper::Nemesis_IO_Helper (const ParallelObject & parent,
45  bool verbose_in, bool single_precision) :
46  ExodusII_IO_Helper(parent, verbose_in, /*run_only_on_proc0=*/false, /*single_precision=*/single_precision),
47  nemesis_err_flag(0),
48  num_nodes_global(0),
49  num_elems_global(0),
50  num_elem_blks_global(0),
51  num_node_sets_global(0),
52  num_side_sets_global(0),
53  num_proc(0),
54  num_proc_in_file(0),
55  ftype('\0'),
56  num_internal_nodes(0),
57  num_border_nodes(0),
58  num_external_nodes(0),
59  num_internal_elems(0),
60  num_border_elems(0),
61  num_node_cmaps(0),
62  num_elem_cmaps(0)
63 {
64  // Warn about using untested code!
65  libmesh_experimental();
66 }
67 
68 
69 Nemesis_IO_Helper::~Nemesis_IO_Helper()
70 {
71  // Our destructor is called from Nemesis_IO. We close the Exodus file here since we have
72  // responsibility for managing the file's lifetime. Only call ex_update() if the file was
73  // opened for writing!
74  if (this->opened_for_writing)
75  {
76  this->ex_err = exII::ex_update(this->ex_id);
77  EX_EXCEPTIONLESS_CHECK_ERR(ex_err, "Error flushing buffers to file.");
78  }
79  this->close();
80 }
81 
82 
83 
84 void Nemesis_IO_Helper::get_init_global()
85 {
86  nemesis_err_flag =
87  Nemesis::ne_get_init_global(ex_id,
88  &num_nodes_global,
89  &num_elems_global,
90  &num_elem_blks_global,
91  &num_node_sets_global,
92  &num_side_sets_global);
93  EX_CHECK_ERR(nemesis_err_flag, "Error reading initial global data!");
94 
95  if (verbose)
96  {
97  libMesh::out << "[" << this->processor_id() << "] " << "num_nodes_global=" << num_nodes_global << std::endl;
98  libMesh::out << "[" << this->processor_id() << "] " << "num_elems_global=" << num_elems_global << std::endl;
99  libMesh::out << "[" << this->processor_id() << "] " << "num_elem_blks_global=" << num_elem_blks_global << std::endl;
100  libMesh::out << "[" << this->processor_id() << "] " << "num_node_sets_global=" << num_node_sets_global << std::endl;
101  libMesh::out << "[" << this->processor_id() << "] " << "num_side_sets_global=" << num_side_sets_global << std::endl;
102  }
103 }
104 
105 
106 
107 void Nemesis_IO_Helper::get_ss_param_global()
108 {
109  if (num_side_sets_global > 0)
110  {
111  global_sideset_ids.resize(num_side_sets_global);
112  num_global_side_counts.resize(num_side_sets_global);
113 
114  // df = "distribution factor": optional per-entity weights associated with a set.
115  // We don't yet have a file which uses them, so only the counts are read here.
116  num_global_side_df_counts.resize(num_side_sets_global);
117 
118  nemesis_err_flag =
119  Nemesis::ne_get_ss_param_global(ex_id,
120  &global_sideset_ids[0],
121  &num_global_side_counts[0],
122  &num_global_side_df_counts[0]);
123  EX_CHECK_ERR(nemesis_err_flag, "Error reading global sideset parameters!");
124 
125  if (verbose)
126  {
127  libMesh::out << "[" << this->processor_id() << "] " << "Global Sideset IDs, Side Counts, and DF counts:" << std::endl;
128  for (std::size_t bn=0; bn<global_sideset_ids.size(); ++bn)
129  {
130  libMesh::out << " [" << this->processor_id() << "] "
131  << "global_sideset_ids["<<bn<<"]=" << global_sideset_ids[bn]
132  << ", num_global_side_counts["<<bn<<"]=" << num_global_side_counts[bn]
133  << ", num_global_side_df_counts["<<bn<<"]=" << num_global_side_df_counts[bn]
134  << std::endl;
135  }
136  }
137  }
138 }
139 
140 
141 
142 
143 void Nemesis_IO_Helper::get_ns_param_global()
144 {
145  if (num_node_sets_global > 0)
146  {
147  global_nodeset_ids.resize(num_node_sets_global);
148  num_global_node_counts.resize(num_node_sets_global);
149  num_global_node_df_counts.resize(num_node_sets_global);
150 
151  nemesis_err_flag =
152  Nemesis::ne_get_ns_param_global(ex_id,
153  &global_nodeset_ids[0],
154  &num_global_node_counts[0],
155  &num_global_node_df_counts[0]);
156  EX_CHECK_ERR(nemesis_err_flag, "Error reading global nodeset parameters!");
157 
158  if (verbose)
159  {
160  libMesh::out << "[" << this->processor_id() << "] " << "Global Nodeset IDs, Node Counts, and DF counts:" << std::endl;
161  for (std::size_t bn=0; bn<global_nodeset_ids.size(); ++bn)
162  {
163  libMesh::out << " [" << this->processor_id() << "] "
164  << "global_nodeset_ids["<<bn<<"]=" << global_nodeset_ids[bn]
165  << ", num_global_node_counts["<<bn<<"]=" << num_global_node_counts[bn]
166  << ", num_global_node_df_counts["<<bn<<"]=" << num_global_node_df_counts[bn]
167  << std::endl;
168  }
169  }
170  }
171 }
172 
173 
174 
175 void Nemesis_IO_Helper::get_eb_info_global()
176 {
177  global_elem_blk_ids.resize(num_elem_blks_global);
178  global_elem_blk_cnts.resize(num_elem_blks_global);
179 
180  if (num_elem_blks_global > 0)
181  {
182  nemesis_err_flag =
183  Nemesis::ne_get_eb_info_global(ex_id,
184  &global_elem_blk_ids[0],
185  &global_elem_blk_cnts[0]);
186  EX_CHECK_ERR(nemesis_err_flag, "Error reading global element block info!");
187  }
188 
189  if (verbose)
190  {
191  libMesh::out << "[" << this->processor_id() << "] " << "Global Element Block IDs and Counts:" << std::endl;
192  for (std::size_t bn=0; bn<global_elem_blk_ids.size(); ++bn)
193  {
194  libMesh::out << " [" << this->processor_id() << "] "
195  << "global_elem_blk_ids["<<bn<<"]=" << global_elem_blk_ids[bn]
196  << ", global_elem_blk_cnts["<<bn<<"]=" << global_elem_blk_cnts[bn]
197  << std::endl;
198  }
199  }
200 }
201 
202 
203 
204 void Nemesis_IO_Helper::get_init_info()
205 {
206  nemesis_err_flag =
207  Nemesis::ne_get_init_info(ex_id,
208  &num_proc,
209  &num_proc_in_file,
210  &ftype);
211  EX_CHECK_ERR(nemesis_err_flag, "Error reading initial info!");
212 
213  if (verbose)
214  {
215  libMesh::out << "[" << this->processor_id() << "] " << "num_proc=" << num_proc << std::endl;
216  libMesh::out << "[" << this->processor_id() << "] " << "num_proc_in_file=" << num_proc_in_file << std::endl;
217  libMesh::out << "[" << this->processor_id() << "] " << "ftype=" << ftype << std::endl;
218  }
219 }
220 
221 
222 
223 void Nemesis_IO_Helper::get_loadbal_param()
224 {
225  nemesis_err_flag =
226  Nemesis::ne_get_loadbal_param(ex_id,
227  &num_internal_nodes,
228  &num_border_nodes,
229  &num_external_nodes,
230  &num_internal_elems,
231  &num_border_elems,
232  &num_node_cmaps,
233  &num_elem_cmaps,
234  this->processor_id() // The ID of the processor for which info is to be read
235  );
236  EX_CHECK_ERR(nemesis_err_flag, "Error reading load balance parameters!");
237 
238 
239  if (verbose)
240  {
241  libMesh::out << "[" << this->processor_id() << "] " << "num_internal_nodes=" << num_internal_nodes << std::endl;
242  libMesh::out << "[" << this->processor_id() << "] " << "num_border_nodes=" << num_border_nodes << std::endl;
243  libMesh::out << "[" << this->processor_id() << "] " << "num_external_nodes=" << num_external_nodes << std::endl;
244  libMesh::out << "[" << this->processor_id() << "] " << "num_internal_elems=" << num_internal_elems << std::endl;
245  libMesh::out << "[" << this->processor_id() << "] " << "num_border_elems=" << num_border_elems << std::endl;
246  libMesh::out << "[" << this->processor_id() << "] " << "num_node_cmaps=" << num_node_cmaps << std::endl;
247  libMesh::out << "[" << this->processor_id() << "] " << "num_elem_cmaps=" << num_elem_cmaps << std::endl;
248  }
249 }
250 
251 
252 
253 void Nemesis_IO_Helper::get_elem_map()
254 {
255  elem_mapi.resize(num_internal_elems);
256  elem_mapb.resize(num_border_elems);
257 
258  nemesis_err_flag =
259  Nemesis::ne_get_elem_map(ex_id,
260  elem_mapi.empty() ? libmesh_nullptr : &elem_mapi[0],
261  elem_mapb.empty() ? libmesh_nullptr : &elem_mapb[0],
262  this->processor_id()
263  );
264  EX_CHECK_ERR(nemesis_err_flag, "Error reading element maps!");
265 
266 
267  if (verbose)
268  {
269  libMesh::out << "[" << this->processor_id() << "] elem_mapi[i] = ";
270  for (unsigned int i=0; i<static_cast<unsigned int>(num_internal_elems-1); ++i)
271  libMesh::out << elem_mapi[i] << ", ";
272  libMesh::out << "... " << elem_mapi.back() << std::endl;
273 
274  libMesh::out << "[" << this->processor_id() << "] elem_mapb[i] = ";
275  for (unsigned int i=0; i<static_cast<unsigned int>(std::min(10, num_border_elems-1)); ++i)
276  libMesh::out << elem_mapb[i] << ", ";
277  libMesh::out << "... " << elem_mapb.back() << std::endl;
278  }
279 }
280 
281 
282 
283 
284 void Nemesis_IO_Helper::get_node_map()
285 {
286  node_mapi.resize(num_internal_nodes);
287  node_mapb.resize(num_border_nodes);
288  node_mape.resize(num_external_nodes);
289 
290  nemesis_err_flag =
291  Nemesis::ne_get_node_map(ex_id,
292  node_mapi.empty() ? libmesh_nullptr : &node_mapi[0],
293  node_mapb.empty() ? libmesh_nullptr : &node_mapb[0],
294  node_mape.empty() ? libmesh_nullptr : &node_mape[0],
295  this->processor_id()
296  );
297  EX_CHECK_ERR(nemesis_err_flag, "Error reading node maps!");
298 
299  if (verbose)
300  {
301  // Remark: The Exodus/Nemesis node numbering is always (?) 1-based! This means the first interior node id will
302  // always be == 1.
303  libMesh::out << "[" << this->processor_id() << "] " << "first interior node id=" << node_mapi[0] << std::endl;
304  libMesh::out << "[" << this->processor_id() << "] " << "last interior node id=" << node_mapi.back() << std::endl;
305 
306  libMesh::out << "[" << this->processor_id() << "] " << "first boundary node id=" << node_mapb[0] << std::endl;
307  libMesh::out << "[" << this->processor_id() << "] " << "last boundary node id=" << node_mapb.back() << std::endl;
308 
309  // The number of external nodes is sometimes zero; don't try to access
310  // node_mape.back() in this case!
311  if (num_external_nodes > 0)
312  {
313  libMesh::out << "[" << this->processor_id() << "] " << "first external node id=" << node_mape[0] << std::endl;
314  libMesh::out << "[" << this->processor_id() << "] " << "last external node id=" << node_mape.back() << std::endl;
315  }
316  }
317 }
318 
319 
320 
321 void Nemesis_IO_Helper::get_cmap_params()
322 {
323  node_cmap_ids.resize(num_node_cmaps);
324  node_cmap_node_cnts.resize(num_node_cmaps);
325  elem_cmap_ids.resize(num_elem_cmaps);
326  elem_cmap_elem_cnts.resize(num_elem_cmaps);
327 
328  nemesis_err_flag =
329  Nemesis::ne_get_cmap_params(ex_id,
330  node_cmap_ids.empty() ? libmesh_nullptr : &node_cmap_ids[0],
331  node_cmap_node_cnts.empty() ? libmesh_nullptr : &node_cmap_node_cnts[0],
332  elem_cmap_ids.empty() ? libmesh_nullptr : &elem_cmap_ids[0],
333  elem_cmap_elem_cnts.empty() ? libmesh_nullptr : &elem_cmap_elem_cnts[0],
334  this->processor_id());
335  EX_CHECK_ERR(nemesis_err_flag, "Error reading cmap parameters!");
336 
337 
338  if (verbose)
339  {
340  libMesh::out << "[" << this->processor_id() << "] ";
341  for (std::size_t i=0; i<node_cmap_ids.size(); ++i)
342  libMesh::out << "node_cmap_ids["<<i<<"]=" << node_cmap_ids[i] << " ";
343  libMesh::out << std::endl;
344 
345  libMesh::out << "[" << this->processor_id() << "] ";
346  for (std::size_t i=0; i<node_cmap_node_cnts.size(); ++i)
347  libMesh::out << "node_cmap_node_cnts["<<i<<"]=" << node_cmap_node_cnts[i] << " ";
348  libMesh::out << std::endl;
349 
350  libMesh::out << "[" << this->processor_id() << "] ";
351  for (std::size_t i=0; i<elem_cmap_ids.size(); ++i)
352  libMesh::out << "elem_cmap_ids["<<i<<"]=" << elem_cmap_ids[i] << " ";
353  libMesh::out << std::endl;
354 
355  libMesh::out << "[" << this->processor_id() << "] ";
356  for (std::size_t i=0; i<elem_cmap_elem_cnts.size(); ++i)
357  libMesh::out << "elem_cmap_elem_cnts["<<i<<"]=" << elem_cmap_elem_cnts[i] << " ";
358  libMesh::out << std::endl;
359  }
360 }
361 
362 
363 
364 void Nemesis_IO_Helper::get_node_cmap()
365 {
366  node_cmap_node_ids.resize(num_node_cmaps);
367  node_cmap_proc_ids.resize(num_node_cmaps);
368 
369  for (std::size_t i=0; i<node_cmap_node_ids.size(); ++i)
370  {
371  node_cmap_node_ids[i].resize(node_cmap_node_cnts[i]);
372  node_cmap_proc_ids[i].resize(node_cmap_node_cnts[i]);
373 
374  // Don't call ne_get_node_cmap() if there is nothing there to
375  // get, Nemesis throws an error in this case.
376  if (node_cmap_node_cnts[i] > 0)
377  {
378  nemesis_err_flag =
379  Nemesis::ne_get_node_cmap(ex_id,
380  node_cmap_ids[i],
381  &node_cmap_node_ids[i][0],
382  &node_cmap_proc_ids[i][0],
383  this->processor_id());
384  EX_CHECK_ERR(nemesis_err_flag, "Error reading node cmap node and processor ids!");
385  }
386 
387  if (verbose)
388  {
389  libMesh::out << "[" << this->processor_id() << "] node_cmap_node_ids["<<i<<"]=";
390  for (std::size_t j=0; j<node_cmap_node_ids[i].size(); ++j)
391  libMesh::out << node_cmap_node_ids[i][j] << " ";
392  libMesh::out << std::endl;
393 
394  // This is basically a vector, all entries of which are = node_cmap_ids[i]
395  // Not sure if it's always guaranteed to be that or what...
396  libMesh::out << "[" << this->processor_id() << "] node_cmap_proc_ids["<<i<<"]=";
397  for (std::size_t j=0; j<node_cmap_proc_ids[i].size(); ++j)
398  libMesh::out << node_cmap_proc_ids[i][j] << " ";
399  libMesh::out << std::endl;
400  }
401  }
402 }
403 
404 
405 
406 void Nemesis_IO_Helper::get_elem_cmap()
407 {
408  elem_cmap_elem_ids.resize(num_elem_cmaps);
409  elem_cmap_side_ids.resize(num_elem_cmaps);
410  elem_cmap_proc_ids.resize(num_elem_cmaps);
411 
412  for (std::size_t i=0; i<elem_cmap_elem_ids.size(); ++i)
413  {
414  elem_cmap_elem_ids[i].resize(elem_cmap_elem_cnts[i]);
415  elem_cmap_side_ids[i].resize(elem_cmap_elem_cnts[i]);
416  elem_cmap_proc_ids[i].resize(elem_cmap_elem_cnts[i]);
417 
418  if (elem_cmap_elem_cnts[i] > 0)
419  {
420  nemesis_err_flag =
421  Nemesis::ne_get_elem_cmap(ex_id,
422  elem_cmap_ids[i],
423  &elem_cmap_elem_ids[i][0],
424  &elem_cmap_side_ids[i][0],
425  &elem_cmap_proc_ids[i][0],
426  this->processor_id());
427  EX_CHECK_ERR(nemesis_err_flag, "Error reading elem cmap elem, side, and processor ids!");
428  }
429 
430  if (verbose)
431  {
432  libMesh::out << "[" << this->processor_id() << "] elem_cmap_elem_ids["<<i<<"]=";
433  for (std::size_t j=0; j<elem_cmap_elem_ids[i].size(); ++j)
434  libMesh::out << elem_cmap_elem_ids[i][j] << " ";
435  libMesh::out << std::endl;
436 
437  // These must be the (local) side IDs (in the ExodusII face numbering scheme)
438  // of the sides shared across processors.
439  libMesh::out << "[" << this->processor_id() << "] elem_cmap_side_ids["<<i<<"]=";
440  for (std::size_t j=0; j<elem_cmap_side_ids[i].size(); ++j)
441  libMesh::out << elem_cmap_side_ids[i][j] << " ";
442  libMesh::out << std::endl;
443 
444  // This is basically a vector, all entries of which are = elem_cmap_ids[i]
445  // Not sure if it's always guaranteed to be that or what...
446  libMesh::out << "[" << this->processor_id() << "] elem_cmap_proc_ids["<<i<<"]=";
447  for (std::size_t j=0; j<elem_cmap_proc_ids[i].size(); ++j)
448  libMesh::out << elem_cmap_proc_ids[i][j] << " ";
449  libMesh::out << std::endl;
450  }
451  }
452 }
453 
454 
455 
456 
457 void Nemesis_IO_Helper::put_init_info(unsigned num_proc_in,
458  unsigned num_proc_in_file_in,
459  const char * ftype_in)
460 {
461  nemesis_err_flag =
462  Nemesis::ne_put_init_info(ex_id,
463  num_proc_in,
464  num_proc_in_file_in,
465  const_cast<char *>(ftype_in));
466 
467  EX_CHECK_ERR(nemesis_err_flag, "Error writing initial information!");
468 }
469 
470 
471 
472 
473 void Nemesis_IO_Helper::put_init_global(dof_id_type num_nodes_global_in,
474  dof_id_type num_elems_global_in,
475  unsigned num_elem_blks_global_in,
476  unsigned num_node_sets_global_in,
477  unsigned num_side_sets_global_in)
478 {
479  nemesis_err_flag =
480  Nemesis::ne_put_init_global(ex_id,
481  num_nodes_global_in,
482  num_elems_global_in,
483  num_elem_blks_global_in,
484  num_node_sets_global_in,
485  num_side_sets_global_in);
486 
487  EX_CHECK_ERR(nemesis_err_flag, "Error writing initial global data!");
488 }
489 
490 
491 
492 void Nemesis_IO_Helper::put_eb_info_global(std::vector<int> & global_elem_blk_ids_in,
493  std::vector<int> & global_elem_blk_cnts_in)
494 {
495  nemesis_err_flag =
496  Nemesis::ne_put_eb_info_global(ex_id,
497  &global_elem_blk_ids_in[0],
498  &global_elem_blk_cnts_in[0]);
499 
500  EX_CHECK_ERR(nemesis_err_flag, "Error writing global element block information!");
501 }
502 
503 
504 
505 
506 void Nemesis_IO_Helper::put_ns_param_global(std::vector<int> & global_nodeset_ids_in,
507  std::vector<int> & num_global_node_counts_in,
508  std::vector<int> & num_global_node_df_counts_in)
509 {
510  // Only add nodesets if there are some
511  if (global_nodeset_ids.size())
512  {
513  nemesis_err_flag =
514  Nemesis::ne_put_ns_param_global(ex_id,
515  &global_nodeset_ids_in[0],
516  &num_global_node_counts_in[0],
517  &num_global_node_df_counts_in[0]);
518  }
519 
520  EX_CHECK_ERR(nemesis_err_flag, "Error writing global nodeset parameters!");
521 }
522 
523 
524 
525 
526 void Nemesis_IO_Helper::put_ss_param_global(std::vector<int> & global_sideset_ids_in,
527  std::vector<int> & num_global_side_counts_in,
528  std::vector<int> & num_global_side_df_counts_in)
529 {
530  // Only add sidesets if there are some
531  if (global_sideset_ids.size())
532  {
533  nemesis_err_flag =
534  Nemesis::ne_put_ss_param_global(ex_id,
535  &global_sideset_ids_in[0],
536  &num_global_side_counts_in[0],
537  &num_global_side_df_counts_in[0]);
538  }
539 
540  EX_CHECK_ERR(nemesis_err_flag, "Error writing global sideset parameters!");
541 }
542 
543 
544 
545 
546 void Nemesis_IO_Helper::put_loadbal_param(unsigned num_internal_nodes_in,
547  unsigned num_border_nodes_in,
548  unsigned num_external_nodes_in,
549  unsigned num_internal_elems_in,
550  unsigned num_border_elems_in,
551  unsigned num_node_cmaps_in,
552  unsigned num_elem_cmaps_in)
553 {
554  nemesis_err_flag =
555  Nemesis::ne_put_loadbal_param(ex_id,
556  num_internal_nodes_in,
557  num_border_nodes_in,
558  num_external_nodes_in,
559  num_internal_elems_in,
560  num_border_elems_in,
561  num_node_cmaps_in,
562  num_elem_cmaps_in,
563  this->processor_id());
564 
565  EX_CHECK_ERR(nemesis_err_flag, "Error writing loadbal parameters!");
566 }
567 
568 
569 
570 
571 
572 void Nemesis_IO_Helper::put_cmap_params(std::vector<int> & node_cmap_ids_in,
573  std::vector<int> & node_cmap_node_cnts_in,
574  std::vector<int> & elem_cmap_ids_in,
575  std::vector<int> & elem_cmap_elem_cnts_in)
576 {
577  // We might not have cmaps on every processor in some corner
578  // cases
579  if (node_cmap_ids.size())
580  {
581  nemesis_err_flag =
582  Nemesis::ne_put_cmap_params(ex_id,
583  &node_cmap_ids_in[0],
584  &node_cmap_node_cnts_in[0],
585  &elem_cmap_ids_in[0],
586  &elem_cmap_elem_cnts_in[0],
587  this->processor_id());
588  }
589 
590  EX_CHECK_ERR(nemesis_err_flag, "Error writing cmap parameters!");
591 }
592 
593 
594 
595 
596 void Nemesis_IO_Helper::put_node_cmap(std::vector<std::vector<int>> & node_cmap_node_ids_in,
597  std::vector<std::vector<int>> & node_cmap_proc_ids_in)
598 {
599 
600  // Print to screen what we are about to print to Nemesis file
601  if (verbose)
602  {
603  for (std::size_t i=0; i<node_cmap_node_ids_in.size(); ++i)
604  {
605  libMesh::out << "[" << this->processor_id() << "] put_node_cmap() : nodes communicated to proc "
606  << this->node_cmap_ids[i]
607  << " = ";
608  for (std::size_t j=0; j<node_cmap_node_ids_in[i].size(); ++j)
609  libMesh::out << node_cmap_node_ids_in[i][j] << " ";
610  libMesh::out << std::endl;
611  }
612 
613  for (std::size_t i=0; i<node_cmap_node_ids_in.size(); ++i)
614  {
615  libMesh::out << "[" << this->processor_id() << "] put_node_cmap() : processor IDs = ";
616  for (std::size_t j=0; j<node_cmap_proc_ids_in[i].size(); ++j)
617  libMesh::out << node_cmap_proc_ids_in[i][j] << " ";
618  libMesh::out << std::endl;
619  }
620  }
621 
622  for (std::size_t i=0; i<node_cmap_node_ids_in.size(); ++i)
623  {
624  nemesis_err_flag =
625  Nemesis::ne_put_node_cmap(ex_id,
626  this->node_cmap_ids[i],
627  &node_cmap_node_ids_in[i][0],
628  &node_cmap_proc_ids_in[i][0],
629  this->processor_id());
630 
631  EX_CHECK_ERR(nemesis_err_flag, "Error writing node communication map to file!");
632  }
633 }
634 
635 
636 
637 
638 void Nemesis_IO_Helper::put_node_map(std::vector<int> & node_mapi_in,
639  std::vector<int> & node_mapb_in,
640  std::vector<int> & node_mape_in)
641 {
642  nemesis_err_flag =
643  Nemesis::ne_put_node_map(ex_id,
644  node_mapi_in.empty() ? libmesh_nullptr : &node_mapi_in[0],
645  node_mapb_in.empty() ? libmesh_nullptr : &node_mapb_in[0],
646  node_mape_in.empty() ? libmesh_nullptr : &node_mape_in[0], // Don't take address of empty vector...
647  this->processor_id());
648 
649  EX_CHECK_ERR(nemesis_err_flag, "Error writing Nemesis internal and border node maps to file!");
650 }
651 
652 
653 
654 
655 void Nemesis_IO_Helper::put_elem_cmap(std::vector<std::vector<int>> & elem_cmap_elem_ids_in,
656  std::vector<std::vector<int>> & elem_cmap_side_ids_in,
657  std::vector<std::vector<int>> & elem_cmap_proc_ids_in)
658 {
659  for (std::size_t i=0; i<elem_cmap_ids.size(); ++i)
660  {
661  nemesis_err_flag =
662  Nemesis::ne_put_elem_cmap(ex_id,
663  this->elem_cmap_ids[i],
664  &elem_cmap_elem_ids_in[i][0],
665  &elem_cmap_side_ids_in[i][0],
666  &elem_cmap_proc_ids_in[i][0],
667  this->processor_id());
668 
669  EX_CHECK_ERR(nemesis_err_flag, "Error writing elem communication map to file!");
670  }
671 }
672 
673 
674 
675 
676 void Nemesis_IO_Helper::put_elem_map(std::vector<int> & elem_mapi_in,
677  std::vector<int> & elem_mapb_in)
678 {
679  nemesis_err_flag =
680  Nemesis::ne_put_elem_map(ex_id,
681  elem_mapi_in.empty() ? libmesh_nullptr : &elem_mapi_in[0],
682  elem_mapb_in.empty() ? libmesh_nullptr : &elem_mapb_in[0],
683  this->processor_id());
684 
685  EX_CHECK_ERR(nemesis_err_flag, "Error writing Nemesis internal and border element maps to file!");
686 }
687 
688 
689 
690 
691 
692 
693 void Nemesis_IO_Helper::put_n_coord(unsigned start_node_num,
694  unsigned num_nodes_in,
695  std::vector<Real> & x_coor,
696  std::vector<Real> & y_coor,
697  std::vector<Real> & z_coor)
698 {
699  nemesis_err_flag =
700  Nemesis::ne_put_n_coord(ex_id,
701  start_node_num,
702  num_nodes_in,
703  &x_coor[0],
704  &y_coor[0],
705  &z_coor[0]);
706 
707  EX_CHECK_ERR(nemesis_err_flag, "Error writing coords to file!");
708 }
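
The coordinate vectors passed to put_n_coord() are plain structure-of-arrays buffers. The following standalone sketch (using a made-up SimplePoint struct rather than libMesh's Node class) shows the kind of packing a caller performs before handing data to a ne_put_n_coord()-style routine:

// Sketch: split an array-of-structs coordinate list into the three
// struct-of-arrays vectors that ne_put_n_coord()-style APIs expect.
#include <vector>

struct SimplePoint { double x, y, z; };   // stand-in for a mesh node

void pack_coordinates(const std::vector<SimplePoint> & nodes,
                      std::vector<double> & x_coor,
                      std::vector<double> & y_coor,
                      std::vector<double> & z_coor)
{
  x_coor.resize(nodes.size());
  y_coor.resize(nodes.size());
  z_coor.resize(nodes.size());

  for (std::size_t i = 0; i < nodes.size(); ++i)
    {
      x_coor[i] = nodes[i].x;
      y_coor[i] = nodes[i].y;
      z_coor[i] = nodes[i].z;   // simply zero for 2D meshes
    }
}
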
709 
710 
711 
712 
713 
714 
715 
716 
717 // Note: we can't reuse the ExodusII_IO_Helper code directly, since it only runs
718 // on processor 0. TODO: We could have the body of this function as a separate
719 // function and then ExodusII_IO_Helper would only call it if on processor 0...
720 void Nemesis_IO_Helper::create(std::string filename)
721 {
722  // Fall back on double precision when necessary since ExodusII
723  // doesn't seem to support long double
724  int
725  comp_ws = 0,
726  io_ws = 0;
727 
728  if (_single_precision)
729  {
730  comp_ws = sizeof(float);
731  io_ws = sizeof(float);
732  }
733  else
734  {
735  comp_ws = cast_int<int>(std::min(sizeof(Real), sizeof(double)));
736  io_ws = cast_int<int>(std::min(sizeof(Real), sizeof(double)));
737  }
738 
739  this->ex_id = exII::ex_create(filename.c_str(), EX_CLOBBER, &comp_ws, &io_ws);
740 
741  EX_CHECK_ERR(ex_id, "Error creating Nemesis mesh file.");
742 
743  if (verbose)
744  libMesh::out << "File created successfully." << std::endl;
745 
746  this->opened_for_writing = true;
747 }
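
The comp_ws/io_ws word sizes passed to ex_create() select the floating point precision used in memory and on disk; zero lets the Exodus library pick its default. Below is a minimal standalone sketch of the create/flush/close lifecycle against the plain Exodus C API, assuming exodusII.h is available; the filename is arbitrary and error handling is reduced to a single check:

#include <exodusII.h>
#include <cstdio>

int main()
{
  // Computation (in-memory) and I/O (on-disk) word sizes; here: double precision.
  int comp_ws = sizeof(double);
  int io_ws   = sizeof(double);

  // EX_CLOBBER overwrites any existing file of the same name.
  int ex_id = ex_create("example.e", EX_CLOBBER, &comp_ws, &io_ws);
  if (ex_id < 0)
    {
      std::fprintf(stderr, "ex_create failed\n");
      return 1;
    }

  // ... write initialization data, coordinates, connectivity, etc. ...

  ex_update(ex_id);   // flush buffers to disk (only valid on a writable file)
  ex_close(ex_id);    // release the file handle
  return 0;
}
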
748 
749 
750 
751 
752 
753 
754 
755 
756 void Nemesis_IO_Helper::initialize(std::string title_in, const MeshBase & mesh, bool /*use_discontinuous*/)
757 {
758  // Make sure that the reference passed in is really a DistributedMesh
759  // const DistributedMesh & pmesh = cast_ref<const DistributedMesh &>(mesh);
760  const MeshBase & pmesh = mesh;
761 
762  // According to the Nemesis documentation, the first call when writing should be to
763  // ne_put_init_info(). Our reader doesn't actually call this, but we should
764  // strive to be as close to a normal nemesis file as possible...
765  this->put_init_info(this->n_processors(), 1, "p");
766 
767 
768  // Gather global "initial" information for Nemesis. This consists of
769  // three parts labeled I, II, and III below...
770 
771  //
772  // I.) Need to compute the number of global element blocks. To be consistent with
773  // Exodus, we also incorrectly associate the number of element blocks with the
774  // number of libmesh subdomains...
775  //
776  this->compute_num_global_elem_blocks(pmesh);
777 
778  //
779  // II.) Determine the global number of nodesets by communication.
780  // This code relies on BoundaryInfo storing side and node
781  // boundary IDs separately at the time they are added to the
782  // BoundaryInfo object.
783  //
784  this->compute_num_global_nodesets(pmesh);
785 
786  //
787  // III.) Need to compute the global number of sidesets by communication:
788  // This code relies on BoundaryInfo storing side and node
789  // boundary IDs separately at the time they are added to the
790  // BoundaryInfo object.
791  //
792  this->compute_num_global_sidesets(pmesh);
793 
794  // Now write the global data obtained in steps I, II, and III to the Nemesis file
795  this->put_init_global(pmesh.parallel_n_nodes(),
796  pmesh.parallel_n_elem(),
797  this->num_elem_blks_global, /* I. */
798  this->num_node_sets_global, /* II. */
799  this->num_side_sets_global /* III. */
800  );
801 
802  // Next, we'll write global element block information to the file. This was already
803  // gathered in step I. above
804  this->put_eb_info_global(this->global_elem_blk_ids,
805  this->global_elem_blk_cnts);
806 
807 
808  // Next, write global nodeset information to the file. This was already gathered in
809  // step II. above.
810  this->num_global_node_df_counts.clear();
811  this->num_global_node_df_counts.resize(this->global_nodeset_ids.size()); // distribution factors all zero...
812  this->put_ns_param_global(this->global_nodeset_ids,
813  this->num_global_node_counts,
814  this->num_global_node_df_counts);
815 
816 
817  // Next, write global sideset information to the file. This was already gathered in
818  // step III. above.
819  this->num_global_side_df_counts.clear();
820  this->num_global_side_df_counts.resize(this->global_sideset_ids.size()); // distribution factors all zero...
821  this->put_ss_param_global(this->global_sideset_ids,
822  this->num_global_side_counts,
823  this->num_global_side_df_counts);
824 
825 
826  // Before we go any further we need to derive consistent node and
827  // element numbering schemes for all local elems and nodes connected
828  // to local elements.
829  //
830  // Must be called *after* the local_subdomain_counts map has been constructed
831  // by the compute_num_global_elem_blocks() function!
832  this->build_element_and_node_maps(pmesh);
833 
834  // Next step is to write "load balance" parameters. Several things need to
835  // be computed first though...
836 
837  // First we'll collect IDs of border nodes.
838  this->compute_border_node_ids(pmesh);
839 
840  // Next we'll collect numbers of internal and border elements, and internal nodes.
841  // Note: "A border node does not a border element make...", that is, just because one
842  // of an element's nodes has been identified as a border node, the element is not
843  // necessarily a border element. It must have a side on the boundary between processors,
844  // i.e. have a face neighbor with a different processor id...
845  this->compute_internal_and_border_elems_and_internal_nodes(pmesh);
846 
847  // Finally we are ready to write the loadbal information to the file
848  this->put_loadbal_param(this->num_internal_nodes,
849  this->num_border_nodes,
850  this->num_external_nodes,
851  this->num_internal_elems,
852  this->num_border_elems,
853  this->num_node_cmaps,
854  this->num_elem_cmaps);
855 
856 
857  // Now we need to compute the "communication map" parameters. These are basically
858  // lists of nodes and elements which need to be communicated between different processors
859  // when the mesh file is read back in.
860  this->compute_communication_map_parameters();
861 
862  // Write communication map parameters to file.
863  this->put_cmap_params(this->node_cmap_ids,
864  this->node_cmap_node_cnts,
865  this->elem_cmap_ids,
866  this->elem_cmap_elem_cnts);
867 
868 
869  // Ready the node communication maps. The node IDs which
870  // are communicated are the ones currently stored in
871  // proc_nodes_touched_intersections.
872  this->compute_node_communication_maps();
873 
874  // Write the packed node communication vectors to file.
875  this->put_node_cmap(this->node_cmap_node_ids,
876  this->node_cmap_proc_ids);
877 
878 
879  // Ready the node maps. These have nothing to do with communication, they map
880  // the nodes to internal, border, and external nodes in the file.
881  this->compute_node_maps();
882 
883  // Call the Nemesis API to write the node maps to file.
884  this->put_node_map(this->node_mapi,
885  this->node_mapb,
886  this->node_mape);
887 
888 
889 
890  // Ready the element communication maps. This includes border
891  // element IDs, sides which are on the border, and the processors to which
892  // they are to be communicated...
893  this->compute_elem_communication_maps();
894 
895 
896 
897  // Call the Nemesis API to write the packed element communication maps vectors to file
898  this->put_elem_cmap(this->elem_cmap_elem_ids,
899  this->elem_cmap_side_ids,
900  this->elem_cmap_proc_ids);
901 
902 
903 
904 
905 
906 
907  // Ready the Nemesis element maps (internal and border) for writing to file.
908  this->compute_element_maps();
909 
910  // Call the Nemesis API to write the internal and border element IDs.
911  this->put_elem_map(this->elem_mapi,
912  this->elem_mapb);
913 
914 
915  // Now write Exodus-specific initialization information, some of which is
916  // different when you are using Nemesis.
917  this->write_exodus_initialization_info(pmesh, title_in);
918 } // end initialize()
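
For context, application code normally reaches this writer through the Nemesis_IO interface rather than by driving the helper directly. A rough usage sketch follows, assuming a libMesh build configured with Exodus and Nemesis support; the mesh generator call and its dimensions are arbitrary, and the output is split into one piece per processor:

#include "libmesh/libmesh.h"
#include "libmesh/distributed_mesh.h"
#include "libmesh/mesh_generation.h"
#include "libmesh/nemesis_io.h"

int main(int argc, char ** argv)
{
  libMesh::LibMeshInit init(argc, argv);

  // A distributed mesh: each processor only stores its own piece.
  libMesh::DistributedMesh mesh(init.comm());
  libMesh::MeshTools::Generation::build_cube(mesh, 10, 10, 10);

  // Nemesis_IO drives Nemesis_IO_Helper::initialize() and friends internally,
  // producing per-processor files derived from the given base name.
  libMesh::Nemesis_IO nemesis_writer(mesh);
  nemesis_writer.write("out.e");
  return 0;
}
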
919 
920 
921 
922 
923 
924 
925 void Nemesis_IO_Helper::write_exodus_initialization_info(const MeshBase & pmesh,
926  const std::string & title_in)
927 {
928  // This follows the convention of Exodus: we always write out the mesh as LIBMESH_DIM-dimensional,
929  // even if it is 2D...
930  this->num_dim = LIBMESH_DIM;
931 
932  this->num_elem = static_cast<unsigned int>(std::distance (pmesh.active_local_elements_begin(),
933  pmesh.active_local_elements_end()));
934 
935  // Exodus will also use *global* number of side and node sets,
936  // though it will not write out entries for all of them...
937  this->num_side_sets =
938  cast_int<int>(this->global_sideset_ids.size());
939  this->num_node_sets =
940  cast_int<int>(this->global_nodeset_ids.size());
941 
942  // We need to write the global number of blocks, even though this processor might not have
943  // elements in some of them!
944  this->num_elem_blk = this->num_elem_blks_global;
945 
946  ex_err = exII::ex_put_init(ex_id,
947  title_in.c_str(),
948  this->num_dim,
949  this->num_nodes,
950  this->num_elem,
951  this->num_elem_blk,
952  this->num_node_sets,
953  this->num_side_sets);
954 
955  EX_CHECK_ERR(ex_err, "Error initializing new Nemesis file.");
956 }
957 
958 
959 
960 
961 
962 void Nemesis_IO_Helper::compute_element_maps()
963 {
964  // Make sure we don't have any leftover info
965  this->elem_mapi.clear();
966  this->elem_mapb.clear();
967 
968  // Copy set contents into vectors
969  this->elem_mapi.resize(this->internal_elem_ids.size());
970  this->elem_mapb.resize(this->border_elem_ids.size());
971 
972  {
973  unsigned cnt = 0;
974  std::set<unsigned>::iterator
975  it = this->internal_elem_ids.begin(),
976  end = this->internal_elem_ids.end();
977 
978  for (; it != end; ++it, ++cnt)
979  {
980  std::map<int, int>::iterator elem_it = libmesh_elem_num_to_exodus.find(*it);
981  if (elem_it == libmesh_elem_num_to_exodus.end())
982  libmesh_error_msg("Elem number " << *it << " not found in libmesh_elem_num_to_exodus map.");
983  this->elem_mapi[cnt] = elem_it->second;
984  }
985  }
986 
987  {
988  unsigned cnt = 0;
989  std::set<unsigned>::iterator
990  it = this->border_elem_ids.begin(),
991  end = this->border_elem_ids.end();
992 
993  for (; it != end; ++it, ++cnt)
994  {
995  std::map<int, int>::iterator elem_it = libmesh_elem_num_to_exodus.find(*it);
996  if (elem_it == libmesh_elem_num_to_exodus.end())
997  libmesh_error_msg("Elem number " << *it << " not found in libmesh_elem_num_to_exodus map.");
998  this->elem_mapb[cnt] = elem_it->second;
999  }
1000  }
1001 }
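
Both loops above follow the same idiom: walk an ordered std::set of libMesh ids and translate each entry through a renumbering map, erroring out if an id is missing. A self-contained sketch of that idiom with plain standard containers (the names are illustrative, not libMesh API):

#include <map>
#include <set>
#include <stdexcept>
#include <vector>

// Translate every id in 'ids' through 'to_exodus', preserving the set's order.
std::vector<int> translate_ids(const std::set<unsigned> & ids,
                               const std::map<int, int> & to_exodus)
{
  std::vector<int> out;
  out.reserve(ids.size());

  for (unsigned id : ids)
    {
      std::map<int, int>::const_iterator pos = to_exodus.find(static_cast<int>(id));
      if (pos == to_exodus.end())
        throw std::runtime_error("id not found in renumbering map");
      out.push_back(pos->second);
    }
  return out;
}
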
1002 
1003 
1004 
1005 void Nemesis_IO_Helper::compute_elem_communication_maps()
1006 {
1007  // Make sure there is no leftover information
1008  this->elem_cmap_elem_ids.clear();
1009  this->elem_cmap_side_ids.clear();
1010  this->elem_cmap_proc_ids.clear();
1011 
1012  // Allocate enough space for all our element maps
1013  this->elem_cmap_elem_ids.resize(this->num_elem_cmaps);
1014  this->elem_cmap_side_ids.resize(this->num_elem_cmaps);
1015  this->elem_cmap_proc_ids.resize(this->num_elem_cmaps);
1016  {
1017  unsigned cnt=0; // Index into vectors
1018  std::map<unsigned, std::set<std::pair<unsigned,unsigned>>>::iterator
1019  it = this->proc_border_elem_sets.begin(),
1020  end = this->proc_border_elem_sets.end();
1021 
1022  for (; it != end; ++it)
1023  {
1024  // Make sure the current elem_cmap_id matches the index in our map of node intersections
1025  libmesh_assert_equal_to (static_cast<unsigned>(this->elem_cmap_ids[cnt]), it->first);
1026 
1027  // Get reference to the set of IDs to be packed into the vector
1028  std::set<std::pair<unsigned,unsigned>> & elem_set = it->second;
1029 
1030  // Resize the vectors to receive their payload
1031  this->elem_cmap_elem_ids[cnt].resize(elem_set.size());
1032  this->elem_cmap_side_ids[cnt].resize(elem_set.size());
1033  this->elem_cmap_proc_ids[cnt].resize(elem_set.size());
1034 
1035  std::set<std::pair<unsigned,unsigned>>::iterator elem_set_iter = elem_set.begin();
1036 
1037  // Pack the vectors with elem IDs, side IDs, and processor IDs.
1038  for (std::size_t j=0; j<this->elem_cmap_elem_ids[cnt].size(); ++j, ++elem_set_iter)
1039  {
1040  std::map<int, int>::iterator elem_it = libmesh_elem_num_to_exodus.find((*elem_set_iter).first);
1041 
1042  if (elem_it == libmesh_elem_num_to_exodus.end())
1043  libmesh_error_msg("Elem number " << (*elem_set_iter).first << " not found in libmesh_elem_num_to_exodus map.");
1044 
1045  this->elem_cmap_elem_ids[cnt][j] = elem_it->second;
1046  this->elem_cmap_side_ids[cnt][j] = (*elem_set_iter).second; // Side ID, this has already been converted above
1047  this->elem_cmap_proc_ids[cnt][j] = it->first; // All have the same processor ID
1048  }
1049 
1050  // increment vector index to go to next processor
1051  cnt++;
1052  }
1053  } // end scope for packing
1054 }
1055 
1056 
1057 
1058 
1059 
1060 void Nemesis_IO_Helper::compute_node_maps()
1061 {
1062  // Make sure we don't have any leftover information
1063  this->node_mapi.clear();
1064  this->node_mapb.clear();
1065  this->node_mape.clear();
1066 
1067  // Make sure there's enough space to hold all our node IDs
1068  this->node_mapi.resize(this->internal_node_ids.size());
1069  this->node_mapb.resize(this->border_node_ids.size());
1070 
1071  // Copy set contents into vectors
1072  {
1073  unsigned cnt = 0;
1074  std::set<unsigned>::iterator
1075  it = this->internal_node_ids.begin(),
1076  end = this->internal_node_ids.end();
1077 
1078  for (; it != end; ++it, ++cnt)
1079  {
1080  std::map<int, int>::iterator node_it = libmesh_node_num_to_exodus.find(*it);
1081  if (node_it == libmesh_node_num_to_exodus.end())
1082  libmesh_error_msg("Node number " << *it << " not found in libmesh_node_num_to_exodus map.");
1083  this->node_mapi[cnt] = node_it->second;
1084  }
1085  }
1086 
1087  {
1088  unsigned cnt=0;
1089  std::set<unsigned>::iterator
1090  it = this->border_node_ids.begin(),
1091  end = this->border_node_ids.end();
1092 
1093  for (; it != end; ++it, ++cnt)
1094  {
1095  std::map<int, int>::iterator node_it = libmesh_node_num_to_exodus.find(*it);
1096  if (node_it == libmesh_node_num_to_exodus.end())
1097  libmesh_error_msg("Node number " << *it << " not found in libmesh_node_num_to_exodus map.");
1098  this->node_mapb[cnt] = node_it->second;
1099  }
1100  }
1101 }
1102 
1103 
1104 
1105 
1106 
1107 void Nemesis_IO_Helper::compute_node_communication_maps()
1108 {
1109  // Make sure there's no left-over information
1110  this->node_cmap_node_ids.clear();
1111  this->node_cmap_proc_ids.clear();
1112 
1113  // Allocate enough space for all our node maps
1114  this->node_cmap_node_ids.resize(this->num_node_cmaps);
1115  this->node_cmap_proc_ids.resize(this->num_node_cmaps);
1116  {
1117  unsigned cnt=0; // Index into vectors
1118  std::map<unsigned, std::set<unsigned>>::iterator
1119  it = this->proc_nodes_touched_intersections.begin(),
1120  end = this->proc_nodes_touched_intersections.end();
1121 
1122  for (; it != end; ++it)
1123  {
1124  // Make sure the current node_cmap_id matches the index in our map of node intersections
1125  libmesh_assert_equal_to (static_cast<unsigned>(this->node_cmap_ids[cnt]), it->first);
1126 
1127  // Get reference to the set of IDs to be packed into the vector.
1128  std::set<unsigned> & node_set = it->second;
1129 
1130  // Resize the vectors to receive their payload
1131  this->node_cmap_node_ids[cnt].resize(node_set.size());
1132  this->node_cmap_proc_ids[cnt].resize(node_set.size());
1133 
1134  std::set<unsigned>::iterator node_set_iter = node_set.begin();
1135 
1136  // Pack the vectors with node IDs and processor IDs.
1137  for (std::size_t j=0; j<this->node_cmap_node_ids[cnt].size(); ++j, ++node_set_iter)
1138  {
1139  std::map<int, int>::iterator node_it = libmesh_node_num_to_exodus.find(*node_set_iter);
1140  if (node_it == libmesh_node_num_to_exodus.end())
1141  libmesh_error_msg("Node number " << *node_set_iter << " not found in libmesh_node_num_to_exodus map.");
1142 
1143  this->node_cmap_node_ids[cnt][j] = node_it->second;
1144  this->node_cmap_proc_ids[cnt][j] = it->first;
1145  }
1146 
1147  // increment vector index to go to next processor
1148  cnt++;
1149  }
1150  } // end scope for packing
1151 
1152  // Print out the vectors we just packed
1153  if (verbose)
1154  {
1155  for (std::size_t i=0; i<this->node_cmap_node_ids.size(); ++i)
1156  {
1157  libMesh::out << "[" << this->processor_id() << "] nodes communicated to proc "
1158  << this->node_cmap_ids[i]
1159  << " = ";
1160  for (std::size_t j=0; j<this->node_cmap_node_ids[i].size(); ++j)
1161  libMesh::out << this->node_cmap_node_ids[i][j] << " ";
1162  libMesh::out << std::endl;
1163  }
1164 
1165  for (std::size_t i=0; i<this->node_cmap_node_ids.size(); ++i)
1166  {
1167  libMesh::out << "[" << this->processor_id() << "] processor ID node communicated to = ";
1168  for (std::size_t j=0; j<this->node_cmap_proc_ids[i].size(); ++j)
1169  libMesh::out << this->node_cmap_proc_ids[i][j] << " ";
1170  libMesh::out << std::endl;
1171  }
1172  }
1173 }
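
The packing above flattens a map from neighboring processor id to the set of shared node ids into the ragged parallel arrays Nemesis expects: one vector of node ids plus one vector repeating the neighbor's processor id per communication map. A reduced sketch of that flattening, with the Exodus renumbering step left out:

#include <map>
#include <set>
#include <vector>

void pack_node_cmaps(const std::map<unsigned, std::set<unsigned>> & shared_nodes,
                     std::vector<std::vector<int>> & cmap_node_ids,
                     std::vector<std::vector<int>> & cmap_proc_ids)
{
  cmap_node_ids.clear();
  cmap_proc_ids.clear();
  cmap_node_ids.reserve(shared_nodes.size());
  cmap_proc_ids.reserve(shared_nodes.size());

  for (const auto & pr : shared_nodes)
    {
      const unsigned neighbor_proc = pr.first;
      const std::set<unsigned> & nodes = pr.second;

      // One communication map per neighboring processor: the node ids we
      // share with it, plus a parallel vector repeating that processor id.
      cmap_node_ids.emplace_back(nodes.begin(), nodes.end());
      cmap_proc_ids.emplace_back(nodes.size(), static_cast<int>(neighbor_proc));
    }
}
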
1174 
1175 
1176 
1177 
1178 void Nemesis_IO_Helper::compute_communication_map_parameters()
1179 {
1180  // For the nodes, these are the number of entries in the sets in proc_nodes_touched_intersections
1181  // map computed above. Note: this map does not contain self-intersections so we can loop over it
1182  // directly.
1183  this->node_cmap_node_cnts.clear(); // Make sure we don't have any leftover information...
1184  this->node_cmap_ids.clear(); // Make sure we don't have any leftover information...
1185  this->node_cmap_node_cnts.resize(this->num_node_cmaps);
1186  this->node_cmap_ids.resize(this->num_node_cmaps);
1187 
1188  {
1189  unsigned cnt=0; // Index into the vector
1190  std::map<unsigned, std::set<unsigned>>::iterator
1191  it = this->proc_nodes_touched_intersections.begin(),
1192  end = this->proc_nodes_touched_intersections.end();
1193 
1194  for (; it != end; ++it)
1195  {
1196  this->node_cmap_ids[cnt] = it->first; // The ID of the proc we communicate with
1197  this->node_cmap_node_cnts[cnt] = cast_int<int>(it->second.size()); // The number of nodes we communicate
1198  cnt++; // increment vector index!
1199  }
1200  }
1201 
1202  // Print the packed vectors we just filled
1203  if (verbose)
1204  {
1205  libMesh::out << "[" << this->processor_id() << "] node_cmap_node_cnts = ";
1206  for (std::size_t i=0; i<node_cmap_node_cnts.size(); ++i)
1207  libMesh::out << node_cmap_node_cnts[i] << ", ";
1208  libMesh::out << std::endl;
1209 
1210  libMesh::out << "[" << this->processor_id() << "] node_cmap_ids = ";
1211  for (std::size_t i=0; i<node_cmap_ids.size(); ++i)
1212  libMesh::out << node_cmap_ids[i] << ", ";
1213  libMesh::out << std::endl;
1214  }
1215 
1216  // For the elements, we have not yet computed all of this information.
1217  this->elem_cmap_elem_cnts.clear(); // Make sure we don't have any leftover information...
1218  this->elem_cmap_ids.clear(); // Make sure we don't have any leftover information...
1219  this->elem_cmap_elem_cnts.resize(this->num_elem_cmaps);
1220  this->elem_cmap_ids.resize(this->num_elem_cmaps);
1221 
1222  // Pack the elem_cmap_ids and elem_cmap_elem_cnts vectors
1223  {
1224  unsigned cnt=0; // Index into the vectors we're filling
1225  std::map<unsigned, std::set<std::pair<unsigned,unsigned>>>::iterator
1226  it = this->proc_border_elem_sets.begin(),
1227  end = this->proc_border_elem_sets.end();
1228 
1229  for (; it != end; ++it)
1230  {
1231  this->elem_cmap_ids[cnt] = it->first; // The ID of the proc we communicate with
1232  this->elem_cmap_elem_cnts[cnt] = cast_int<int>(it->second.size()); // The number of elems we communicate to/from that proc
1233  cnt++; // increment vector index!
1234  }
1235  }
1236 
1237  // Print the packed vectors we just filled
1238  if (verbose)
1239  {
1240  libMesh::out << "[" << this->processor_id() << "] elem_cmap_elem_cnts = ";
1241  for (std::size_t i=0; i<elem_cmap_elem_cnts.size(); ++i)
1242  libMesh::out << elem_cmap_elem_cnts[i] << ", ";
1243  libMesh::out << std::endl;
1244 
1245  libMesh::out << "[" << this->processor_id() << "] elem_cmap_ids = ";
1246  for (std::size_t i=0; i<elem_cmap_ids.size(); ++i)
1247  libMesh::out << elem_cmap_ids[i] << ", ";
1248  libMesh::out << std::endl;
1249  }
1250 }
1251 
1252 
1253 
1254 
1255 void
1256 Nemesis_IO_Helper::compute_internal_and_border_elems_and_internal_nodes(const MeshBase & pmesh)
1257 {
1258  // Set of all local, active element IDs. After we have identified border element
1259  // IDs, the set_difference between this set and the border_elem_ids set will give us
1260  // the set of internal_elem_ids.
1261  std::set<unsigned> all_elem_ids;
1262 
1263  // A set of processor IDs which elements on this processor have as
1264  // neighbors. The size of this set will determine the number of
1265  // element communication maps in Exodus.
1266  std::set<unsigned> neighboring_processor_ids;
1267 
1268  // Will be used to create conversion objects capable of mapping libmesh
1269  // element numberings into Nemesis numberings.
1270  ExodusII_IO_Helper::ElementMaps element_mapper;
1271 
1272  for (const auto & elem : pmesh.active_local_element_ptr_range())
1273  {
1274  // Add this Elem's ID to all_elem_ids, later we will take the difference
1275  // between this set and the set of border_elem_ids, to get the set of
1276  // internal_elem_ids.
1277  all_elem_ids.insert(elem->id());
1278 
1279  // Will be set to true if element is determined to be a border element
1280  bool is_border_elem = false;
1281 
1282  // Construct a conversion object for this Element. This will help us map
1283  // Libmesh numberings into Nemesis numberings for sides.
1284  ExodusII_IO_Helper::Conversion conv = element_mapper.assign_conversion(elem->type());
1285 
1286  // Add all this element's node IDs to the set of all node IDs.
1287  // The set of internal_node_ids will be the set difference between
1288  // the set of all nodes and the set of border nodes.
1289  //
1290  // In addition, if any node of a local element is listed in the
1291  // border nodes list, then this element goes into the proc_border_elem_sets.
1292  // Note that there is not a 1:1 correspondence between
1293  // border_elem_ids and the entries which go into proc_border_elem_sets.
1294  // The latter is for communication purposes, ie determining which elements
1295  // should be shared between processors.
1296  for (unsigned int node=0; node<elem->n_nodes(); ++node)
1297  {
1298  this->nodes_attached_to_local_elems.insert(elem->node_id(node));
1299  } // end loop over element's nodes
1300 
1301  // Loop over element's neighbors, see if it has a neighbor which is off-processor
1302  for (auto n : elem->side_index_range())
1303  {
1304  if (elem->neighbor_ptr(n) != libmesh_nullptr)
1305  {
1306  unsigned neighbor_proc_id = elem->neighbor_ptr(n)->processor_id();
1307 
1308  // If my neighbor has a different processor ID, I must be a border element.
1309  // Also track the neighboring processor ID if it is different from our processor ID
1310  if (neighbor_proc_id != this->processor_id())
1311  {
1312  is_border_elem = true;
1313  neighboring_processor_ids.insert(neighbor_proc_id);
1314 
1315  // Convert libmesh side(n) of this element into a side ID for Nemesis
1316  unsigned nemesis_side_id = conv.get_inverse_side_map(n);
1317 
1318  if (verbose)
1319  libMesh::out << "[" << this->processor_id() << "] LibMesh side "
1320  << n
1321  << " mapped to (1-based) Exodus side "
1322  << nemesis_side_id
1323  << std::endl;
1324 
1325  // Add this element's ID and the ID of the side which is on the boundary
1326  // to the set of border elements for this processor.
1327  // Note: if the set does not already exist, this creates it.
1328  this->proc_border_elem_sets[ neighbor_proc_id ].insert( std::make_pair(elem->id(), nemesis_side_id) );
1329  }
1330  }
1331  } // end for loop over neighbors
1332 
1333  // If we're on a border element, add it to the set
1334  if (is_border_elem)
1335  this->border_elem_ids.insert( elem->id() );
1336 
1337  } // end for loop over active local elements
1338 
1339  // Take the set_difference between all elements and border elements to get internal
1340  // element IDs
1341  std::set_difference(all_elem_ids.begin(), all_elem_ids.end(),
1342  this->border_elem_ids.begin(), this->border_elem_ids.end(),
1343  std::inserter(this->internal_elem_ids, this->internal_elem_ids.end()));
1344 
1345  // Take the set_difference between all nodes and border nodes to get internal nodes
1346  std::set_difference(this->nodes_attached_to_local_elems.begin(), this->nodes_attached_to_local_elems.end(),
1347  this->border_node_ids.begin(), this->border_node_ids.end(),
1348  std::inserter(this->internal_node_ids, this->internal_node_ids.end()));
1349 
1350  if (verbose)
1351  {
1352  libMesh::out << "[" << this->processor_id() << "] neighboring_processor_ids = ";
1353  for (std::set<unsigned>::iterator it = neighboring_processor_ids.begin();
1354  it != neighboring_processor_ids.end();
1355  ++it)
1356  {
1357  libMesh::out << *it << " ";
1358  }
1359  libMesh::out << std::endl;
1360  }
1361 
1362  // The size of the neighboring_processor_ids set should be the number of element communication maps
1363  this->num_elem_cmaps =
1364  cast_int<int>(neighboring_processor_ids.size());
1365 
1366  if (verbose)
1367  libMesh::out << "[" << this->processor_id() << "] "
1368  << "Number of neighboring processor IDs="
1369  << this->num_elem_cmaps
1370  << std::endl;
1371 
1372  if (verbose)
1373  {
1374  // Print out counts of border elements for each processor
1375  for (std::map<unsigned, std::set<std::pair<unsigned,unsigned>>>::iterator it = this->proc_border_elem_sets.begin();
1376  it != this->proc_border_elem_sets.end(); ++it)
1377  {
1378  libMesh::out << "[" << this->processor_id() << "] "
1379  << "Proc "
1380  << it->first << " communicates "
1381  << it->second.size() << " elements." << std::endl;
1382  }
1383  }
1384 
1385  // Store the number of internal and border elements, and the number of internal nodes,
1386  // to be written to the Nemesis file.
1387  this->num_internal_elems =
1388  cast_int<int>(this->internal_elem_ids.size());
1389  this->num_border_elems =
1390  cast_int<int>(this->border_elem_ids.size());
1391  this->num_internal_nodes =
1392  cast_int<int>(this->internal_node_ids.size());
1393 
1394  if (verbose)
1395  {
1396  libMesh::out << "[" << this->processor_id() << "] num_internal_nodes=" << this->num_internal_nodes << std::endl;
1397  libMesh::out << "[" << this->processor_id() << "] num_border_nodes=" << this->num_border_nodes << std::endl;
1398  libMesh::out << "[" << this->processor_id() << "] num_border_elems=" << this->num_border_elems << std::endl;
1399  libMesh::out << "[" << this->processor_id() << "] num_internal_elems=" << this->num_internal_elems << std::endl;
1400  }
1401 }
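
The internal/border split relies on std::set_difference over sorted ranges: every local id that is not a border id is internal. A minimal standalone illustration (both inputs must be sorted, which std::set guarantees):

#include <algorithm>
#include <iostream>
#include <iterator>
#include <set>

int main()
{
  std::set<unsigned> all_ids    = {1, 2, 3, 4, 5, 6};
  std::set<unsigned> border_ids = {2, 5};

  std::set<unsigned> internal_ids;
  std::set_difference(all_ids.begin(), all_ids.end(),
                      border_ids.begin(), border_ids.end(),
                      std::inserter(internal_ids, internal_ids.end()));

  // Prints: 1 3 4 6
  for (unsigned id : internal_ids)
    std::cout << id << ' ';
  std::cout << '\n';
  return 0;
}
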
1402 
1403 
1404 
1405 void Nemesis_IO_Helper::compute_num_global_sidesets(const MeshBase & pmesh)
1406 {
1407  // 1.) Get reference to the set of side boundary IDs
1408  std::set<boundary_id_type> global_side_boundary_ids
1409  (pmesh.get_boundary_info().get_side_boundary_ids().begin(),
1410  pmesh.get_boundary_info().get_side_boundary_ids().end());
1411 
1412  // 2.) Gather boundary side IDs from other processors
1413  this->comm().set_union(global_side_boundary_ids);
1414 
1415  // 3.) Now global_side_boundary_ids actually contains a global list of all side boundary IDs
1416  this->num_side_sets_global =
1417  cast_int<int>(global_side_boundary_ids.size());
1418 
1419  // 4.) Pack these sidesets into a vector so they can be written by Nemesis
1420  this->global_sideset_ids.clear(); // Make sure there is no leftover information
1421  this->global_sideset_ids.insert(this->global_sideset_ids.end(),
1422  global_side_boundary_ids.begin(),
1423  global_side_boundary_ids.end());
1424 
1425  if (verbose)
1426  {
1427  libMesh::out << "[" << this->processor_id() << "] global_sideset_ids = ";
1428  for (std::size_t i=0; i<this->global_sideset_ids.size(); ++i)
1429  libMesh::out << this->global_sideset_ids[i] << ", ";
1430  libMesh::out << std::endl;
1431  }
1432 
1433  // We also need global counts of sides in each of the sidesets. Again, there may be a
1434  // better way to do this...
1435  std::vector<dof_id_type> bndry_elem_list;
1436  std::vector<unsigned short int> bndry_side_list;
1437  std::vector<boundary_id_type> bndry_id_list;
1438  pmesh.get_boundary_info().build_side_list(bndry_elem_list, bndry_side_list, bndry_id_list);
1439 
1440  // Similarly to the nodes, we can't count any sides for elements which aren't local
1441  std::vector<dof_id_type>::iterator it_elem=bndry_elem_list.begin();
1442  std::vector<unsigned short>::iterator it_side=bndry_side_list.begin();
1443  std::vector<boundary_id_type>::iterator it_id=bndry_id_list.begin();
1444 
1445  // New end iterators, to be updated as we find non-local IDs
1446  std::vector<dof_id_type>::iterator new_bndry_elem_list_end = bndry_elem_list.end();
1447  std::vector<unsigned short>::iterator new_bndry_side_list_end = bndry_side_list.end();
1448  std::vector<boundary_id_type>::iterator new_bndry_id_list_end = bndry_id_list.end();
1449 
1450  for ( ; it_elem != new_bndry_elem_list_end; )
1451  {
1452  if (pmesh.elem_ref(*it_elem).processor_id() != this->processor_id())
1453  {
1454  // Back up the new end iterators to prepare for swap
1455  --new_bndry_elem_list_end;
1456  --new_bndry_side_list_end;
1457  --new_bndry_id_list_end;
1458 
1459  // Swap places, the non-local elem will now be "past-the-end"
1460  std::swap (*it_elem, *new_bndry_elem_list_end);
1461  std::swap (*it_side, *new_bndry_side_list_end);
1462  std::swap (*it_id, *new_bndry_id_list_end);
1463  }
1464  else // elem is local, go to next
1465  {
1466  ++it_elem;
1467  ++it_side;
1468  ++it_id;
1469  }
1470  }
1471 
1472  // Erase from "new" end to old end on each vector.
1473  bndry_elem_list.erase(new_bndry_elem_list_end, bndry_elem_list.end());
1474  bndry_side_list.erase(new_bndry_side_list_end, bndry_side_list.end());
1475  bndry_id_list.erase(new_bndry_id_list_end, bndry_id_list.end());
1476 
1477  this->num_global_side_counts.clear(); // Make sure we don't have any leftover information
1478  this->num_global_side_counts.resize(this->global_sideset_ids.size());
1479 
1480  // Get the count for each global sideset ID
1481  for (std::size_t i=0; i<global_sideset_ids.size(); ++i)
1482  {
1483  this->num_global_side_counts[i] = cast_int<int>
1484  (std::count(bndry_id_list.begin(),
1485  bndry_id_list.end(),
1486  cast_int<boundary_id_type>(this->global_sideset_ids[i])));
1487  }
1488 
1489  if (verbose)
1490  {
1491  libMesh::out << "[" << this->processor_id() << "] num_global_side_counts = ";
1492  for (std::size_t i=0; i<this->num_global_side_counts.size(); ++i)
1493  libMesh::out << this->num_global_side_counts[i] << ", ";
1494  libMesh::out << std::endl;
1495  }
1496 
1497  // Finally sum up the result
1498  this->comm().sum(this->num_global_side_counts);
1499 
1500  if (verbose)
1501  {
1502  libMesh::out << "[" << this->processor_id() << "] num_global_side_counts = ";
1503  for (std::size_t i=0; i<this->num_global_side_counts.size(); ++i)
1504  libMesh::out << this->num_global_side_counts[i] << ", ";
1505  libMesh::out << std::endl;
1506  }
1507 }
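
The pruning loop above uses a swap-to-the-back idiom so that several index-aligned vectors (element, side, and boundary id) stay synchronized while non-local entries are discarded; a single std::remove_if cannot do that across multiple containers. A small sketch of the same idiom on two parallel vectors, with an arbitrary "negative key means drop" test standing in for the ownership check:

#include <algorithm>
#include <vector>

// Drop every entry whose key is negative, keeping 'keys' and 'data' aligned.
// Unwanted entries are swapped past a logical end, then erased in one shot
// from both vectors.
void filter_parallel_vectors(std::vector<int> & keys,
                             std::vector<double> & data)
{
  std::size_t new_end = keys.size();
  std::size_t i = 0;

  while (i < new_end)
    {
      if (keys[i] < 0)
        {
          --new_end;
          std::swap(keys[i], keys[new_end]);
          std::swap(data[i], data[new_end]);
          // Do not advance i: the swapped-in entry still needs to be checked.
        }
      else
        ++i;
    }

  keys.erase(keys.begin() + new_end, keys.end());
  data.erase(data.begin() + new_end, data.end());
}
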
1508 
1509 
1510 
1511 
1512 
1513 
1514 void Nemesis_IO_Helper::compute_num_global_nodesets(const MeshBase & pmesh)
1515 {
1516  std::set<boundary_id_type> local_node_boundary_ids;
1517 
1518  // 1.) Get reference to the set of node boundary IDs *for this processor*
1519  std::set<boundary_id_type> global_node_boundary_ids
1520  (pmesh.get_boundary_info().get_node_boundary_ids().begin(),
1521  pmesh.get_boundary_info().get_node_boundary_ids().end());
1522 
1523  // Save a copy of the local_node_boundary_ids...
1524  local_node_boundary_ids = global_node_boundary_ids;
1525 
1526  // 2.) Gather boundary node IDs from other processors
1527  this->comm().set_union(global_node_boundary_ids);
1528 
1529  // 3.) Now global_node_boundary_ids actually contains a global list of all node boundary IDs
1530  this->num_node_sets_global =
1531  cast_int<int>(global_node_boundary_ids.size());
1532 
1533  // 4.) Create a vector<int> from the global_node_boundary_ids set
1534  this->global_nodeset_ids.clear();
1535  this->global_nodeset_ids.insert(this->global_nodeset_ids.end(),
1536  global_node_boundary_ids.begin(),
1537  global_node_boundary_ids.end());
1538 
1539  if (verbose)
1540  {
1541  libMesh::out << "[" << this->processor_id() << "] global_nodeset_ids = ";
1542  for (std::size_t i=0; i<global_nodeset_ids.size(); ++i)
1543  libMesh::out << global_nodeset_ids[i] << ", ";
1544  libMesh::out << std::endl;
1545 
1546  libMesh::out << "[" << this->processor_id() << "] local_node_boundary_ids = ";
1547  for (std::set<boundary_id_type>::iterator it = local_node_boundary_ids.begin();
1548  it != local_node_boundary_ids.end();
1549  ++it)
1550  libMesh::out << *it << ", ";
1551  libMesh::out << std::endl;
1552  }
1553 
1554  // 7.) We also need to know how many nodes are in each of the nodesets, globally.
1555  // There is probably a better way to do this...
1556  std::vector<dof_id_type> boundary_node_list;
1557  std::vector<boundary_id_type> boundary_node_boundary_id_list;
1558  pmesh.get_boundary_info().build_node_list
1559  (boundary_node_list, boundary_node_boundary_id_list);
1560 
1561  if (verbose)
1562  {
1563  libMesh::out << "[" << this->processor_id() << "] boundary_node_list.size()="
1564  << boundary_node_list.size() << std::endl;
1565  libMesh::out << "[" << this->processor_id() << "] (boundary_node_id, boundary_id) = ";
1566  for (std::size_t i=0; i<boundary_node_list.size(); ++i)
1567  {
1568  libMesh::out << "(" << boundary_node_list[i] << ", " << boundary_node_boundary_id_list[i] << ") ";
1569  }
1570  libMesh::out << std::endl;
1571  }
1572 
1573  // Now get the global information. In this case, we only want to count boundary
1574  // information for nodes *owned* by this processor, so there are no duplicates.
1575 
1576  // Make sure we don't have any left over information
1577  this->num_global_node_counts.clear();
1578  this->num_global_node_counts.resize(this->global_nodeset_ids.size());
1579 
1580  // Unfortunately, we can't just count up all occurrences of a given id,
1581  // since that would give us duplicate entries when we do the parallel summation.
1582  // So instead, only count entries for nodes owned by this processor.
1583  // Start by getting rid of all non-local node entries from the vectors.
1584  std::vector<dof_id_type>::iterator it_node=boundary_node_list.begin();
1585  std::vector<boundary_id_type>::iterator it_id=boundary_node_boundary_id_list.begin();
1586 
1587  // New end iterators, to be updated as we find non-local IDs
1588  std::vector<dof_id_type>::iterator new_node_list_end = boundary_node_list.end();
1589  std::vector<boundary_id_type>::iterator new_boundary_id_list_end = boundary_node_boundary_id_list.end();
1590  for ( ; it_node != new_node_list_end; )
1591  {
1592  if (pmesh.node_ptr( *it_node )->processor_id() != this->processor_id())
1593  {
1594  // Back up the new end iterators to prepare for swap
1595  --new_node_list_end;
1596  --new_boundary_id_list_end;
1597 
1598  // Swap places, the non-local node will now be "past-the-end"
1599  std::swap (*it_node, *new_node_list_end);
1600  std::swap (*it_id, *new_boundary_id_list_end);
1601  }
1602  else // node is local, go to next
1603  {
1604  ++it_node;
1605  ++it_id;
1606  }
1607  }
1608 
1609  // Erase from "new" end to old end on each vector.
1610  boundary_node_list.erase(new_node_list_end, boundary_node_list.end());
1611  boundary_node_boundary_id_list.erase(new_boundary_id_list_end, boundary_node_boundary_id_list.end());
1612 
1613  // Now we can do the local count for each ID...
1614  for (std::size_t i=0; i<global_nodeset_ids.size(); ++i)
1615  {
1616  this->num_global_node_counts[i] = cast_int<int>
1617  (std::count(boundary_node_boundary_id_list.begin(),
1618  boundary_node_boundary_id_list.end(),
1619  cast_int<boundary_id_type>(this->global_nodeset_ids[i])));
1620  }
1621 
1622  // And finally we can sum them up
1623  this->comm().sum(this->num_global_node_counts);
1624 
1625  if (verbose)
1626  {
1627  libMesh::out << "[" << this->processor_id() << "] num_global_node_counts = ";
1628  for (std::size_t i=0; i<num_global_node_counts.size(); ++i)
1629  libMesh::out << num_global_node_counts[i] << ", ";
1630  libMesh::out << std::endl;
1631  }
1632 }
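
Once the non-local entries are gone, the per-nodeset tallies are just std::count over the remaining boundary ids, followed by a parallel sum. A compact standalone version of the local counting step (the cross-processor sum is omitted):

#include <algorithm>
#include <iostream>
#include <vector>

int main()
{
  // Boundary ids attached to locally owned nodes (after pruning non-local ones).
  std::vector<int> local_boundary_ids = {1, 3, 1, 2, 3, 3};

  // The globally known nodeset ids, in the order they will be written.
  std::vector<int> global_nodeset_ids = {1, 2, 3};

  std::vector<int> local_counts(global_nodeset_ids.size());
  for (std::size_t i = 0; i < global_nodeset_ids.size(); ++i)
    local_counts[i] = static_cast<int>(std::count(local_boundary_ids.begin(),
                                                  local_boundary_ids.end(),
                                                  global_nodeset_ids[i]));

  // Prints: 2 1 3  (these per-processor counts are then summed over all ranks)
  for (int c : local_counts)
    std::cout << c << ' ';
  std::cout << '\n';
  return 0;
}
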
1633 
1634 
1635 
1636 
1637 void Nemesis_IO_Helper::compute_num_global_elem_blocks(const MeshBase & pmesh)
1638 {
1639  // 1.) Loop over active local elements, build up set of subdomain IDs.
1640  std::set<subdomain_id_type> global_subdomain_ids;
1641 
1642  // This map keeps track of the number of elements in each subdomain over all processors
1643  std::map<subdomain_id_type, unsigned> global_subdomain_counts;
1644 
1645  for (const auto & elem : pmesh.active_local_element_ptr_range())
1646  {
1647  subdomain_id_type cur_subdomain = elem->subdomain_id();
1648 
1649  /*
1650  // We can't have a zero subdomain ID in Exodus (for some reason?)
1651  // so map zero subdomains to a max value...
1652  if (cur_subdomain == 0)
1653  cur_subdomain = std::numeric_limits<subdomain_id_type>::max();
1654  */
1655 
1656  global_subdomain_ids.insert(cur_subdomain);
1657 
1658  // Increment the count of elements in this subdomain
1659  global_subdomain_counts[cur_subdomain]++;
1660  }
1661 
1662  // We're next going to this->comm().sum the subdomain counts, so save the local counts
1663  this->local_subdomain_counts = global_subdomain_counts;
1664 
1665  {
1666  // 2.) Copy local subdomain IDs into a vector for communication
1667  std::vector<subdomain_id_type> global_subdomain_ids_vector(global_subdomain_ids.begin(),
1668  global_subdomain_ids.end());
1669 
1670  // 3.) Gather them into an enlarged vector
1671  this->comm().allgather(global_subdomain_ids_vector);
1672 
1673  // 4.) Insert any new IDs into the set (any duplicates will be dropped)
1674  global_subdomain_ids.insert(global_subdomain_ids_vector.begin(),
1675  global_subdomain_ids_vector.end());
1676  }
1677 
1678  // 5.) Now global_subdomain_ids actually contains a global list of all subdomain IDs
1679  this->num_elem_blks_global =
1680  cast_int<int>(global_subdomain_ids.size());
1681 
1682  // Print the number of elements found locally in each subdomain
1683  if (verbose)
1684  {
1685  libMesh::out << "[" << this->processor_id() << "] ";
1686  for (std::map<subdomain_id_type, unsigned>::iterator it=global_subdomain_counts.begin();
1687  it != global_subdomain_counts.end();
1688  ++it)
1689  {
1690  libMesh::out << "ID: "
1691  << static_cast<unsigned>(it->first)
1692  << ", Count: " << it->second << ", ";
1693  }
1694  libMesh::out << std::endl;
1695  }
1696 
1697  // 6.) this->comm().sum up the number of elements in each block. We know the global
1698  // subdomain IDs, so pack them into a vector one by one. Use a vector of int since
1699  // that is what Nemesis wants
1700  this->global_elem_blk_cnts.resize(global_subdomain_ids.size());
1701 
1702  unsigned cnt=0;
1703  for (std::set<subdomain_id_type>::iterator it=global_subdomain_ids.begin();
1704  it != global_subdomain_ids.end(); ++it)
1705  {
1706  // Find the entry in the local map, note: if not found, will be created with 0 default value, which is OK...
1707  this->global_elem_blk_cnts[cnt++] = global_subdomain_counts[*it];
1708  }
1709 
1710  // Sum up subdomain counts from all processors
1711  this->comm().sum(this->global_elem_blk_cnts);
1712 
1713  if (verbose)
1714  {
1715  libMesh::out << "[" << this->processor_id() << "] global_elem_blk_cnts = ";
1716  for (std::size_t i=0; i<this->global_elem_blk_cnts.size(); ++i)
1717  libMesh::out << this->global_elem_blk_cnts[i] << ", ";
1718  libMesh::out << std::endl;
1719  }
1720 
1721  // 7.) Create a vector<int> from the global_subdomain_ids set, for passing to Nemesis
1722  this->global_elem_blk_ids.clear();
1723  this->global_elem_blk_ids.insert(this->global_elem_blk_ids.end(), // pos
1724  global_subdomain_ids.begin(),
1725  global_subdomain_ids.end());
1726 
1727  if (verbose)
1728  {
1729  libMesh::out << "[" << this->processor_id() << "] global_elem_blk_ids = ";
1730  for (std::size_t i=0; i<this->global_elem_blk_ids.size(); ++i)
1731  libMesh::out << this->global_elem_blk_ids[i] << ", ";
1732  libMesh::out << std::endl;
1733  }
1734 
1735 
1736  // 8.) We will call put_eb_info_global later, it must be called after this->put_init_global().
1737 }
1738 
1739 
1740 
1741 
1742 void Nemesis_IO_Helper::build_element_and_node_maps(const MeshBase & pmesh)
1743 {
1744  // If we don't have any local subdomains, it had better be because
1745  // we don't have any local elements
1746 #ifdef DEBUG
1747  if (local_subdomain_counts.empty())
1748  {
1749  libmesh_assert (pmesh.active_local_elements_begin() ==
1750  pmesh.active_local_elements_end());
1752  }
1753 #endif
1754 
1755  // Elements have to be numbered contiguously based on what block
1756  // number they are in. Therefore we have to do a bit of work to get
1757  // the block (ie subdomain) numbers first and store them off as
1758  // block_ids.
1759 
1760  // Make sure there is no leftover information in the subdomain_map, and reserve
1761  // enough space to store the elements we need.
1762  this->subdomain_map.clear();
1763  for (std::map<subdomain_id_type, unsigned>::iterator it=this->local_subdomain_counts.begin();
1764  it != this->local_subdomain_counts.end();
1765  ++it)
1766  {
1767  subdomain_id_type cur_subdomain = it->first;
1768 
1769  /*
1770  // We can't have a zero subdomain ID in Exodus (for some reason?)
1771  // so map zero subdomains to a max value...
1772  if (cur_subdomain == 0)
1773  cur_subdomain = std::numeric_limits<subdomain_id_type>::max();
1774  */
1775 
1776  if (verbose)
1777  {
1778  libMesh::out << "[" << this->processor_id() << "] "
1779  << "local_subdomain_counts [" << static_cast<unsigned>(cur_subdomain) << "]= "
1780  << it->second
1781  << std::endl;
1782  }
1783 
1784  // it->first is the subdomain ID, it->second is the number of elements it contains
1785  this->subdomain_map[ cur_subdomain ].reserve( it->second );
1786  }
1787 
1788 
1789  // First loop over the elements to figure out which elements are in which subdomain
1790  for (const auto & elem : pmesh.active_local_element_ptr_range())
1791  {
1792  // Grab the nodes while we're here.
1793  for (unsigned int n=0; n<elem->n_nodes(); ++n)
1794  this->nodes_attached_to_local_elems.insert( elem->node_id(n) );
1795 
1796  subdomain_id_type cur_subdomain = elem->subdomain_id();
1797 
1798  this->subdomain_map[cur_subdomain].push_back
1799  (cast_int<unsigned>(elem->id()));
1800  }
1801 
1802  // Set num_nodes which is used by exodusII_io_helper
1803  this->num_nodes =
1804  cast_int<int>(this->nodes_attached_to_local_elems.size());
1805 
1806  // Now come up with a 1-based numbering for these nodes
1807  this->exodus_node_num_to_libmesh.clear(); // Make sure it's empty
1808  this->exodus_node_num_to_libmesh.reserve(this->nodes_attached_to_local_elems.size());
1809 
1810  // Also make sure there's no leftover information in the map which goes the
1811  // other direction.
1812  this->libmesh_node_num_to_exodus.clear();
1813 
1814  // Set the map for nodes
1815  for (std::set<int>::iterator it = this->nodes_attached_to_local_elems.begin();
1816  it != this->nodes_attached_to_local_elems.end();
1817  ++it)
1818  {
1819  // I.e. given exodus_node_id,
1820  // exodus_node_num_to_libmesh[ exodus_node_id ] returns the libmesh ID for that node.
1821  // Note that even though most of Exodus is 1-based, this code will map an Exodus ID of
1822  // zero to some libmesh node ID. Is that a problem?
1823  this->exodus_node_num_to_libmesh.push_back(*it);
1824 
1825  // Likewise, given libmesh_node_id,
1826  // libmesh_node_num_to_exodus[ libmesh_node_id ] returns the *Exodus* ID for that node.
1827  // Unlike the exodus_node_num_to_libmesh vector above, this one is a std::map
1828  this->libmesh_node_num_to_exodus[*it] =
1829  cast_int<int>(this->exodus_node_num_to_libmesh.size()); // should never be zero...
1830  }
1831 
1832  // Now we're going to loop over the subdomain map and build a few things right
1833  // now that we'll use later.
1834 
1835  // First make sure our data structures don't have any leftover data...
1836  this->exodus_elem_num_to_libmesh.clear();
1837  this->block_ids.clear();
1838  this->libmesh_elem_num_to_exodus.clear();
1839 
1840  // Now loop over each subdomain and get a unique numbering for the elements
1841  for (std::map<subdomain_id_type, std::vector<unsigned int>>::iterator it = this->subdomain_map.begin();
1842  it != this->subdomain_map.end();
1843  ++it)
1844  {
1845  block_ids.push_back(it->first);
1846 
1847  // Vector of element IDs for this subdomain
1848  std::vector<unsigned int> & elem_ids_this_subdomain = it->second;
1849 
1850  // The code below assumes this subdomain block is not empty, make sure that's the case!
1851  if (elem_ids_this_subdomain.size() == 0)
1852  libmesh_error_msg("Error, no element IDs found in subdomain " << it->first);
1853 
1854  ExodusII_IO_Helper::ElementMaps em;
1855 
1856  // Use the first element in this block to get representative information.
1857  // Note that Exodus assumes all elements in a block are of the same type!
1858  // We are using that same assumption here!
1859  const ExodusII_IO_Helper::Conversion conv = em.assign_conversion
1860  (pmesh.elem_ref(elem_ids_this_subdomain[0]).type());
1861  this->num_nodes_per_elem =
1862  pmesh.elem_ref(elem_ids_this_subdomain[0]).n_nodes();
1863 
1864  // Get a reference to the connectivity vector for this subdomain. This vector
1865  // is most likely empty, we are going to fill it up now.
1866  std::vector<int> & current_block_connectivity = this->block_id_to_elem_connectivity[it->first];
1867 
1868  // Just in case it's not already empty...
1869  current_block_connectivity.clear();
1870  current_block_connectivity.resize(elem_ids_this_subdomain.size() * this->num_nodes_per_elem);
1871 
1872  for (std::size_t i=0; i<elem_ids_this_subdomain.size(); i++)
1873  {
1874  unsigned int elem_id = elem_ids_this_subdomain[i];
1875 
1876  // Set the number map for elements
1877  this->exodus_elem_num_to_libmesh.push_back(elem_id);
1878  this->libmesh_elem_num_to_exodus[elem_id] =
1879  cast_int<int>(this->exodus_elem_num_to_libmesh.size());
1880 
1881  const Elem & elem = pmesh.elem_ref(elem_id);
1882 
1883  // Exodus/Nemesis want every block to have the same element type
1884  // libmesh_assert_equal_to (elem->type(), conv.get_canonical_type());
1885 
1886  // But we can get away with writing e.g. HEX8 and INFHEX8 in
1887  // the same block...
1888  libmesh_assert_equal_to (elem.n_nodes(), Elem::build(conv.get_canonical_type(), libmesh_nullptr)->n_nodes());
1889 
1890  for (unsigned int j=0; j < static_cast<unsigned int>(this->num_nodes_per_elem); j++)
1891  {
1892  const unsigned int connect_index = (i*this->num_nodes_per_elem)+j;
1893  const unsigned int elem_node_index = conv.get_node_map(j);
1894 
1895  std::map<int, int>::iterator node_it = libmesh_node_num_to_exodus.find(elem.node_id(elem_node_index));
1896  if (node_it == libmesh_node_num_to_exodus.end())
1897  libmesh_error_msg("Node number " << elem.node_id(elem_node_index) << " not found in libmesh_node_num_to_exodus map.");
1898 
1899  current_block_connectivity[connect_index] = node_it->second;
1900  }
1901  } // End loop over elems in this subdomain
1902  } // end loop over subdomain_map
1903 }
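// ----------------------------------------------------------------------
// Editor's note: illustrative sketch only, not part of nemesis_io_helper.C.
// It shows how the two node-numbering containers built above relate: the
// std::set yields the locally-used libMesh node ids in ascending order, so
// pushing them onto a vector produces a contiguous numbering whose 1-based
// position is the Exodus node number.
// ----------------------------------------------------------------------
#include <cassert>
#include <map>
#include <set>
#include <vector>

int main()
{
  const std::set<int> nodes_attached_to_local_elems = {3, 7, 42};

  std::vector<int> exodus_node_num_to_libmesh;    // position i <-> Exodus node i+1
  std::map<int, int> libmesh_node_num_to_exodus;  // libMesh id -> Exodus node number

  for (const int libmesh_id : nodes_attached_to_local_elems)
    {
      exodus_node_num_to_libmesh.push_back(libmesh_id);
      libmesh_node_num_to_exodus[libmesh_id] =
        static_cast<int>(exodus_node_num_to_libmesh.size()); // 1, 2, 3, ...
    }

  // Round trip: libMesh node 42 is Exodus node 3, and entry 3-1 maps back.
  assert(libmesh_node_num_to_exodus[42] == 3);
  assert(exodus_node_num_to_libmesh[libmesh_node_num_to_exodus[42] - 1] == 42);
  return 0;
}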
1904 
1905 
1906 
1907 
1908 
1909 void Nemesis_IO_Helper::compute_border_node_ids(const MeshBase & pmesh)
1910 {
1911  // The set which will eventually contain the IDs of "border nodes". These are nodes
1912  // that lie on the boundary between one or more processors.
1913  //std::set<unsigned> border_node_ids;
1914 
1915  // map from processor ID to set of nodes which elements from this processor "touch",
1916  // that is,
1917  // proc_nodes_touched[p] = (set all node IDs found in elements owned by processor p)
1918  std::map<unsigned, std::set<unsigned>> proc_nodes_touched;
1919 
1920 
1921  // We are going to create a lot of intermediate data structures here, so make sure
1922  // as many as possible are cleaned up by creating a scope!
1923  {
1924  // Loop over active (not just active local) elements, make sets of node IDs for each
1925  // processor which has an element that "touches" a node.
1926  for (const auto & elem : pmesh.active_element_ptr_range())
1927  {
1928  // Get reference to the set for this processor. If it does not exist
1929  // it will be created.
1930  std::set<unsigned> & set_p = proc_nodes_touched[ elem->processor_id() ];
1931 
1932  // Insert all nodes touched by this element into the set
1933  for (unsigned int node=0; node<elem->n_nodes(); ++node)
1934  set_p.insert(elem->node_id(node));
1935  }
1936 
1937  // The number of node communication maps is the number of other processors
1938  // with which we share nodes. (I think.) This is just the size of the map we just
1939  // created, minus 1.
1940  this->num_node_cmaps =
1941  cast_int<int>(proc_nodes_touched.size() - 1);
1942 
1943  // If we've got no elements on this processor and haven't touched
1944  // any nodes, however, then that's 0 other processors with which
1945  // we share nodes, not -1.
1946  if (this->num_node_cmaps == -1)
1947  {
1949  this->num_node_cmaps = 0;
1950  }
1951 
1952  // We can't be connecting to more processors than exist outside
1953  // ourselves
1954  libmesh_assert_less (static_cast<unsigned>(this->num_node_cmaps), this->n_processors());
1955 
1956  if (verbose)
1957  {
1958  libMesh::out << "[" << this->processor_id()
1959  << "] proc_nodes_touched contains "
1960  << proc_nodes_touched.size()
1961  << " sets of nodes."
1962  << std::endl;
1963 
1964  for (proc_nodes_touched_iterator it = proc_nodes_touched.begin();
1965  it != proc_nodes_touched.end();
1966  ++it)
1967  {
1968  libMesh::out << "[" << this->processor_id()
1969  << "] proc_nodes_touched[" << it->first << "] has "
1970  << it->second.size()
1971  << " entries."
1972  << std::endl;
1973  }
1974  }
1975 
1976 
1977  // Loop over all the sets we just created and compute intersections with
1978  // this processor's set. Obviously, don't intersect with ourself.
1979  for (proc_nodes_touched_iterator it = proc_nodes_touched.begin();
1980  it != proc_nodes_touched.end();
1981  ++it)
1982  {
1983  // Don't compute intersections with ourself
1984  if (it->first == this->processor_id())
1985  continue;
1986 
1987  // Otherwise, compute intersection with other processor and ourself
1988  std::set<unsigned> & my_set = proc_nodes_touched[this->processor_id()];
1989  std::set<unsigned> & other_set = it->second;
1990  std::set<unsigned> & result_set = this->proc_nodes_touched_intersections[ it->first ]; // created if does not exist
1991 
1992  std::set_intersection(my_set.begin(), my_set.end(),
1993  other_set.begin(), other_set.end(),
1994  std::inserter(result_set, result_set.end()));
1995  }
1996 
1997  if (verbose)
1998  {
1999  for (proc_nodes_touched_iterator it = this->proc_nodes_touched_intersections.begin();
2000  it != this->proc_nodes_touched_intersections.end();
2001  ++it)
2002  {
2003  libMesh::out << "[" << this->processor_id()
2004  << "] this->proc_nodes_touched_intersections[" << it->first << "] has "
2005  << it->second.size()
2006  << " entries."
2007  << std::endl;
2008  }
2009  }
2010 
2011  // Compute the set_union of all the preceding intersections. This will be the set of
2012  // border node IDs for this processor.
2013  for (proc_nodes_touched_iterator it = this->proc_nodes_touched_intersections.begin();
2014  it != this->proc_nodes_touched_intersections.end();
2015  ++it)
2016  {
2017  std::set<unsigned> & other_set = it->second;
2018  std::set<unsigned> intermediate_result; // Don't think we can insert into one of the sets we're unioning...
2019 
2020  std::set_union(this->border_node_ids.begin(), this->border_node_ids.end(),
2021  other_set.begin(), other_set.end(),
2022  std::inserter(intermediate_result, intermediate_result.end()));
2023 
2024  // Swap our intermediate result into the final set
2025  this->border_node_ids.swap(intermediate_result);
2026  }
2027 
2028  if (verbose)
2029  {
2030  libMesh::out << "[" << this->processor_id()
2031  << "] border_node_ids.size()=" << this->border_node_ids.size()
2032  << std::endl;
2033  }
2034  } // end scope for border node ID creation
2035 
2036  // Store the number of border node IDs to be written to Nemesis file
2037  this->num_border_nodes = cast_int<int>(this->border_node_ids.size());
2038 }
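// ----------------------------------------------------------------------
// Editor's note: illustrative sketch only, not part of nemesis_io_helper.C.
// It condenses the border-node computation above to plain std::set algebra:
// intersect "my" touched nodes with every other processor's touched nodes,
// then take the union of those intersections. (The original accumulates the
// union with std::set_union into an intermediate set; plain insert() into
// the accumulator is equivalent and used here for brevity.)
// ----------------------------------------------------------------------
#include <algorithm>
#include <iterator>
#include <map>
#include <set>

std::set<unsigned>
compute_border_nodes(const std::map<unsigned, std::set<unsigned>> & proc_nodes_touched,
                     unsigned my_rank)
{
  std::set<unsigned> border;

  const auto my_it = proc_nodes_touched.find(my_rank);
  if (my_it == proc_nodes_touched.end())
    return border; // we touch no nodes, so we share none

  const std::set<unsigned> & mine = my_it->second;

  for (const auto & proc_and_nodes : proc_nodes_touched)
    {
      if (proc_and_nodes.first == my_rank)
        continue; // never intersect with ourselves

      std::set<unsigned> shared;
      std::set_intersection(mine.begin(), mine.end(),
                            proc_and_nodes.second.begin(),
                            proc_and_nodes.second.end(),
                            std::inserter(shared, shared.end()));

      border.insert(shared.begin(), shared.end());
    }

  return border;
}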
2039 
2040 
2041 
2042 
2043 
2044 void Nemesis_IO_Helper::write_nodesets(const MeshBase & mesh)
2045 {
2046  // Write the nodesets. In Nemesis, the idea is to "create space" for the global
2047  // set of boundary nodesets, but to only write node IDs which are local to the current
2048  // processor. This is what is done in Nemesis files created by the "loadbal" script.
2049 
2050  // Store a map of vectors for boundary node IDs on this processor.
2051  // Use a vector of int here so it can be passed directly to Exodus.
2052  std::map<boundary_id_type, std::vector<int>> local_node_boundary_id_lists;
2053  typedef std::map<boundary_id_type, std::vector<int>>::iterator local_node_boundary_id_lists_iterator;
2054 
2055  // FIXME: We should build this list only one time!! We already built it above, but we
2056  // did not have the libmesh to exodus node mapping at that time... for now we'll just
2057  // build it here again, hopefully it's small relative to the size of the entire mesh.
2058  std::vector<dof_id_type> boundary_node_list;
2059  std::vector<boundary_id_type> boundary_node_boundary_id_list;
2060  mesh.get_boundary_info().build_node_list
2061  (boundary_node_list, boundary_node_boundary_id_list);
2062 
2063  if (verbose)
2064  {
2065  libMesh::out << "[" << this->processor_id() << "] boundary_node_list.size()="
2066  << boundary_node_list.size() << std::endl;
2067  libMesh::out << "[" << this->processor_id() << "] (boundary_node_id, boundary_id) = ";
2068  for (std::size_t i=0; i<boundary_node_list.size(); ++i)
2069  {
2070  libMesh::out << "(" << boundary_node_list[i] << ", " << boundary_node_boundary_id_list[i] << ") ";
2071  }
2072  libMesh::out << std::endl;
2073  }
2074 
2075  // For each node in the node list, add it to the vector of node IDs for that
2076  // set for the local processor. This will be used later when writing Exodus
2077  // nodesets.
2078  for (std::size_t i=0; i<boundary_node_list.size(); ++i)
2079  {
2080  // Don't try to grab a reference to the vector unless the current node is attached
2081  // to a local element. Otherwise, another processor will be responsible for writing it in its nodeset.
2082  std::map<int, int>::iterator it = this->libmesh_node_num_to_exodus.find( boundary_node_list[i] );
2083 
2084  if (it != this->libmesh_node_num_to_exodus.end())
2085  {
2086  // Get reference to the vector where this node ID will be inserted. If it
2087  // doesn't yet exist, this will create it.
2088  std::vector<int> & current_id_set = local_node_boundary_id_lists[ boundary_node_boundary_id_list[i] ];
2089 
2090  // Push back Exodus-mapped node ID for this set
2091  // TODO: reserve space in these vectors somehow.
2092  current_id_set.push_back( it->second );
2093  }
2094  }
2095 
2096  // See what we got
2097  if (verbose)
2098  {
2099  for (std::map<boundary_id_type, std::vector<int>>::iterator it = local_node_boundary_id_lists.begin();
2100  it != local_node_boundary_id_lists.end();
2101  ++it)
2102  {
2103  libMesh::out << "[" << this->processor_id() << "] ID: " << it->first << ", ";
2104 
2105  std::vector<int> & current_id_set = it->second;
2106 
2107  // Libmesh node ID (Exodus Node ID)
2108  for (std::size_t j=0; j<current_id_set.size(); ++j)
2109  libMesh::out << current_id_set[j]
2110  << ", ";
2111 
2112  libMesh::out << std::endl;
2113  }
2114  }
2115 
2116  // Loop over *global* nodeset IDs, call the Exodus API. Note that some nodesets may be empty
2117  // for a given processor.
2118  if (global_nodeset_ids.size() > 0)
2119  {
2120  NamesData names_table(global_nodeset_ids.size(), MAX_STR_LENGTH);
2121 
2122  for (std::size_t i=0; i<this->global_nodeset_ids.size(); ++i)
2123  {
2124  const std::string & current_ns_name =
2125  mesh.get_boundary_info().get_nodeset_name(cast_int<boundary_id_type>(this->global_nodeset_ids[i]));
2126 
2127  // Store this name in a data structure that will be used to
2128  // write sideset names to file.
2129  names_table.push_back_entry(current_ns_name);
2130 
2131  if (verbose)
2132  {
2133  libMesh::out << "[" << this->processor_id()
2134  << "] Writing out Exodus nodeset info for ID: " << global_nodeset_ids[i]
2135  << ", Name: " << current_ns_name
2136  << std::endl;
2137  }
2138 
2139  // Convert current global_nodeset_id into an exodus ID, which can't be zero...
2140  int exodus_id = global_nodeset_ids[i];
2141 
2142  /*
2143  // Exodus can't handle zero nodeset IDs (?) Use max short here since
2144  // when libmesh reads it back in, it will want to store it as a short...
2145  if (exodus_id==0)
2146  exodus_id = std::numeric_limits<short>::max();
2147  */
2148 
2149  // Try to find this boundary ID in the local list we created
2150  local_node_boundary_id_lists_iterator it =
2151  local_node_boundary_id_lists.find (cast_int<boundary_id_type>(this->global_nodeset_ids[i]));
2152 
2153  // No nodes found for this boundary ID on this processor
2154  if (it == local_node_boundary_id_lists.end())
2155  {
2156  if (verbose)
2157  libMesh::out << "[" << this->processor_id()
2158  << "] No nodeset data for ID: " << global_nodeset_ids[i]
2159  << " on this processor." << std::endl;
2160 
2161  // Call the Exodus interface to write the parameters of this node set
2162  this->ex_err = exII::ex_put_node_set_param(this->ex_id,
2163  exodus_id,
2164  0, /* No nodes for this ID */
2165  0 /* No distribution factors */);
2166  EX_CHECK_ERR(this->ex_err, "Error writing nodeset parameters in Nemesis");
2167 
2168  }
2169  else // Boundary ID *was* found in list
2170  {
2171  // Get reference to the vector of node IDs
2172  std::vector<int> & current_nodeset_ids = it->second;
2173 
2174  // Call the Exodus interface to write the parameters of this node set
2175  this->ex_err = exII::ex_put_node_set_param(this->ex_id,
2176  exodus_id,
2177  current_nodeset_ids.size(),
2178  0 /* No distribution factors */);
2179 
2180  EX_CHECK_ERR(this->ex_err, "Error writing nodeset parameters in Nemesis");
2181 
2182  // Call Exodus interface to write the actual node IDs for this boundary ID
2183  this->ex_err = exII::ex_put_node_set(this->ex_id,
2184  exodus_id,
2185  &current_nodeset_ids[0]);
2186 
2187  EX_CHECK_ERR(this->ex_err, "Error writing nodesets in Nemesis");
2188 
2189  }
2190  } // end loop over global nodeset IDs
2191 
2192  // Write out the nodeset names
2193  ex_err = exII::ex_put_names(ex_id,
2194  exII::EX_NODE_SET,
2195  names_table.get_char_star_star());
2196  EX_CHECK_ERR(ex_err, "Error writing nodeset names");
2197  } // end if (global_nodeset_ids.size() > 0)
2198 }
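// ----------------------------------------------------------------------
// Editor's note: illustrative sketch only, not part of nemesis_io_helper.C.
// It shows the grouping step above in isolation: flat parallel lists of
// (libMesh node id, boundary id) pairs become one vector of Exodus node
// numbers per boundary id, with nodes that are not in the local
// libMesh-to-Exodus map silently skipped (another processor writes those).
// ----------------------------------------------------------------------
#include <cstddef>
#include <map>
#include <vector>

std::map<int, std::vector<int>>
group_local_nodeset_members(const std::vector<int> & node_list,
                            const std::vector<int> & boundary_id_list,
                            const std::map<int, int> & libmesh_node_num_to_exodus)
{
  std::map<int, std::vector<int>> local_node_boundary_id_lists;

  for (std::size_t i = 0; i < node_list.size(); ++i)
    {
      const auto it = libmesh_node_num_to_exodus.find(node_list[i]);

      // Only nodes attached to local elements have an Exodus number here.
      if (it != libmesh_node_num_to_exodus.end())
        local_node_boundary_id_lists[boundary_id_list[i]].push_back(it->second);
    }

  return local_node_boundary_id_lists;
}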
2199 
2200 
2201 
2202 
2203 void Nemesis_IO_Helper::write_sidesets(const MeshBase & mesh)
2204 {
2205  // Write the sidesets. In Nemesis, the idea is to "create space" for the global
2206  // set of boundary sidesets, but to only write sideset IDs which are local to the current
2207  // processor. This is what is done in Nemesis files created by the "loadbal" script.
2208  // See also: ExodusII_IO_Helper::write_sidesets()...
2209 
2210 
2211  // Store a map of vectors for boundary side IDs on this processor.
2212  // Use a vector of int here so it can be passed directly to Exodus.
2213  std::map<boundary_id_type, std::vector<int>> local_elem_boundary_id_lists;
2214  std::map<boundary_id_type, std::vector<int>> local_elem_boundary_id_side_lists;
2215  typedef std::map<boundary_id_type, std::vector<int>>::iterator local_elem_boundary_id_lists_iterator;
2216 
2217  ExodusII_IO_Helper::ElementMaps em;
2218 
2219  // FIXME: We already built this list once, we should reuse that information!
2220  std::vector<dof_id_type > bndry_elem_list;
2221  std::vector<unsigned short int > bndry_side_list;
2222  std::vector<boundary_id_type > bndry_id_list;
2223 
2224  mesh.get_boundary_info().build_side_list
2225  (bndry_elem_list, bndry_side_list, bndry_id_list);
2226 
2227  // Integer looping, skipping non-local elements
2228  for (std::size_t i=0; i<bndry_elem_list.size(); ++i)
2229  {
2230  // Get pointer to current Elem
2231  const Elem * elem = mesh.elem_ptr(bndry_elem_list[i]);
2232 
2233  std::vector<const Elem *> family;
2234 #ifdef LIBMESH_ENABLE_AMR
2235  // We need to build up active elements if AMR is enabled and add
2236  // them to the exodus sidesets instead of the potentially inactive "parent" elements
2237  // Technically we don't need to "reset" the tree since the vector was just created.
2238  elem->active_family_tree_by_side(family, bndry_side_list[i], /*reset tree=*/false);
2239 #else
2240  // If AMR is not even enabled, just push back the element itself
2241  family.push_back( elem );
2242 #endif
2243 
2244  // Loop over all the elements in the family tree, store their converted IDs
2245  // and side IDs to the map's vectors. TODO: Somehow reserve enough space for these
2246  // push_back's...
2247  for (std::size_t j=0; j<family.size(); ++j)
2248  {
2249  const dof_id_type f_id = family[j]->id();
2250  const Elem & f = mesh.elem_ref(f_id);
2251 
2252  // If element is local, process it
2253  if (f.processor_id() == this->processor_id())
2254  {
2255  const ExodusII_IO_Helper::Conversion conv = em.assign_conversion(f.type());
2256 
2257  // Use the libmesh to exodus data structure map to get the proper sideset IDs
2258  // The data structure contains the "collapsed" contiguous ids.
2259  //
2260  // We know the parent element is local, but let's be absolutely sure that all the children have been
2261  // actually mapped to Exodus IDs before we blindly try to add them...
2262  std::map<int,int>::iterator it = this->libmesh_elem_num_to_exodus.find( f_id );
2263  if (it != this->libmesh_elem_num_to_exodus.end())
2264  {
2265  local_elem_boundary_id_lists[ bndry_id_list[i] ].push_back( it->second );
2266  local_elem_boundary_id_side_lists[ bndry_id_list[i] ].push_back(conv.get_inverse_side_map( bndry_side_list[i] ));
2267  }
2268  else
2269  libmesh_error_msg("Error, no Exodus mapping for Elem " \
2270  << f_id \
2271  << " on processor " \
2272  << this->processor_id());
2273  }
2274  }
2275  }
2276 
2277 
2278  // Loop over *global* sideset IDs, call the Exodus API. Note that some sidesets may be empty
2279  // for a given processor.
2280  if (global_sideset_ids.size() > 0)
2281  {
2282  NamesData names_table(global_sideset_ids.size(), MAX_STR_LENGTH);
2283 
2284  for (std::size_t i=0; i<this->global_sideset_ids.size(); ++i)
2285  {
2286  const std::string & current_ss_name =
2287  mesh.get_boundary_info().get_sideset_name(cast_int<boundary_id_type>(this->global_sideset_ids[i]));
2288 
2289  // Store this name in a data structure that will be used to
2290  // write sideset names to file.
2291  names_table.push_back_entry(current_ss_name);
2292 
2293  if (verbose)
2294  {
2295  libMesh::out << "[" << this->processor_id()
2296  << "] Writing out Exodus sideset info for ID: " << global_sideset_ids[i]
2297  << ", Name: " << current_ss_name
2298  << std::endl;
2299  }
2300 
2301  // Convert current global_sideset_id into an exodus ID, which can't be zero...
2302  int exodus_id = global_sideset_ids[i];
2303 
2304  /*
2305  // Exodus can't handle zero sideset IDs (?) Use max short here since
2306  // when libmesh reads it back in, it will want to store it as a short...
2307  if (exodus_id==0)
2308  exodus_id = std::numeric_limits<short>::max();
2309  */
2310 
2311  // Try to find this boundary ID in the local list we created
2312  local_elem_boundary_id_lists_iterator it =
2313  local_elem_boundary_id_lists.find (cast_int<boundary_id_type>(this->global_sideset_ids[i]));
2314 
2315  // No sides found for this boundary ID on this processor
2316  if (it == local_elem_boundary_id_lists.end())
2317  {
2318  if (verbose)
2319  libMesh::out << "[" << this->processor_id()
2320  << "] No sideset data for ID: " << global_sideset_ids[i]
2321  << " on this processor." << std::endl;
2322 
2323  // Call the Exodus interface to write the parameters of this side set
2324  this->ex_err = exII::ex_put_side_set_param(this->ex_id,
2325  exodus_id,
2326  0, /* No sides for this ID */
2327  0 /* No distribution factors */);
2328  EX_CHECK_ERR(this->ex_err, "Error writing sideset parameters in Nemesis");
2329 
2330  }
2331  else // Boundary ID *was* found in list
2332  {
2333  // Get iterator to sides vector as well
2334  local_elem_boundary_id_lists_iterator it_sides =
2335  local_elem_boundary_id_side_lists.find (cast_int<boundary_id_type>(this->global_sideset_ids[i]));
2336 
2337  libmesh_assert (it_sides != local_elem_boundary_id_side_lists.end());
2338 
2339  // Get reference to the vector of elem IDs
2340  std::vector<int> & current_sideset_elem_ids = it->second;
2341 
2342  // Get reference to the vector of side IDs
2343  std::vector<int> & current_sideset_side_ids = (*it_sides).second;
2344 
2345  // Call the Exodus interface to write the parameters of this side set
2346  this->ex_err = exII::ex_put_side_set_param(this->ex_id,
2347  exodus_id,
2348  current_sideset_elem_ids.size(),
2349  0 /* No distribution factors */);
2350 
2351  EX_CHECK_ERR(this->ex_err, "Error writing sideset parameters in Nemesis");
2352 
2353  // Call Exodus interface to write the actual side IDs for this boundary ID
2354  this->ex_err = exII::ex_put_side_set(this->ex_id,
2355  exodus_id,
2356  &current_sideset_elem_ids[0],
2357  &current_sideset_side_ids[0]);
2358 
2359  EX_CHECK_ERR(this->ex_err, "Error writing sidesets in Nemesis");
2360  }
2361  } // end for loop over global sideset IDs
2362 
2363  // Write sideset names to file. Some of these may be blank strings
2364  // if the current processor didn't have all the sideset names for
2365  // any reason...
2366  ex_err = exII::ex_put_names(this->ex_id,
2367  exII::EX_SIDE_SET,
2368  names_table.get_char_star_star());
2369  EX_CHECK_ERR(ex_err, "Error writing sideset names");
2370 
2371  } // end if (global_sideset_ids.size() > 0)
2372 }
2373 
2374 
2375 
2376 void Nemesis_IO_Helper::write_nodal_coordinates(const MeshBase & mesh, bool /*use_discontinuous*/)
2377 {
2378  // Make sure that the reference passed in is really a DistributedMesh
2379  // const DistributedMesh & pmesh = cast_ref<const DistributedMesh &>(mesh);
2380 
2381  unsigned local_num_nodes =
2382  cast_int<unsigned int>(this->exodus_node_num_to_libmesh.size());
2383 
2384  x.resize(local_num_nodes);
2385  y.resize(local_num_nodes);
2386  z.resize(local_num_nodes);
2387 
2388  // Just loop over our list outputting the nodes the way we built the map
2389  for (unsigned int i=0; i<local_num_nodes; ++i)
2390  {
2391  const Point & pt = mesh.point(this->exodus_node_num_to_libmesh[i]);
2392  x[i]=pt(0);
2393  y[i]=pt(1);
2394  z[i]=pt(2);
2395  }
2396 
2397  if (local_num_nodes)
2398  {
2399  if (_single_precision)
2400  {
2401  std::vector<float>
2402  x_single(x.begin(), x.end()),
2403  y_single(y.begin(), y.end()),
2404  z_single(z.begin(), z.end());
2405 
2406  ex_err = exII::ex_put_coord(ex_id, &x_single[0], &y_single[0], &z_single[0]);
2407  }
2408  else
2409  {
2410  // Call Exodus API to write nodal coordinates...
2411  ex_err = exII::ex_put_coord(ex_id, &x[0], &y[0], &z[0]);
2412  }
2413  EX_CHECK_ERR(ex_err, "Error writing node coordinates");
2414 
2415  // And write the nodal map we created for them
2416  ex_err = exII::ex_put_node_num_map(ex_id, &(this->exodus_node_num_to_libmesh[0]));
2417  EX_CHECK_ERR(ex_err, "Error writing node num map");
2418  }
2419  else // Does the Exodus API want us to write empty nodal coordinates?
2420  {
2421  ex_err = exII::ex_put_coord(ex_id, libmesh_nullptr, libmesh_nullptr, libmesh_nullptr);
2422  EX_CHECK_ERR(ex_err, "Error writing empty node coordinates");
2423 
2424  ex_err = exII::ex_put_node_num_map(ex_id, libmesh_nullptr);
2425  EX_CHECK_ERR(ex_err, "Error writing empty node num map");
2426  }
2427 }
2428 
2429 
2430 
2431 
2432 
2433 void Nemesis_IO_Helper::write_elements(const MeshBase & mesh, bool /*use_discontinuous*/)
2434 {
2435  // Only write elements if there are elements blocks available.
2436  if (this->num_elem_blks_global > 0)
2437  {
2438  // Data structure to store element block names that will be used to
2439  // write the element block names to file.
2440  NamesData names_table(this->num_elem_blks_global, MAX_STR_LENGTH);
2441 
2442  // Loop over all blocks, even if we don't have elements in each block.
2443  // If we don't have elements we need to write out a 0 for that block...
2444  for (unsigned int i=0; i<static_cast<unsigned>(this->num_elem_blks_global); ++i)
2445  {
2446  // Even if there are no elements for this block on the current
2447  // processor, we still want to write its name to file, if
2448  // possible. MeshBase::subdomain_name() will just return an
2449  // empty string if there is no name associated with the current
2450  // block.
2451  names_table.push_back_entry(mesh.subdomain_name(this->global_elem_blk_ids[i]));
2452 
2453  // Search for the current global block ID in the map
2454  std::map<int, std::vector<int>>::iterator it =
2455  this->block_id_to_elem_connectivity.find( this->global_elem_blk_ids[i] );
2456 
2457  // If not found, write a zero to file....
2458  if (it == this->block_id_to_elem_connectivity.end())
2459  {
2460  this->ex_err = exII::ex_put_elem_block(this->ex_id,
2461  this->global_elem_blk_ids[i],
2462  "Empty",
2463  0, /* n. elements in this block */
2464  0, /* n. nodes per element */
2465  0); /* number of attributes per element */
2466 
2467  EX_CHECK_ERR(this->ex_err, "Error writing element block from Nemesis.");
2468  }
2469 
2470  // Otherwise, write the actual block information and connectivity to file
2471  else
2472  {
2473  subdomain_id_type block =
2474  cast_int<subdomain_id_type>(it->first);
2475  std::vector<int> & this_block_connectivity = it->second;
2476  std::vector<unsigned int> & elements_in_this_block = subdomain_map[block];
2477 
2478  ExodusII_IO_Helper::ElementMaps em;
2479 
2480  //Use the first element in this block to get representative information.
2481  //Note that Exodus assumes all elements in a block are of the same type!
2482  //We are using that same assumption here!
2483  const ExodusII_IO_Helper::Conversion conv =
2484  em.assign_conversion(mesh.elem_ref(elements_in_this_block[0]).type());
2485 
2486  this->num_nodes_per_elem =
2487  mesh.elem_ref(elements_in_this_block[0]).n_nodes();
2488 
2489  ex_err = exII::ex_put_elem_block(ex_id,
2490  block,
2491  conv.exodus_elem_type().c_str(),
2492  elements_in_this_block.size(),
2493  this->num_nodes_per_elem,
2494  0);
2495  EX_CHECK_ERR(ex_err, "Error writing element block from Nemesis.");
2496 
2497  ex_err = exII::ex_put_elem_conn(ex_id,
2498  block,
2499  &this_block_connectivity[0]);
2500  EX_CHECK_ERR(ex_err, "Error writing element connectivities from Nemesis.");
2501  }
2502  } // end loop over global block IDs
2503 
2504  // Only call this once, not in the loop above!
2505  ex_err = exII::ex_put_elem_num_map(ex_id,
2506  &(this->exodus_elem_num_to_libmesh[0]));
2507  EX_CHECK_ERR(ex_err, "Error writing element map");
2508 
2509  // Write the element block names to file.
2510  ex_err = exII::ex_put_names(ex_id, exII::EX_ELEM_BLOCK, names_table.get_char_star_star());
2511  EX_CHECK_ERR(ex_err, "Error writing element block names");
2512  } // end if (this->num_elem_blks_global > 0)
2513 }
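// ----------------------------------------------------------------------
// Editor's note: illustrative sketch only, not part of nemesis_io_helper.C.
// It spells out the flat, block-wise connectivity layout handed to
// ex_put_elem_conn() above: for a block whose elements all have
// n_nodes_per_elem nodes, node j of block-local element i lives at index
// i * n_nodes_per_elem + j.
// ----------------------------------------------------------------------
#include <cstddef>
#include <vector>

inline int & connectivity_entry(std::vector<int> & block_connectivity,
                                std::size_t n_nodes_per_elem,
                                std::size_t elem,   // block-local element index
                                std::size_t node)   // local node index within the element
{
  return block_connectivity[elem * n_nodes_per_elem + node];
}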
2514 
2515 
2516 
2517 
2518 
2519 void Nemesis_IO_Helper::write_nodal_solution(const std::vector<Number> & values,
2520  const std::vector<std::string> & names,
2521  int timestep)
2522 {
2523  int num_vars = cast_int<int>(names.size());
2524  //int num_values = values.size(); // Not used?
2525 
2526  for (int c=0; c<num_vars; c++)
2527  {
2528 #ifdef LIBMESH_USE_COMPLEX_NUMBERS
2529  std::vector<Real> real_parts(num_nodes);
2530  std::vector<Real> imag_parts(num_nodes);
2531  std::vector<Real> magnitudes(num_nodes);
2532 
2533  for (int i=0; i<num_nodes; ++i)
2534  {
2535  Number value = values[this->exodus_node_num_to_libmesh[i]*num_vars + c];
2536  real_parts[i] = value.real();
2537  imag_parts[i] = value.imag();
2538  magnitudes[i] = std::abs(value);
2539  }
2540  write_nodal_values(3*c+1,real_parts,timestep);
2541  write_nodal_values(3*c+2,imag_parts,timestep);
2542  write_nodal_values(3*c+3,magnitudes,timestep);
2543 #else
2544  std::vector<Number> cur_soln(num_nodes);
2545 
2546  // Copy out this variable's solution
2547  for (int i=0; i<num_nodes; i++)
2548  cur_soln[i] = values[this->exodus_node_num_to_libmesh[i]*num_vars + c];
2549 
2550  write_nodal_values(c+1,cur_soln,timestep);
2551 #endif
2552  }
2553 }
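// ----------------------------------------------------------------------
// Editor's note: illustrative sketch only, not part of nemesis_io_helper.C.
// It shows the node-major layout assumed by the indexing above: with
// num_vars variables stored per node, the value of variable c at the node
// with libMesh id n sits at values[n * num_vars + c]. The Exodus-local node
// index i is first translated through exodus_node_num_to_libmesh[i].
// ----------------------------------------------------------------------
#include <cstddef>
#include <vector>

inline double nodal_value(const std::vector<double> & values,
                          std::size_t libmesh_node_id,
                          std::size_t num_vars,
                          std::size_t var)
{
  return values[libmesh_node_id * num_vars + var];
}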
2554 
2555 
2556 
2557 void Nemesis_IO_Helper::write_nodal_solution(const NumericVector<Number> & parallel_soln,
2558  const std::vector<std::string> & names,
2559  int timestep)
2560 {
2561  int num_vars = cast_int<int>(names.size());
2562 
2563 #ifndef LIBMESH_USE_COMPLEX_NUMBERS
2564 
2565  for (int c=0; c<num_vars; c++)
2566  {
2567  // Fill up a std::vector with the dofs for the current variable
2568  std::vector<numeric_index_type> required_indices(num_nodes);
2569 
2570  for (int i=0; i<num_nodes; i++)
2571  required_indices[i] = this->exodus_node_num_to_libmesh[i]*num_vars + c;
2572 
2573  // Get the dof values required to write just our local part of
2574  // the solution vector.
2575  std::vector<Number> local_soln;
2576  parallel_soln.localize(local_soln, required_indices);
2577 
2578  // Call the ExodusII_IO_Helper function to write the data.
2579  write_nodal_values(c+1, local_soln, timestep);
2580  }
2581 
2582 #else // LIBMESH_USE_COMPLEX_NUMBERS
2583 
2584  for (int c=0; c<num_vars; c++)
2585  {
2586  // Fill up a std::vector with the dofs for the current variable
2587  std::vector<numeric_index_type> required_indices(num_nodes);
2588 
2589  for (int i=0; i<num_nodes; i++)
2590  required_indices[i] = this->exodus_node_num_to_libmesh[i]*num_vars + c;
2591 
2592  // Get the dof values required to write just our local part of
2593  // the solution vector.
2594  std::vector<Number> local_soln;
2595  parallel_soln.localize(local_soln, required_indices);
2596 
2597  // We have the local (complex) values. Now extract the real,
2598  // imaginary, and magnitude values from them.
2599  std::vector<Real> real_parts(num_nodes);
2600  std::vector<Real> imag_parts(num_nodes);
2601  std::vector<Real> magnitudes(num_nodes);
2602 
2603  for (int i=0; i<num_nodes; ++i)
2604  {
2605  real_parts[i] = local_soln[i].real();
2606  imag_parts[i] = local_soln[i].imag();
2607  magnitudes[i] = std::abs(local_soln[i]);
2608  }
2609 
2610  // Write the real, imaginary, and magnitude values to file.
2611  write_nodal_values(3*c+1, real_parts, timestep);
2612  write_nodal_values(3*c+2, imag_parts, timestep);
2613  write_nodal_values(3*c+3, magnitudes, timestep);
2614  }
2615 
2616 #endif
2617 
2618 
2619 }
2620 
2621 
2622 
2623 
2624 std::string Nemesis_IO_Helper::construct_nemesis_filename(const std::string & base_filename)
2625 {
2626  // Build a filename for this processor. This code is cut-n-pasted from the read function
2627  // and should probably be put into a separate function...
2628  std::ostringstream file_oss;
2629 
2630  // We have to be a little careful here: Nemesis left pads its file
2631  // numbers based on the number of processors, so for example on 10
2632  // processors, we'd have:
2633  // mesh.e.10.00
2634  // mesh.e.10.01
2635  // mesh.e.10.02
2636  // ...
2637  // mesh.e.10.09
2638 
2639  // And on 100 you'd have:
2640  // mesh.e.100.000
2641  // mesh.e.100.001
2642  // ...
2643  // mesh.e.100.099
2644 
2645  // Find the length of the highest processor ID
2646  file_oss << (this->n_processors());
2647  unsigned int field_width = cast_int<unsigned int>(file_oss.str().size());
2648 
2649  if (verbose)
2650  libMesh::out << "field_width=" << field_width << std::endl;
2651 
2652  file_oss.str(""); // reset the string stream
2653  file_oss << base_filename
2654  << '.' << this->n_processors()
2655  << '.' << std::setfill('0') << std::setw(field_width) << this->processor_id();
2656 
2657  // Return the resulting string
2658  return file_oss.str();
2659 }
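// ----------------------------------------------------------------------
// Editor's note: illustrative sketch only, not part of nemesis_io_helper.C.
// It reproduces the zero-padded naming scheme of construct_nemesis_filename()
// above as a free function, so the padding rule is easy to test in isolation:
// the rank field is padded to the number of digits in the processor count,
// e.g. base "mesh.e" on 128 processors, rank 7 -> "mesh.e.128.007".
// ----------------------------------------------------------------------
#include <iomanip>
#include <sstream>
#include <string>

std::string nemesis_filename(const std::string & base_filename,
                             unsigned n_procs,
                             unsigned rank)
{
  std::ostringstream oss;

  // Field width = number of digits in the processor count.
  oss << n_procs;
  const int field_width = static_cast<int>(oss.str().size());

  oss.str("");
  oss << base_filename
      << '.' << n_procs
      << '.' << std::setfill('0') << std::setw(field_width) << rank;

  return oss.str();
}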
2660 
2661 } // namespace libMesh
2662 
2663 #endif // #if defined(LIBMESH_HAVE_NEMESIS_API) && defined(LIBMESH_HAVE_EXODUS_API)