rb_construction_base.C
// rbOOmit: An implementation of the Certified Reduced Basis method.
// Copyright (C) 2009, 2010 David J. Knezevic

// This file is part of rbOOmit.

// rbOOmit is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.

// rbOOmit is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.

// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

// C++ includes
#include <algorithm>
#include <cstddef>
#include <ctime>
#include <cstdlib> // *must* precede <cmath> for proper std::abs() on PGI, Sun Studio CC
#include <cmath>
#include <iterator>
#include <memory>
#include <numeric>

// rbOOmit includes
#include "libmesh/rb_construction_base.h"
#include "libmesh/rb_parameters.h"

// libMesh includes
#include "libmesh/id_types.h"
#include "libmesh/libmesh_common.h"
#include "libmesh/numeric_vector.h"
#include "libmesh/equation_systems.h"
#include "libmesh/parallel.h"
#include "libmesh/condensed_eigen_system.h"
#include "libmesh/linear_implicit_system.h"
#include "libmesh/int_range.h"
#include "libmesh/utility.h"

// Nvidia C++ whining about destroying incomplete unique_ptr<T> Base::foo types
#include "libmesh/dof_map.h"
#include "libmesh/shell_matrix.h"
#include "libmesh/sparse_matrix.h"
#include "timpi/communicator.h"

// Anonymous namespace
namespace
{

/*
 * Helper function to divide a vector of samples across processors.
 * Returns a pair with num_local_samples and first_local_index.
 */
std::pair<unsigned int, unsigned int> calculate_n_local_samples_and_index(
    const libMesh::Parallel::Communicator & communicator,
    const unsigned int n_global_samples,
    const bool serial)
{
  unsigned int n_local_samples = n_global_samples;
  unsigned int first_local_index = 0;

  if (serial || communicator.size() == 1)
    return {n_local_samples, first_local_index};

  // Calculate the number of training parameters local to this processor
  unsigned int quotient  = n_global_samples/communicator.size();
  unsigned int remainder = n_global_samples%communicator.size();
  if (communicator.rank() < remainder)
    {
      n_local_samples = (quotient + 1);
      first_local_index = communicator.rank()*(quotient+1);
    }
  else
    {
      n_local_samples = quotient;
      first_local_index = communicator.rank()*quotient + remainder;
    }
  return {n_local_samples, first_local_index};
}
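
// A worked example of the partitioning above (the numbers are illustrative
// only, not library output): with n_global_samples = 10 on a 4-rank
// communicator, quotient = 2 and remainder = 2, so
//
//   rank 0: n_local_samples = 3, first_local_index = 0
//   rank 1: n_local_samples = 3, first_local_index = 3
//   rank 2: n_local_samples = 2, first_local_index = 6
//   rank 3: n_local_samples = 2, first_local_index = 8
//
// i.e. the first `remainder` ranks absorb the leftover samples and every
// global index in [0, 10) is owned by exactly one rank.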
} // end anonymous namespace

namespace libMesh
{

// ------------------------------------------------------------
// RBConstructionBase implementation

template <class Base>
RBConstructionBase<Base>::RBConstructionBase (EquationSystems & es,
                                              const std::string & name_in,
                                              const unsigned int number_in)
  : Base(es, name_in, number_in),
    quiet_mode(true),
    serial_training_set(false),
    _training_parameters_initialized(false),
    _first_local_index(0),
    _n_local_training_samples(0),
    _n_global_training_samples(0),
    _training_parameters_random_seed(-1) // by default, use std::time to seed RNG
{
}

template <class Base>
RBConstructionBase<Base>::~RBConstructionBase () = default;

template <class Base>
void RBConstructionBase<Base>::clear ()
{
  // clear the parent data
  Base::clear();
  RBParametrized::clear();
  _training_parameters.clear();
}

template <class Base>
void RBConstructionBase<Base>::init_data ()
{
  Base::init_data();

  // Initialize the inner product storage vector, which is useful for
  // storing intermediate results when evaluating inner products
  inner_product_storage_vector = NumericVector<Number>::build(this->comm());
  inner_product_storage_vector->init (this->n_dofs(), this->n_local_dofs(), false, PARALLEL);
}

template <class Base>
void RBConstructionBase<Base>::get_global_max_error_pair(const Parallel::Communicator & communicator,
                                                         std::pair<numeric_index_type, Real> & error_pair)
{
  // Set error_pair.second to the maximum global value and also
  // find which processor contains the maximum value
  unsigned int proc_ID_index;
  communicator.maxloc(error_pair.second, proc_ID_index);

  // Then broadcast error_pair.first from proc_ID_index
  communicator.broadcast(error_pair.first, proc_ID_index);
}
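
// An illustrative trace (assumed values, not library output): if rank 0
// holds error_pair = (5, 0.1) and rank 1 holds error_pair = (9, 0.7), then
// maxloc() sets error_pair.second = 0.7 everywhere and identifies rank 1 as
// the owner, and the subsequent broadcast() makes error_pair.first = 9 on
// all ranks, so every processor ends up with the global pair (9, 0.7).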

template <class Base>
numeric_index_type RBConstructionBase<Base>::get_n_training_samples() const
{
  libmesh_error_msg_if(!_training_parameters_initialized,
                       "Error: training parameters must first be initialized.");

  // First we check if there are no parameters here, and in that case we
  // return 1 since a single training sample is sufficient to generate an
  // RB approximation if there are no parameters. Note that in parallel,
  // when we don't have a serial training set, we return comm().size()
  // so that each processor is assigned a single (empty) training sample.
  if (_training_parameters.empty())
    {
      if (serial_training_set)
        return 1;
      else
        return this->comm().size();
    }

  return _n_global_training_samples;
}

template <class Base>
numeric_index_type RBConstructionBase<Base>::get_local_n_training_samples() const
{
  libmesh_error_msg_if(!_training_parameters_initialized,
                       "Error: training parameters must first be initialized.");

  // First we check if there are no parameters here, and in that case we
  // return 1 for both serial and parallel training sets. This is consistent
  // with get_n_training_samples(), and avoids accessing
  // training_parameters.begin() when training_parameters is empty.
  if (_training_parameters.empty())
    return 1;

  return _n_local_training_samples;
}

template <class Base>
numeric_index_type RBConstructionBase<Base>::get_first_local_training_index() const
{
  libmesh_error_msg_if(!_training_parameters_initialized,
                       "Error: training parameters must first be initialized.");

  // First we check if there are no parameters here, and in that case we
  // return 0 for a serial training set and comm().rank() for a parallel
  // training set. This is consistent with get_n_training_samples(), and
  // avoids accessing training_parameters.begin() when training_parameters
  // is empty.
  if (_training_parameters.empty())
    {
      if (serial_training_set)
        return 0;
      else
        return this->comm().rank();
    }

  return _first_local_index;
}

template <class Base>
numeric_index_type RBConstructionBase<Base>::get_last_local_training_index() const
{
  libmesh_error_msg_if(!_training_parameters_initialized,
                       "Error: training parameters must first be initialized.");

  if (_training_parameters.empty())
    return 0;

  return _first_local_index + _n_local_training_samples;
}
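
// To summarize the parameter-free conventions above with a concrete, purely
// illustrative case: with no parameters and serial_training_set == false on
// 4 processors, get_n_training_samples() == 4 and each rank r reports
// get_local_n_training_samples() == 1 with first local index r, exactly as
// if one empty sample had been assigned per processor.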

template <class Base>
void RBConstructionBase<Base>::set_params_from_training_set(unsigned int global_index)
{
  set_parameters(get_params_from_training_set(global_index));
}

template <class Base>
RBParameters RBConstructionBase<Base>::get_params_from_training_set(unsigned int global_index)
{
  libmesh_error_msg_if(!_training_parameters_initialized,
                       "Error: training parameters must first be initialized.");

  // If the _training_parameters are empty, return an empty RBParameters.
  // Otherwise, create a new RBParameters object from the single sample requested.
  RBParameters params;
  if (!_training_parameters.empty())
    {
      libmesh_error_msg_if((global_index < this->get_first_local_training_index()) ||
                           (global_index >= this->get_last_local_training_index()),
                           "Error: index "
                           << global_index
                           << " must be within range: "
                           << this->get_first_local_training_index()
                           << " - "
                           << this->get_last_local_training_index());

      const numeric_index_type local_index = global_index - get_first_local_training_index();
      for (const auto & [param_name, sample_vector] : _training_parameters)
        params.set_value(param_name, sample_vector[local_index]);

      // Copy all extra values into the new RBParameters.
      // We assume that the samples may be indexed differently for extra parameters,
      // so we don't just copy the local_index value.
      const auto & mine = get_parameters();
      for (const auto & [key, extra_sample_vector] :
           as_range(mine.extra_begin(), mine.extra_end()))
        {
          for (const auto idx : index_range(extra_sample_vector))
            params.set_extra_value(key, idx, extra_sample_vector[idx]);
        }
    }

  return params;
}
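
// A minimal usage sketch (hypothetical, as it might appear in a derived
// class's training loop; not code from this file): one typically walks the
// locally-owned index range and queries one sample at a time,
//
//   for (auto i = this->get_first_local_training_index();
//        i < this->get_last_local_training_index(); ++i)
//     {
//       const RBParameters mu = this->get_params_from_training_set(i);
//       // ... evaluate a truth solve or error indicator at mu ...
//     }
//
// relying on the range check above to catch any non-local index.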

template <class Base>
void RBConstructionBase<Base>::set_params_from_training_set_and_broadcast(unsigned int global_index)
{
  libmesh_error_msg_if(!_training_parameters_initialized,
                       "Error: training parameters must first be initialized.");

  processor_id_type root_id = 0;
  if ((this->get_first_local_training_index() <= global_index) &&
      (global_index < this->get_last_local_training_index()))
    {
      // Set parameters on only one processor
      set_params_from_training_set(global_index);

      // set root_id, only non-zero on one processor
      root_id = this->processor_id();
    }

  // broadcast
  this->comm().max(root_id);
  broadcast_parameters(root_id);
}

template <class Base>
void RBConstructionBase<Base>::initialize_training_parameters(const RBParameters & mu_min,
                                                              const RBParameters & mu_max,
                                                              const unsigned int n_global_training_samples,
                                                              const std::map<std::string,bool> & log_param_scale,
                                                              const bool deterministic)
{
  if (!is_quiet())
    {
      // Print out some info about the training set initialization
      libMesh::out << "Initializing training parameters with "
                   << (deterministic ? "deterministic " : "random " )
                   << "training set..." << std::endl;

      for (const auto & pr : log_param_scale)
        libMesh::out << "Parameter "
                     << pr.first
                     << ": log scaling = "
                     << pr.second
                     << std::endl;

      libMesh::out << std::endl;
    }

  if (deterministic)
    {
      const auto [first_local_index, last_local_index] =
        generate_training_parameters_deterministic(this->comm(),
                                                   log_param_scale,
                                                   _training_parameters,
                                                   n_global_training_samples,
                                                   mu_min,
                                                   mu_max,
                                                   serial_training_set);
      _first_local_index = first_local_index;
      _n_local_training_samples = last_local_index-first_local_index;
    }
  else
    {
      // Generate random training samples for all parameters
      const auto [first_local_index, last_local_index] =
        generate_training_parameters_random(this->comm(),
                                            log_param_scale,
                                            _training_parameters,
                                            n_global_training_samples,
                                            mu_min,
                                            mu_max,
                                            this->_training_parameters_random_seed,
                                            serial_training_set);
      _first_local_index = first_local_index;
      _n_local_training_samples = last_local_index-first_local_index;
    }
  _n_global_training_samples = _n_local_training_samples;

  if (!serial_training_set)
    this->comm().sum(_n_global_training_samples);

  // For each parameter that only allows discrete values, we "snap" to the nearest
  // allowable discrete value
  if (get_n_discrete_params() > 0)
    {
      for (auto & [param_name, sample_vector] : _training_parameters)
        {
          if (is_discrete_parameter(param_name))
            {
              std::vector<Real> discrete_values =
                get_discrete_parameter_values().find(param_name)->second;
              for (const auto sample_idx : index_range(sample_vector))
                {
                  // Round all values to the closest discrete value.
                  std::vector<Real> discretized_vector(sample_vector[sample_idx].size());
                  std::transform(sample_vector[sample_idx].cbegin(),
                                 sample_vector[sample_idx].cend(),
                                 discretized_vector.begin(),
                                 [&discrete_values](const Real & val) {
                                   return get_closest_value(val, discrete_values);
                                 });
                  sample_vector[sample_idx] = discretized_vector;
                }
            }
        }
    }

  _training_parameters_initialized = true;
}
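
// A usage sketch (the parameter names "nu" and "load" and the bounds are
// hypothetical, chosen only to illustrate the expected arguments):
// initializing a 100-sample random training set where "nu" is sampled on a
// log10 scale and "load" linearly:
//
//   RBParameters mu_min, mu_max;
//   mu_min.set_value("nu", 1.e-3);   mu_max.set_value("nu", 1.);
//   mu_min.set_value("load", 0.);    mu_max.set_value("load", 5.);
//
//   std::map<std::string, bool> log_scale = {{"nu", true}, {"load", false}};
//   this->initialize_training_parameters(mu_min, mu_max,
//                                        /*n_global_training_samples=*/100,
//                                        log_scale,
//                                        /*deterministic=*/false);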

template <class Base>
void RBConstructionBase<Base>::load_training_set(const std::map<std::string, std::vector<RBParameter>> & new_training_set)
{
  // Make sure we're running this on all processors at the same time
  libmesh_parallel_only(this->comm());

  // First, make sure that an initial training set has already been generated
  libmesh_error_msg_if(!_training_parameters_initialized,
                       "Error: load_training_set cannot be used to initialize parameters");

  // Make sure that the training set has the correct number of parameters
  const unsigned int n_params = get_n_params();
  libmesh_error_msg_if(new_training_set.size() > n_params,
                       "Error: new_training_set should not have more than get_n_params() parameters.");

  // Check that (new_training_set.size() == get_n_params()) is the same on all processes so that
  // we go into the same branch of the "if" statement below on all processes.
  const bool size_matches = (new_training_set.size() == n_params);
  this->comm().verify(size_matches);

  if (size_matches)
    {
      // If new_training_set stores values for all parameters, then we overwrite
      // _training_parameters with new_training_set.

      // Get the number of local and global training parameters
      _first_local_index = 0;
      _n_local_training_samples =
        cast_int<numeric_index_type>(new_training_set.begin()->second.size());
      _n_global_training_samples = _n_local_training_samples;

      if (!serial_training_set)
        {
          this->comm().sum(_n_global_training_samples);

          // Set the first/last indices.
          std::vector<numeric_index_type> local_sizes (this->n_processors(), 0);
          local_sizes[this->processor_id()] = _n_local_training_samples;
          this->comm().sum(local_sizes);

          // first_local_index is the sum of local_sizes
          // for all processor ids less than ours
          for (auto p : make_range(this->processor_id()))
            _first_local_index += local_sizes[p];
        }

      // Ensure that the parameters are the same.
      for (const auto & pr : _training_parameters)
        libmesh_error_msg_if(!new_training_set.count(pr.first),
                             "Parameters must be identical in order to overwrite dataset.");

      // Copy the values from the new_training_set to the internal training_parameters.
      _training_parameters = new_training_set;
    }
  else
    {
      // If new_training_set stores values for a subset of the parameters, then we keep the
      // length of training_parameters unchanged and overwrite the entries of the specified
      // parameters from new_training_set. Note that we repeatedly loop over new_training_set
      // to fill up the entire length of the sample_vector.
      for (auto & [param_name, sample_vector]: _training_parameters)
        {
          if (new_training_set.count(param_name))
            {
              for (const auto i : make_range(get_local_n_training_samples()))
                {
                  const unsigned int num_new_samples = libmesh_map_find(new_training_set,param_name).size();
                  libmesh_error_msg_if (num_new_samples==0, "new_training_set should not be empty");

                  const unsigned int new_training_set_index = i % num_new_samples;
                  sample_vector[i] = libmesh_map_find(new_training_set,param_name)[new_training_set_index];
                }
            }
        }
    }
}
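
// To make the subset branch concrete (values are illustrative only): if the
// existing training set has 4 local samples and new_training_set supplies
// only {"nu" -> {a, b}}, the modulo indexing above overwrites the "nu"
// samples with a, b, a, b at local indices 0..3, while every other
// parameter's samples are left untouched.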

template <class Base>
void RBConstructionBase<Base>::set_training_parameter_values(
    const std::string & param_name, const std::vector<RBParameter> & values)
{
  libmesh_error_msg_if(!_training_parameters_initialized,
                       "Training parameters must be initialized before calling set_training_parameter_values");
  libmesh_error_msg_if(values.size() != get_local_n_training_samples(),
                       "Inconsistent sizes");

  // Copy the new data, overwriting the old data.
  auto & training_vector = libmesh_map_find(_training_parameters, param_name);
  training_vector = values;
}


template <class Base>
std::pair<std::size_t, std::size_t>
RBConstructionBase<Base>::generate_training_parameters_random(const Parallel::Communicator & communicator,
                                                              const std::map<std::string, bool> & log_param_scale,
                                                              std::map<std::string, std::vector<RBParameter>> & local_training_parameters_in,
                                                              const unsigned int n_global_training_samples_in,
                                                              const RBParameters & min_parameters,
                                                              const RBParameters & max_parameters,
                                                              const int training_parameters_random_seed,
                                                              const bool serial_training_set)
{
  const unsigned int num_params = min_parameters.n_parameters();
  libmesh_error_msg_if(num_params!=max_parameters.n_parameters(),
                       "Number of parameters must be identical for min/max.");

  // Clear training_parameters_in
  local_training_parameters_in.clear();

  if (num_params == 0)
    return {0,0};

  if (training_parameters_random_seed < 0)
    {
      if (!serial_training_set)
        {
          // seed the random number generator with the system time
          // and the processor ID so that the seed is different
          // on different processors
          std::srand( static_cast<unsigned>( std::time(0)*(1+communicator.rank()) ));
        }
      else
        {
          // seed the random number generator with the system time
          // only so that the seed is the same on all processors
          //
          // Note that we broadcast the time on processor 0 to make
          // sure all processors agree.
          unsigned int current_time = static_cast<unsigned>( std::time(0) );
          communicator.broadcast(current_time, 0);
          std::srand(current_time);
        }
    }
  else
    {
      if (!serial_training_set)
        {
          // seed the random number generator with the provided value
          // and the processor ID so that the seed is different
          // on different processors
          std::srand( static_cast<unsigned>( training_parameters_random_seed*(1+communicator.rank()) ));
        }
      else
        {
          // seed the random number generator with the provided value
          // so that the seed is the same on all processors
          std::srand( static_cast<unsigned>( training_parameters_random_seed ));
        }
    }

  // TODO - we don't support vector-data here yet. This would only apply in the case where
  // min or max are vector-valued, and all the generated points need to stay within those ranges.
  // But typically we expect that if we're calling this function, we only have 1 min and 1 max,
  // so the generated values are single-valued as well. The .get_value() calls will throw an error
  // if this is not the case.

  // initialize training_parameters_in
  const auto & [n_local_training_samples, first_local_index] =
    calculate_n_local_samples_and_index(communicator, n_global_training_samples_in,
                                        serial_training_set);
  for (const auto & pr : min_parameters)
    local_training_parameters_in[pr.first] = std::vector<RBParameter>(n_local_training_samples);

  // finally, set the values
  for (auto & [param_name, sample_vector] : local_training_parameters_in)
    {
      for (auto i : make_range(n_local_training_samples))
        {
          Real random_number = static_cast<Real>(std::rand()) / RAND_MAX; // in range [0,1]

          // Generate log10 scaled training parameters
          if (libmesh_map_find(log_param_scale, param_name))
            {
              Real log_min   = std::log10(min_parameters.get_value(param_name));
              Real log_range = std::log10(max_parameters.get_value(param_name) / min_parameters.get_value(param_name));

              sample_vector[i] = {std::pow(10., log_min + random_number*log_range )};
            }
          // Generate linearly scaled training parameters
          else
            {
              sample_vector[i] = {
                random_number * (max_parameters.get_value(param_name) -
                                 min_parameters.get_value(param_name)) +
                min_parameters.get_value(param_name)};
            }
        }
    }
  return {first_local_index, first_local_index+n_local_training_samples};
}
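
// A worked instance of the log10 scaling above (numbers are illustrative
// only): with min = 1.e-2, max = 1.e2 and random_number = 0.5, we get
// log_min = -2, log_range = log10(1e2/1e-2) = 4, and the sample is
// 10^(-2 + 0.5*4) = 10^0 = 1, i.e. the geometric midpoint of the range,
// whereas linear scaling would have produced the arithmetic midpoint
// 0.5*(1e2 - 1e-2) + 1e-2, approximately 50.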

template <class Base>
std::pair<std::size_t, std::size_t>
RBConstructionBase<Base>::generate_training_parameters_deterministic(const Parallel::Communicator & communicator,
                                                                     const std::map<std::string, bool> & log_param_scale,
                                                                     std::map<std::string, std::vector<RBParameter>> & local_training_parameters_in,
                                                                     const unsigned int n_global_training_samples_in,
                                                                     const RBParameters & min_parameters,
                                                                     const RBParameters & max_parameters,
                                                                     const bool serial_training_set)
{
  libmesh_assert_equal_to ( min_parameters.n_parameters(), max_parameters.n_parameters() );
  const unsigned int num_params = min_parameters.n_parameters();

  if (num_params == 0)
    return {0,0};

  if (num_params > 3)
    libmesh_not_implemented_msg("ERROR: Deterministic training sample generation "
                                "not implemented for more than three parameters.");

  // TODO - we don't support vector-data here yet. This would only apply in the case where
  // min or max are vector-valued, and all the generated points need to stay within those ranges.
  // But typically we expect that if we're calling this function, we only have 1 min and 1 max,
  // so the generated values are single-valued as well. The .get_value() calls will throw an error
  // if this is not the case.

  // Reinitialize training_parameters_in (but don't remove existing keys!)
  const auto & [n_local_training_samples, first_local_index] =
    calculate_n_local_samples_and_index(communicator, n_global_training_samples_in,
                                        serial_training_set);
  const auto last_local_index = first_local_index + n_local_training_samples;
  for (const auto & pr : min_parameters)
    local_training_parameters_in[pr.first] = std::vector<RBParameter>(n_local_training_samples);

  // n_training_samples_per_param has 3 entries, but entries after "num_params"
  // are unused so we just set their value to 1. We need to set it to 1 (rather
  // than 0) so that we don't skip the inner part of the triply-nested loop over
  // n_training_samples_per_param below.
  std::vector<unsigned int> n_training_samples_per_param(3);
  for (unsigned int param=0; param<3; param++)
    {
      if (param < num_params)
        {
          n_training_samples_per_param[param] =
            static_cast<unsigned int>( std::round(std::pow(static_cast<Real>(n_global_training_samples_in), 1./num_params)) );
        }
      else
        {
          n_training_samples_per_param[param] = 1;
        }
    }

  {
    // The current implementation assumes that we have the same number of
    // samples in each parameter, so we check that n_global_training_samples_in
    // is consistent with this assumption.
    unsigned int total_samples_check = 1;
    for (unsigned int n_samples : n_training_samples_per_param)
      {
        total_samples_check *= n_samples;
      }

    libmesh_error_msg_if(total_samples_check != n_global_training_samples_in,
                         "Error: Number of training samples = "
                         << n_global_training_samples_in
                         << " does not enable a uniform grid of samples with "
                         << num_params << " parameters. Try "
                         << total_samples_check << " samples instead?");
  }

  // First we make a list of training samples associated with each parameter,
  // then we take a tensor product to obtain the final set of training samples.
  std::vector<std::vector<Real>> training_samples_per_param(num_params);
  {
    unsigned int i = 0;
    for (const auto & pr : min_parameters)
      {
        const std::string & param_name = pr.first;
        const bool use_log_scaling = libmesh_map_find(log_param_scale, param_name);
        Real min_param = min_parameters.get_value(param_name);
        Real max_param = max_parameters.get_value(param_name);

        training_samples_per_param[i].resize(n_training_samples_per_param[i]);

        for (unsigned int j=0; j<n_training_samples_per_param[i]; j++)
          {
            // Generate log10 scaled training parameters
            if (use_log_scaling)
              {
                Real epsilon = 1.e-6; // Prevent rounding errors triggering asserts
                Real log_min   = std::log10(min_param + epsilon);
                Real log_range = std::log10( (max_param-epsilon) / (min_param+epsilon) );
                Real step_size = log_range /
                  std::max((unsigned int)1,(n_training_samples_per_param[i]-1));

                if (j<(n_training_samples_per_param[i]-1))
                  {
                    training_samples_per_param[i][j] = std::pow(10., log_min + j*step_size );
                  }
                else
                  {
                    // due to rounding error, the last parameter can be slightly
                    // bigger than max_parameters, hence snap back to the max
                    training_samples_per_param[i][j] = max_param;
                  }
              }
            else
              {
                // Generate linearly scaled training parameters
                Real step_size = (max_param - min_param) /
                  std::max((unsigned int)1,(n_training_samples_per_param[i]-1));
                training_samples_per_param[i][j] = j*step_size + min_param;
              }

          }
        i++;
      }
  }

  // Now load into training_samples_in
  {
    std::vector<unsigned int> indices(3);
    unsigned int index_count = 0;
    for (indices[0]=0; indices[0]<n_training_samples_per_param[0]; indices[0]++)
      {
        for (indices[1]=0; indices[1]<n_training_samples_per_param[1]; indices[1]++)
          {
            for (indices[2]=0; indices[2]<n_training_samples_per_param[2]; indices[2]++)
              {
                unsigned int param_count = 0;
                for (const auto & pr : min_parameters)
                  {
                    std::vector<RBParameter> & training_vector =
                      libmesh_map_find(local_training_parameters_in, pr.first);
                    if (first_local_index <= index_count && index_count < last_local_index)
                      training_vector[index_count - first_local_index] =
                        {training_samples_per_param[param_count][indices[param_count]]};

                    param_count++;
                  }
                index_count++;
              }
          }
      }
  }
  return {first_local_index, first_local_index+n_local_training_samples};
}
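
// A concrete instance of the tensor-product construction (illustrative
// numbers only): with num_params = 2 and n_global_training_samples_in = 9,
// each parameter gets round(9^(1/2)) = 3 grid points, and the triple loop
// (with the third extent fixed at 1) enumerates the 3 x 3 = 9 pairs in
// lexicographic order. Requesting e.g. 10 samples instead would fail the
// total_samples_check above, with the error message suggesting 9.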


template <class Base>
void RBConstructionBase<Base>::broadcast_parameters(const unsigned int proc_id)
{
  libmesh_assert_less (proc_id, this->n_processors());

  // create a copy of the current parameters
  RBParameters current_parameters = get_parameters();
  libmesh_error_msg_if(current_parameters.n_samples()!=1,
                       "Only single-sample RBParameter objects can be broadcast.");

  // Serialize the current_parameters to current_parameters_vector in order to broadcast.
  // We handle multiple samples and vector values.
  // However, the vector values are assumed to remain the same size across samples.
  const std::size_t nparams = current_parameters.n_parameters();
  const std::size_t nsamples = current_parameters.n_samples();

  // First we get the sizes of all the parameter value vectors.
  std::vector<std::size_t> param_value_sizes;
  param_value_sizes.reserve(nparams);
  for (const auto & pr : current_parameters)
    param_value_sizes.push_back(pr.second[0].size());

  // Broadcast the sizes vector and reserve memory.
  this->comm().broadcast(param_value_sizes, proc_id);
  std::size_t buffsize = std::accumulate(param_value_sizes.cbegin(), param_value_sizes.cend(), 0ul);
  std::vector<Real> serialized_parameters;
  serialized_parameters.reserve(buffsize);

  // Then we serialize the parameters/sample/value vectors into a single vector.
  for (const auto & pr : current_parameters)
    {
      for (const auto sample_idx : make_range(nsamples))
        serialized_parameters.insert(serialized_parameters.end(),
                                     pr.second[sample_idx].cbegin(),
                                     pr.second[sample_idx].cend());
    }

  // Do the broadcasts.
  this->comm().broadcast(serialized_parameters, proc_id);

  // Deserialize into the copy of the RBParameters object.
  std::size_t param_idx = 0;
  auto val_idx = serialized_parameters.cbegin();
  for (const auto & pr : current_parameters)
    {
      const std::size_t param_value_size = param_value_sizes[param_idx];
      for (const auto sample_idx: make_range(nsamples))
        {
          auto end_val_idx = std::next(val_idx, param_value_size);
          RBParameter sample_val(val_idx, end_val_idx);
          current_parameters.set_value(pr.first, sample_idx, sample_val);
          val_idx = end_val_idx;
        }
      ++param_idx;
    }

  // Overwrite the parameters globally.
  set_parameters(current_parameters);
}
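
// To illustrate the flat buffer layout used above (hypothetical values,
// shown only as a sketch): two parameters "a" -> {{1., 2.}} and
// "b" -> {{3.}} with nsamples = 1 give param_value_sizes = {2, 1} and
// serialized_parameters = {1., 2., 3.}. After the broadcast, receiving
// ranks slice the buffer back into per-parameter RBParameter vectors using
// the same sizes, which works because all ranks iterate the parameters in
// the same (sorted map) order.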

template <class Base>
void RBConstructionBase<Base>::set_training_random_seed(int seed)
{
  this->_training_parameters_random_seed = seed;
}

// Template specializations

// EigenSystem is only defined if we have SLEPc
#if defined(LIBMESH_HAVE_SLEPC)
template class LIBMESH_EXPORT RBConstructionBase<CondensedEigenSystem>;
#endif

template class LIBMESH_EXPORT RBConstructionBase<LinearImplicitSystem>;
template class LIBMESH_EXPORT RBConstructionBase<System>;

} // namespace libMesh