NonlinearSystemBase.C
1 /****************************************************************/
2 /* DO NOT MODIFY THIS HEADER */
3 /* MOOSE - Multiphysics Object Oriented Simulation Environment */
4 /* */
5 /* (c) 2010 Battelle Energy Alliance, LLC */
6 /* ALL RIGHTS RESERVED */
7 /* */
8 /* Prepared by Battelle Energy Alliance, LLC */
9 /* Under Contract No. DE-AC07-05ID14517 */
10 /* With the U. S. Department of Energy */
11 /* */
12 /* See COPYRIGHT for full restrictions */
13 /****************************************************************/
14 
15 #include "NonlinearSystemBase.h"
16 #include "AuxiliarySystem.h"
17 #include "Problem.h"
18 #include "FEProblem.h"
19 #include "MooseVariable.h"
20 #include "MooseVariableScalar.h"
21 #include "PetscSupport.h"
22 #include "Factory.h"
23 #include "ParallelUniqueId.h"
24 #include "ThreadedElementLoop.h"
25 #include "MaterialData.h"
26 #include "ComputeResidualThread.h"
27 #include "ComputeJacobianThread.h"
30 #include "ComputeDiracThread.h"
37 #include "TimeKernel.h"
38 #include "BoundaryCondition.h"
39 #include "PresetNodalBC.h"
40 #include "NodalBC.h"
41 #include "IntegratedBC.h"
42 #include "DGKernel.h"
43 #include "InterfaceKernel.h"
44 #include "ElementDamper.h"
45 #include "NodalDamper.h"
46 #include "GeneralDamper.h"
47 #include "DisplacedProblem.h"
48 #include "NearestNodeLocator.h"
49 #include "PenetrationLocator.h"
50 #include "NodalConstraint.h"
51 #include "NodeFaceConstraint.h"
52 #include "FaceFaceConstraint.h"
53 #include "ElemElemConstraint.h"
54 #include "ScalarKernel.h"
55 #include "Parser.h"
56 #include "Split.h"
58 #include "MooseMesh.h"
59 #include "MooseUtils.h"
60 #include "MooseApp.h"
61 #include "NodalKernel.h"
62 #include "DiracKernel.h"
64 #include "TimeIntegrator.h"
65 #include "Predictor.h"
66 #include "Assembly.h"
67 #include "ElementPairLocator.h"
68 #include "ODETimeKernel.h"
70 
71 // libMesh
72 #include "libmesh/nonlinear_solver.h"
73 #include "libmesh/quadrature_gauss.h"
74 #include "libmesh/dense_vector.h"
75 #include "libmesh/boundary_info.h"
76 #include "libmesh/petsc_matrix.h"
77 #include "libmesh/petsc_vector.h"
78 #include "libmesh/petsc_nonlinear_solver.h"
79 #include "libmesh/numeric_vector.h"
80 #include "libmesh/mesh.h"
81 #include "libmesh/dense_subvector.h"
82 #include "libmesh/dense_submatrix.h"
83 #include "libmesh/dof_map.h"
84 #include "libmesh/sparse_matrix.h"
86 
87 // PETSc
88 #ifdef LIBMESH_HAVE_PETSC
89 #include "petscsnes.h"
90 #if !PETSC_VERSION_LESS_THAN(3, 3, 0)
91 #include <PetscDMMoose.h>
92 EXTERN_C_BEGIN
93 extern PetscErrorCode DMCreate_Moose(DM);
94 EXTERN_C_END
95 #endif
96 #endif
97 
 98 NonlinearSystemBase::NonlinearSystemBase(FEProblemBase & fe_problem,
 99  System & sys,
100  const std::string & name)
101  : SystemBase(fe_problem, name, Moose::VAR_NONLINEAR),
102  ConsoleStreamInterface(fe_problem.getMooseApp()),
103  _fe_problem(fe_problem),
104  _sys(sys),
105  _last_rnorm(0.),
106  _last_nl_rnorm(0.),
107  _l_abs_step_tol(1e-10),
108  _initial_residual_before_preset_bcs(0.),
109  _initial_residual_after_preset_bcs(0.),
110  _current_nl_its(0),
111  _compute_initial_residual_before_preset_bcs(true),
112  _current_solution(NULL),
113  _residual_ghosted(NULL),
114  _serialized_solution(*NumericVector<Number>::build(_communicator).release()),
115  _solution_previous_nl(NULL),
116  _residual_copy(*NumericVector<Number>::build(_communicator).release()),
117  _u_dot(&addVector("u_dot", true, GHOSTED)),
118  _Re_time(NULL),
119  _Re_non_time(&addVector("Re_non_time", false, GHOSTED)),
120  _scalar_kernels(/*threaded=*/false),
121  _nodal_bcs(/*threaded=*/false),
122  _preset_nodal_bcs(/*threaded=*/false),
123  _splits(/*threaded=*/false),
124  _increment_vec(NULL),
125  _pc_side(Moose::PCS_DEFAULT),
126  _ksp_norm(Moose::KSPN_UNPRECONDITIONED),
127  _use_finite_differenced_preconditioner(false),
128  _have_decomposition(false),
129  _use_field_split_preconditioner(false),
130  _add_implicit_geometric_coupling_entries_to_jacobian(false),
131  _assemble_constraints_separately(false),
132  _need_serialized_solution(false),
133  _need_residual_copy(false),
134  _need_residual_ghosted(false),
135  _debugging_residuals(false),
136  _doing_dg(false),
137  _n_iters(0),
138  _n_linear_iters(0),
139  _n_residual_evaluations(0),
140  _final_residual(0.),
141  _computing_initial_residual(false),
142  _print_all_var_norms(false),
143  _has_save_in(false),
144  _has_diag_save_in(false),
145  _has_nodalbc_save_in(false),
146  _has_nodalbc_diag_save_in(false)
147 {
148 }
149 
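// The destructor must free _serialized_solution and _residual_copy by hand: the
// constructor builds them with NumericVector<Number>::build(...).release(), so no
// smart pointer owns them.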
150 NonlinearSystemBase::~NonlinearSystemBase()
151 {
152  delete &_serialized_solution;
153  delete &_residual_copy;
154 }
155 
156 void
158 {
159  if (_fe_problem.hasDampers())
160  setupDampers();
161 
162  _current_solution = _sys.current_local_solution.get();
163 
165  _serialized_solution.init(_sys.n_dofs(), false, SERIAL);
166 
168  _residual_copy.init(_sys.n_dofs(), false, SERIAL);
169 }
170 
171 void
173 {
174  system().set_basic_system_only();
175  nonlinearSolver()->jacobian = NULL;
176 }
177 
178 void
180 {
182  _solution_previous_nl = &addVector("u_previous_newton", true, GHOSTED);
183 }
184 
185 void
187 {
188  // call parent
190  // and update _current_solution
191  _current_solution = _sys.current_local_solution.get();
192 }
193 
194 void
196 {
197  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
198  {
199  _kernels.initialSetup(tid);
202  if (_doing_dg)
208  }
213 }
214 
215 void
217 {
218  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
219  {
220  _kernels.timestepSetup(tid);
223  if (_doing_dg)
229  }
234 }
235 
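// setDecomposition() registers the name of the (single) top-level Split that
// decomposes this system's fields; the field-decomposition setup below then
// fetches that Split via getSplit() and calls setup() on it.
//
// Usage sketch (hypothetical call site): a field-split preconditioner would pass
// exactly one split name, e.g. setDecomposition({"top_split"}), while an empty
// vector turns the decomposition off.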
236 void
237 NonlinearSystemBase::setDecomposition(const std::vector<std::string> & splits)
238 {
240  if (splits.size() && splits.size() != 1)
241  {
242  std::ostringstream err;
243  err << "Only a single top-level split is allowed in a Problem's decomposition.";
244  mooseError(err.str());
245  }
246  if (splits.size())
247  {
248  _decomposition_split = splits[0];
249  _have_decomposition = true;
250  }
251  else
252  {
253  _have_decomposition = false;
254  }
255 }
256 
257 void
259 {
260  if (!_have_decomposition)
261  return;
262 
263  std::shared_ptr<Split> top_split = getSplit(_decomposition_split);
264  top_split->setup();
265 }
266 
267 void
268 NonlinearSystemBase::addTimeIntegrator(const std::string & type,
269  const std::string & name,
270  InputParameters parameters)
271 {
272  parameters.set<SystemBase *>("_sys") = this;
273 
274  std::shared_ptr<TimeIntegrator> ti = _factory.create<TimeIntegrator>(type, name, parameters);
275  _time_integrator = ti;
276 }
277 
278 void
279 NonlinearSystemBase::addKernel(const std::string & kernel_name,
280  const std::string & name,
281  InputParameters parameters)
282 {
283  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
284  {
285  // Create the kernel object via the factory and add to warehouse
286  std::shared_ptr<KernelBase> kernel =
287  _factory.create<KernelBase>(kernel_name, name, parameters, tid);
288  _kernels.addObject(kernel, tid);
289 
290  // Store time/non-time kernels separately
291  std::shared_ptr<TimeKernel> t_kernel = std::dynamic_pointer_cast<TimeKernel>(kernel);
292  if (t_kernel)
293  _time_kernels.addObject(kernel, tid);
294  else
295  _non_time_kernels.addObject(kernel, tid);
296 
297  addEigenKernels(kernel, tid);
298  }
299 
300  if (parameters.get<std::vector<AuxVariableName>>("save_in").size() > 0)
301  _has_save_in = true;
302  if (parameters.get<std::vector<AuxVariableName>>("diag_save_in").size() > 0)
303  _has_diag_save_in = true;
304 }
305 
306 void
307 NonlinearSystemBase::addNodalKernel(const std::string & kernel_name,
308  const std::string & name,
309  InputParameters parameters)
310 {
311  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
312  {
313  // Create the kernel object via the factory and add to the warehouse
314  std::shared_ptr<NodalKernel> kernel =
315  _factory.create<NodalKernel>(kernel_name, name, parameters, tid);
316  _nodal_kernels.addObject(kernel, tid);
317  }
318 
319  if (parameters.get<std::vector<AuxVariableName>>("save_in").size() > 0)
320  _has_save_in = true;
321  if (parameters.get<std::vector<AuxVariableName>>("diag_save_in").size() > 0)
322  _has_diag_save_in = true;
323 }
324 
325 void
326 NonlinearSystemBase::addScalarKernel(const std::string & kernel_name,
327  const std::string & name,
328  InputParameters parameters)
329 {
330  std::shared_ptr<ScalarKernel> kernel =
331  _factory.create<ScalarKernel>(kernel_name, name, parameters);
332  _scalar_kernels.addObject(kernel);
333 
334  // Store time/non-time ScalarKernels separately
335  ODETimeKernel * t_kernel = dynamic_cast<ODETimeKernel *>(kernel.get());
336 
337  if (t_kernel)
339  else
341 }
342 
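// addBoundaryCondition() creates the BC on thread 0 first so it can be classified:
// NodalBCs (including PresetNodalBCs) are stored unthreaded, while IntegratedBCs
// are re-created once per thread and registered with each thread's warehouse.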
343 void
344 NonlinearSystemBase::addBoundaryCondition(const std::string & bc_name,
345  const std::string & name,
346  InputParameters parameters)
347 {
348  // ThreadID
349  THREAD_ID tid = 0;
350 
351  // Create the object
352  std::shared_ptr<BoundaryCondition> bc =
353  _factory.create<BoundaryCondition>(bc_name, name, parameters, tid);
354 
355  // Active BoundaryIDs for the object
356  const std::set<BoundaryID> & boundary_ids = bc->boundaryIDs();
357  _vars[tid].addBoundaryVar(boundary_ids, &bc->variable());
358 
359  // Cast to the various types of BCs
360  std::shared_ptr<NodalBC> nbc = std::dynamic_pointer_cast<NodalBC>(bc);
361  std::shared_ptr<IntegratedBC> ibc = std::dynamic_pointer_cast<IntegratedBC>(bc);
362 
363  // NodalBC
364  if (nbc)
365  {
366  _nodal_bcs.addObject(nbc);
367  _vars[tid].addBoundaryVars(boundary_ids, nbc->getCoupledVars());
368 
369  if (parameters.get<std::vector<AuxVariableName>>("save_in").size() > 0)
370  _has_nodalbc_save_in = true;
371  if (parameters.get<std::vector<AuxVariableName>>("diag_save_in").size() > 0)
373 
374  // PresetNodalBC
375  std::shared_ptr<PresetNodalBC> pnbc = std::dynamic_pointer_cast<PresetNodalBC>(bc);
376  if (pnbc)
378  }
379 
380  // IntegratedBC
381  else if (ibc)
382  {
383  _integrated_bcs.addObject(ibc, tid);
384  _vars[tid].addBoundaryVars(boundary_ids, ibc->getCoupledVars());
385 
386  if (parameters.get<std::vector<AuxVariableName>>("save_in").size() > 0)
387  _has_save_in = true;
388  if (parameters.get<std::vector<AuxVariableName>>("diag_save_in").size() > 0)
389  _has_diag_save_in = true;
390 
391  for (tid = 1; tid < libMesh::n_threads(); tid++)
392  {
393  // Create the object
394  bc = _factory.create<BoundaryCondition>(bc_name, name, parameters, tid);
395 
396  // Active BoundaryIDs for the object
397  const std::set<BoundaryID> & boundary_ids = bc->boundaryIDs();
398  _vars[tid].addBoundaryVar(boundary_ids, &bc->variable());
399 
400  ibc = std::static_pointer_cast<IntegratedBC>(bc);
401 
402  _integrated_bcs.addObject(ibc, tid);
403  _vars[tid].addBoundaryVars(boundary_ids, ibc->getCoupledVars());
404  }
405  }
406 
407  else
408  mooseError("Unknown BoundaryCondition type for object named ", bc->name());
409 }
410 
411 void
412 NonlinearSystemBase::addConstraint(const std::string & c_name,
413  const std::string & name,
414  InputParameters parameters)
415 {
416  std::shared_ptr<Constraint> constraint = _factory.create<Constraint>(c_name, name, parameters);
417  _constraints.addObject(constraint);
418 
419  if (constraint && constraint->addCouplingEntriesToJacobian())
421 }
422 
423 void
424 NonlinearSystemBase::addDiracKernel(const std::string & kernel_name,
425  const std::string & name,
426  InputParameters parameters)
427 {
428  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
429  {
430  std::shared_ptr<DiracKernel> kernel =
431  _factory.create<DiracKernel>(kernel_name, name, parameters, tid);
432  _dirac_kernels.addObject(kernel, tid);
433  }
434 }
435 
436 void
437 NonlinearSystemBase::addDGKernel(std::string dg_kernel_name,
438  const std::string & name,
439  InputParameters parameters)
440 {
441  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); ++tid)
442  {
443  std::shared_ptr<DGKernel> dg_kernel =
444  _factory.create<DGKernel>(dg_kernel_name, name, parameters, tid);
445  _dg_kernels.addObject(dg_kernel, tid);
446  }
447 
448  _doing_dg = true;
449 }
450 
451 void
452 NonlinearSystemBase::addInterfaceKernel(std::string interface_kernel_name,
453  const std::string & name,
454  InputParameters parameters)
455 {
456  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); ++tid)
457  {
458  std::shared_ptr<InterfaceKernel> interface_kernel =
459  _factory.create<InterfaceKernel>(interface_kernel_name, name, parameters, tid);
460 
461  const std::set<BoundaryID> & boundary_ids = interface_kernel->boundaryIDs();
462  _vars[tid].addBoundaryVar(boundary_ids, &interface_kernel->variable());
463 
464  _interface_kernels.addObject(interface_kernel, tid);
465  _vars[tid].addBoundaryVars(boundary_ids, interface_kernel->getCoupledVars());
466  }
467 
468  _doing_dg = true;
469 }
470 
471 void
472 NonlinearSystemBase::addDamper(const std::string & damper_name,
473  const std::string & name,
474  InputParameters parameters)
475 {
476  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); ++tid)
477  {
478  std::shared_ptr<Damper> damper = _factory.create<Damper>(damper_name, name, parameters, tid);
479 
480  // Attempt to cast to the damper types
481  std::shared_ptr<ElementDamper> ed = std::dynamic_pointer_cast<ElementDamper>(damper);
482  std::shared_ptr<NodalDamper> nd = std::dynamic_pointer_cast<NodalDamper>(damper);
483  std::shared_ptr<GeneralDamper> gd = std::dynamic_pointer_cast<GeneralDamper>(damper);
484 
485  if (gd)
486  {
488  break; // not threaded
489  }
490  else if (ed)
491  _element_dampers.addObject(ed, tid);
492  else if (nd)
493  _nodal_dampers.addObject(nd, tid);
494  else
495  mooseError("Invalid damper type");
496  }
497 }
498 
499 void
500 NonlinearSystemBase::addSplit(const std::string & split_name,
501  const std::string & name,
502  InputParameters parameters)
503 {
504  std::shared_ptr<Split> split = _factory.create<Split>(split_name, name, parameters);
505  _splits.addObject(split);
506 }
507 
508 std::shared_ptr<Split>
509 NonlinearSystemBase::getSplit(const std::string & name)
510 {
511  return _splits.getActiveObject(name);
512 }
513 
514 void
515 NonlinearSystemBase::zeroVectorForResidual(const std::string & vector_name)
516 {
517  for (unsigned int i = 0; i < _vecs_to_zero_for_residual.size(); ++i)
518  if (vector_name == _vecs_to_zero_for_residual[i])
519  return;
520 
521  _vecs_to_zero_for_residual.push_back(vector_name);
522 }
523 
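// computeResidual() drives a full residual evaluation: it zeroes any vectors
// registered through zeroVectorForResidual(), zeroes the time and non-time
// residual vectors, lets the time integrator (if any) combine them into the
// final residual, applies NodalBCs last, and refreshes the ghosted/aux copies
// that save_in and residual debugging rely on.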
524 void
525 NonlinearSystemBase::computeResidual(NumericVector<Number> & residual, Moose::KernelType type)
526 {
527  Moose::perf_log.push("compute_residual()", "Execution");
528 
530 
532 
533  for (const auto & numeric_vec : _vecs_to_zero_for_residual)
534  if (hasVector(numeric_vec))
535  {
536  NumericVector<Number> & vec = getVector(numeric_vec);
537  vec.close();
538  vec.zero();
539  }
540 
541  try
542  {
543  residual.zero();
544  if (_Re_time)
545  _Re_time->zero();
546  _Re_non_time->zero();
548  if (_Re_time)
549  _Re_time->close();
550  _Re_non_time->close();
551  if (_time_integrator)
552  _time_integrator->postStep(residual);
553  else
554  residual += *_Re_non_time;
555  residual.close();
556 
557  computeNodalBCs(residual, type);
558 
559  // If we are debugging residuals we need one more assignment to have the ghosted copy up to date
561  {
562  *_residual_ghosted = residual;
563  _residual_ghosted->close();
564  }
565 
566  // Need to close and update the aux system in case residuals were saved to it.
569  if (hasSaveIn())
571  }
572  catch (MooseException & e)
573  {
574  // The buck stops here, we have already handled the exception by
575  // calling stopSolve(), it is now up to PETSc to return a
576  // "diverged" reason during the next solve.
577  }
578 
579  Moose::enableFPE(false);
580 
581  Moose::perf_log.pop("compute_residual()", "Execution");
582 }
583 
584 void
586 {
587  if (_time_integrator)
588  _time_integrator->preSolve();
589  if (_predictor.get())
590  _predictor->timestepSetup();
591 }
592 
593 void
595 {
596  NumericVector<Number> & initial_solution(solution());
597  if (_predictor.get() && _predictor->shouldApply())
598  {
599  _predictor->apply(initial_solution);
600  _fe_problem.predictorCleanup(initial_solution);
601  }
602 
603  // do nodal BC
605  for (const auto & bnode : bnd_nodes)
606  {
607  BoundaryID boundary_id = bnode->_bnd_id;
608  Node * node = bnode->_node;
609 
610  if (node->processor_id() == processor_id())
611  {
612  // reinit variables in nodes
613  _fe_problem.reinitNodeFace(node, boundary_id, 0);
614 
616  {
617  const auto & preset_bcs = _preset_nodal_bcs.getActiveBoundaryObjects(boundary_id);
618  for (const auto & preset_bc : preset_bcs)
619  preset_bc->computeValue(initial_solution);
620  }
621  }
622  }
623 
624  _sys.solution->close();
625  update();
626 
627  // Set constraint slave values
628  setConstraintSlaveValues(initial_solution, false);
629 
631  setConstraintSlaveValues(initial_solution, true);
632 }
633 
634 void
635 NonlinearSystemBase::setPredictor(std::shared_ptr<Predictor> predictor)
636 {
637  _predictor = predictor;
638 }
639 
640 void
642 {
643  _kernels.subdomainSetup(subdomain, tid);
644  _nodal_kernels.subdomainSetup(subdomain, tid);
645  _element_dampers.subdomainSetup(subdomain, tid);
646  _nodal_dampers.subdomainSetup(subdomain, tid);
647 }
648 
649 NumericVector<Number> &
651 {
652  return *_u_dot;
653 }
654 
655 NumericVector<Number> &
657 {
658  switch (type)
659  {
660  case Moose::KT_TIME:
661  if (!_Re_time)
662  _Re_time = &addVector("Re_time", false, GHOSTED);
663  return *_Re_time;
664  case Moose::KT_NONTIME:
665  return *_Re_non_time;
666  case Moose::KT_ALL:
667  return *_Re_non_time;
668 
669  default:
670  mooseError("Trying to get residual vector that is not available");
671  }
672 }
673 
674 bool
676 {
677  switch (type)
678  {
679  case Moose::KT_TIME:
680  return _Re_time;
681  case Moose::KT_NONTIME:
682  return _Re_non_time;
683  case Moose::KT_ALL:
684  return _Re_non_time;
685 
686  default:
687  mooseError("Trying to get residual vector that is not available");
688  }
689 }
690 
691 void
693 {
694  if (_time_integrator)
695  {
696  _time_integrator->preStep();
697  _time_integrator->computeTimeDerivatives();
698  }
699 }
700 
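// enforceNodalConstraintsResidual() handles node-to-node constraints: for every
// active NodalConstraint it reinits the master nodes and the slave (neighbor)
// nodes, asks the constraint for its residual contribution, and then adds the
// cached values directly into the residual vector.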
701 void
702 NonlinearSystemBase::enforceNodalConstraintsResidual(NumericVector<Number> & residual)
703 {
704  THREAD_ID tid = 0; // constraints are going to be done single-threaded
705  residual.close();
707  {
708  const auto & ncs = _constraints.getActiveNodalConstraints();
709  for (const auto & nc : ncs)
710  {
711  std::vector<dof_id_type> & slave_node_ids = nc->getSlaveNodeId();
712  std::vector<dof_id_type> & master_node_ids = nc->getMasterNodeId();
713 
714  if ((slave_node_ids.size() > 0) && (master_node_ids.size() > 0))
715  {
716  _fe_problem.reinitNodes(master_node_ids, tid);
717  _fe_problem.reinitNodesNeighbor(slave_node_ids, tid);
718  nc->computeResidual(residual);
719  }
720  }
721  _fe_problem.addCachedResidualDirectly(residual, tid);
722  residual.close();
723  }
724 }
725 
726 void
727 NonlinearSystemBase::enforceNodalConstraintsJacobian(SparseMatrix<Number> & jacobian)
728 {
729  THREAD_ID tid = 0; // constraints are going to be done single-threaded
730  jacobian.close();
732  {
733  const auto & ncs = _constraints.getActiveNodalConstraints();
734  for (const auto & nc : ncs)
735  {
736  std::vector<dof_id_type> & slave_node_ids = nc->getSlaveNodeId();
737  std::vector<dof_id_type> & master_node_ids = nc->getMasterNodeId();
738 
739  if ((slave_node_ids.size() > 0) && (master_node_ids.size() > 0))
740  {
741  _fe_problem.reinitNodes(master_node_ids, tid);
742  _fe_problem.reinitNodesNeighbor(slave_node_ids, tid);
743  nc->computeJacobian(jacobian);
744  }
745  }
746  _fe_problem.addCachedJacobian(jacobian, tid);
747  jacobian.close();
748  }
749 }
750 
751 void
752 NonlinearSystemBase::setConstraintSlaveValues(NumericVector<Number> & solution, bool displaced)
753 {
754  std::map<std::pair<unsigned int, unsigned int>, PenetrationLocator *> * penetration_locators =
755  NULL;
756 
757  if (!displaced)
758  {
759  GeometricSearchData & geom_search_data = _fe_problem.geomSearchData();
760  penetration_locators = &geom_search_data._penetration_locators;
761  }
762  else
763  {
764  GeometricSearchData & displaced_geom_search_data =
765  _fe_problem.getDisplacedProblem()->geomSearchData();
766  penetration_locators = &displaced_geom_search_data._penetration_locators;
767  }
768 
769  bool constraints_applied = false;
770 
771  for (const auto & it : *penetration_locators)
772  {
773  PenetrationLocator & pen_loc = *(it.second);
774 
775  std::vector<dof_id_type> & slave_nodes = pen_loc._nearest_node._slave_nodes;
776 
777  BoundaryID slave_boundary = pen_loc._slave_boundary;
778 
779  if (_constraints.hasActiveNodeFaceConstraints(slave_boundary, displaced))
780  {
781  const auto & constraints =
782  _constraints.getActiveNodeFaceConstraints(slave_boundary, displaced);
783 
784  for (unsigned int i = 0; i < slave_nodes.size(); i++)
785  {
786  dof_id_type slave_node_num = slave_nodes[i];
787  Node & slave_node = _mesh.nodeRef(slave_node_num);
788 
789  if (slave_node.processor_id() == processor_id())
790  {
791  if (pen_loc._penetration_info[slave_node_num])
792  {
793  PenetrationInfo & info = *pen_loc._penetration_info[slave_node_num];
794 
795  const Elem * master_elem = info._elem;
796  unsigned int master_side = info._side_num;
797 
798  // reinit variables at the node
799  _fe_problem.reinitNodeFace(&slave_node, slave_boundary, 0);
800 
802 
803  std::vector<Point> points;
804  points.push_back(info._closest_point);
805 
806  // reinit variables on the master element's face at the contact point
807  _fe_problem.setNeighborSubdomainID(master_elem, 0);
808  _fe_problem.reinitNeighborPhys(master_elem, master_side, points, 0);
809 
810  for (const auto & nfc : constraints)
811  if (nfc->shouldApply())
812  {
813  constraints_applied = true;
814  nfc->computeSlaveValue(solution);
815  }
816  }
817  }
818  }
819  }
820  }
821 
822  // See if constraints were applied anywhere
823  _communicator.max(constraints_applied);
824 
825  if (constraints_applied)
826  {
827  solution.close();
828  update();
829  }
830 }
831 
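// constraintResiduals() applies the geometric-search based constraints: node-face
// (contact) constraints found through the penetration locators, mortar
// FaceFaceConstraints, and ElemElemConstraints from the element pair locators.
// Constraints may either overwrite the slave residual entries or accumulate into
// them, and _assemble_constraints_separately (set in the constructor) appears to
// control whether each penetration locator's contributions are communicated and
// assembled on their own.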
832 void
833 NonlinearSystemBase::constraintResiduals(NumericVector<Number> & residual, bool displaced)
834 {
835  // Make sure the residual is in a good state
836  residual.close();
837 
838  std::map<std::pair<unsigned int, unsigned int>, PenetrationLocator *> * penetration_locators =
839  NULL;
840 
841  if (!displaced)
842  {
843  GeometricSearchData & geom_search_data = _fe_problem.geomSearchData();
844  penetration_locators = &geom_search_data._penetration_locators;
845  }
846  else
847  {
848  GeometricSearchData & displaced_geom_search_data =
849  _fe_problem.getDisplacedProblem()->geomSearchData();
850  penetration_locators = &displaced_geom_search_data._penetration_locators;
851  }
852 
853  bool constraints_applied;
854  bool residual_has_inserted_values = false;
856  constraints_applied = false;
857  for (const auto & it : *penetration_locators)
858  {
860  {
861  // Reset the constraint_applied flag before each new constraint, as they need to be assembled
862  // separately
863  constraints_applied = false;
864  }
865  PenetrationLocator & pen_loc = *(it.second);
866 
867  std::vector<dof_id_type> & slave_nodes = pen_loc._nearest_node._slave_nodes;
868 
869  BoundaryID slave_boundary = pen_loc._slave_boundary;
870 
871  if (_constraints.hasActiveNodeFaceConstraints(slave_boundary, displaced))
872  {
873  const auto & constraints =
874  _constraints.getActiveNodeFaceConstraints(slave_boundary, displaced);
875 
876  for (unsigned int i = 0; i < slave_nodes.size(); i++)
877  {
878  dof_id_type slave_node_num = slave_nodes[i];
879  Node & slave_node = _mesh.nodeRef(slave_node_num);
880 
881  if (slave_node.processor_id() == processor_id())
882  {
883  if (pen_loc._penetration_info[slave_node_num])
884  {
885  PenetrationInfo & info = *pen_loc._penetration_info[slave_node_num];
886 
887  const Elem * master_elem = info._elem;
888  unsigned int master_side = info._side_num;
889 
890  // *These next steps MUST be done in this order!*
891 
892  // This reinits the variables that exist on the slave node
893  _fe_problem.reinitNodeFace(&slave_node, slave_boundary, 0);
894 
895  // This will set aside residual and jacobian space for the variables that have dofs on
896  // the slave node
898 
899  std::vector<Point> points;
900  points.push_back(info._closest_point);
901 
902  // reinit variables on the master element's face at the contact point
903  _fe_problem.setNeighborSubdomainID(master_elem, 0);
904  _fe_problem.reinitNeighborPhys(master_elem, master_side, points, 0);
905 
906  for (const auto & nfc : constraints)
907  if (nfc->shouldApply())
908  {
909  constraints_applied = true;
910  nfc->computeResidual();
911 
912  if (nfc->overwriteSlaveResidual())
913  {
914  _fe_problem.setResidual(residual, 0);
915  residual_has_inserted_values = true;
916  }
917  else
920  }
921  }
922  }
923  }
924  }
926  {
 927  // Make sure that slave contributions to the master are assembled, and ghosts have been exchanged,
 928  // as current masters might become slaves on the next iteration
929  // and will need to contribute their former slaves' contributions
930  // to the future masters.
931  // See if constraints were applied anywhere
932  _communicator.max(constraints_applied);
933 
934  if (constraints_applied)
935  {
936  // If any of the above constraints inserted values in the residual, it needs to be assembled
937  // before adding the cached residuals below.
938  _communicator.max(residual_has_inserted_values);
939  if (residual_has_inserted_values)
940  {
941  residual.close();
942  residual_has_inserted_values = false;
943  }
945  residual.close();
946 
948  *_residual_ghosted = residual;
949  }
950  }
951  }
953  {
954  _communicator.max(constraints_applied);
955 
956  if (constraints_applied)
957  {
958  // If any of the above constraints inserted values in the residual, it needs to be assembled
959  // before adding the cached residuals below.
960  _communicator.max(residual_has_inserted_values);
961  if (residual_has_inserted_values)
962  residual.close();
963 
965  residual.close();
966 
968  *_residual_ghosted = residual;
969  }
970  }
971 
972  THREAD_ID tid = 0;
973  // go over mortar interfaces
974  auto & ifaces = _mesh.getMortarInterfaces();
975  for (const auto & iface : ifaces)
976  {
977  if (_constraints.hasActiveFaceFaceConstraints(iface->_name))
978  {
979  const auto & face_constraints = _constraints.getActiveFaceFaceConstraints(iface->_name);
980 
981  // go over elements on that interface
982  const std::vector<Elem *> & elems = iface->_elems;
983  for (const auto & elem : elems)
984  {
 985  // for each element, process the constraints on that interface
987  _fe_problem.prepare(elem, tid);
988  _fe_problem.reinitElem(elem, tid);
989 
990  for (const auto & ffc : face_constraints)
991  {
992  ffc->reinit();
993  ffc->computeResidual();
994  }
996 
997  // evaluate residuals that go into master and slave side
998  for (const auto & ffc : face_constraints)
999  {
1000  ffc->reinitSide(Moose::Master);
1001  ffc->computeResidualSide(Moose::Master);
1003 
1004  ffc->reinitSide(Moose::Slave);
1005  ffc->computeResidualSide(Moose::Slave);
1007  }
1008  }
1010  }
1011  }
1012 
1013  // go over element-element constraint interface
1014  std::map<unsigned int, std::shared_ptr<ElementPairLocator>> * element_pair_locators = nullptr;
1015 
1016  if (!displaced)
1017  {
1018  GeometricSearchData & geom_search_data = _fe_problem.geomSearchData();
1019  element_pair_locators = &geom_search_data._element_pair_locators;
1020  }
1021  else
1022  {
1023  GeometricSearchData & displaced_geom_search_data =
1024  _fe_problem.getDisplacedProblem()->geomSearchData();
1025  element_pair_locators = &displaced_geom_search_data._element_pair_locators;
1026  }
1027 
1028  for (const auto & it : *element_pair_locators)
1029  {
1030  ElementPairLocator & elem_pair_loc = *(it.second);
1031 
1033  {
1034  // ElemElemConstraint objects
1035  const auto & _element_constraints = _constraints.getActiveElemElemConstraints(it.first);
1036 
1037  // go over pair elements
1038  const std::list<std::pair<const Elem *, const Elem *>> & elem_pairs =
1039  elem_pair_loc.getElemPairs();
1040  for (const auto & pr : elem_pairs)
1041  {
1042  const Elem * elem1 = pr.first;
1043  const Elem * elem2 = pr.second;
1044 
1045  if (elem1->processor_id() != processor_id())
1046  continue;
1047 
1048  const ElementPairInfo & info = elem_pair_loc.getElemPairInfo(pr);
1049 
 1050  // for each element pair, process the active element constraints
1051  for (const auto & ec : _element_constraints)
1052  {
1053  _fe_problem.setCurrentSubdomainID(elem1, tid);
1055  _fe_problem.setNeighborSubdomainID(elem2, tid);
1057 
1058  ec->subProblem().prepareShapes(ec->variable().number(), tid);
1059  ec->subProblem().prepareNeighborShapes(ec->variable().number(), tid);
1060 
1061  ec->reinit(info);
1062  ec->computeResidual();
1065  }
1067  }
1068  }
1069  }
1070 }
1071 
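// computeResidualInternal() assembles the non-time residual in stages: threaded
// element loops for the regular kernels, then ScalarKernels (selected by the
// requested KernelType), block- and boundary-restricted NodalKernels, Dirac
// kernels, and finally the nodal and geometric constraint contributions. The
// serialized/ghosted residual copies are refreshed along the way when they are
// needed.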
1072 void
1073 NonlinearSystemBase::computeResidualInternal(Moose::KernelType type)
1074 {
1075  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
1076  {
1077  _kernels.residualSetup(tid);
1080  if (_doing_dg)
1086  }
1091 
1092  // reinit scalar variables
1093  for (unsigned int tid = 0; tid < libMesh::n_threads(); tid++)
1095 
1096  // residual contributions from the domain
1097  PARALLEL_TRY
1098  {
1099  Moose::perf_log.push("computeKernels()", "Execution");
1100 
1101  ConstElemRange & elem_range = *_mesh.getActiveLocalElementRange();
1102 
1104 
1105  Threads::parallel_reduce(elem_range, cr);
1106 
1107  unsigned int n_threads = libMesh::n_threads();
1108  for (unsigned int i = 0; i < n_threads;
1109  i++) // Add any cached residuals that might be hanging around
1111 
1112  Moose::perf_log.pop("computeKernels()", "Execution");
1113  }
1114  PARALLEL_CATCH;
1115 
1116  // residual contributions from the scalar kernels
1117  PARALLEL_TRY
1118  {
1119  // do scalar kernels (not sure how to thread this)
1121  {
 1122  Moose::perf_log.push("computeScalarKernels()", "Execution");
1123 
1124  const std::vector<std::shared_ptr<ScalarKernel>> * scalars;
1125 
1126  // Use the right subset of ScalarKernels depending on the KernelType.
1127  switch (type)
1128  {
1129  case Moose::KT_ALL:
1130  scalars = &(_scalar_kernels.getActiveObjects());
1131  break;
1132 
1133  case Moose::KT_TIME:
1134  scalars = &(_time_scalar_kernels.getActiveObjects());
1135  break;
1136 
1137  case Moose::KT_NONTIME:
1139  break;
1140 
1141  default:
1142  mooseError("Unrecognized KernelType in computeResidualInternal().");
1143  }
1144 
1145  bool have_scalar_contributions = false;
1146  for (const auto & scalar_kernel : *scalars)
1147  {
1148  scalar_kernel->reinit();
1149  const std::vector<dof_id_type> & dof_indices = scalar_kernel->variable().dofIndices();
1150  const DofMap & dof_map = scalar_kernel->variable().dofMap();
1151  const dof_id_type first_dof = dof_map.first_dof();
1152  const dof_id_type end_dof = dof_map.end_dof();
1153  for (dof_id_type dof : dof_indices)
1154  {
1155  if (dof >= first_dof && dof < end_dof)
1156  {
1157  scalar_kernel->computeResidual();
1158  have_scalar_contributions = true;
1159  break;
1160  }
1161  }
1162  }
1163  if (have_scalar_contributions)
1165 
 1166  Moose::perf_log.pop("computeScalarKernels()", "Execution");
1167  }
1168  }
1169  PARALLEL_CATCH;
1170 
1171  // residual contributions from Block NodalKernels
1172  PARALLEL_TRY
1173  {
1175  {
 1176  Moose::perf_log.push("computeNodalKernels()", "Execution");
1177 
1179 
1180  ConstNodeRange & range = *_mesh.getLocalNodeRange();
1181 
1182  if (range.begin() != range.end())
1183  {
1184  _fe_problem.reinitNode(*range.begin(), 0);
1185 
1186  Threads::parallel_reduce(range, cnk);
1187 
1188  unsigned int n_threads = libMesh::n_threads();
1189  for (unsigned int i = 0; i < n_threads;
1190  i++) // Add any cached residuals that might be hanging around
1192  }
1193 
 1194  Moose::perf_log.pop("computeNodalKernels()", "Execution");
1195  }
1196  }
1197  PARALLEL_CATCH;
1198 
1199  // residual contributions from boundary NodalKernels
1200  PARALLEL_TRY
1201  {
1203  {
 1204  Moose::perf_log.push("computeNodalKernelBCs()", "Execution");
1205 
1207 
1208  ConstBndNodeRange & bnd_node_range = *_mesh.getBoundaryNodeRange();
1209 
1210  Threads::parallel_reduce(bnd_node_range, cnk);
1211 
1212  unsigned int n_threads = libMesh::n_threads();
1213  for (unsigned int i = 0; i < n_threads;
1214  i++) // Add any cached residuals that might be hanging around
1216 
 1217  Moose::perf_log.pop("computeNodalKernelBCs()", "Execution");
1218  }
1219  }
1220  PARALLEL_CATCH;
1221 
1222  if (_need_residual_copy)
1223  {
1224  _Re_non_time->close();
1225  _Re_non_time->localize(_residual_copy);
1226  }
1227 
1229  {
1230  _Re_non_time->close();
1232  _residual_ghosted->close();
1233  }
1234 
1235  PARALLEL_TRY { computeDiracContributions(); }
1236  PARALLEL_CATCH;
1237 
1239  {
1240  PARALLEL_TRY { enforceNodalConstraintsResidual(*_Re_non_time); }
1241  PARALLEL_CATCH;
1242  _Re_non_time->close();
1243  }
1244 
1245  // Add in Residual contributions from Constraints
1247  {
1248  PARALLEL_TRY
1249  {
1250  // Undisplaced Constraints
1252 
1253  // Displaced Constraints
1256  }
1257  PARALLEL_CATCH;
1258  _Re_non_time->close();
1259  }
1260 }
1261 
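// computeNodalBCs() is the last step of a residual evaluation: for every local
// boundary node it reinits the nodal variables and lets each active NodalBC that
// shouldApply() write its residual; for eigen calculations (KT_EIGEN) the BCs are
// flagged through setBCOnEigen() first.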
1262 void
1263 NonlinearSystemBase::computeNodalBCs(NumericVector<Number> & residual,
1264  Moose::KernelType kernel_type)
1265 {
 1266  // We need to close the save_in variables on the aux system before NodalBCs clear the dofs on
1267  // boundary nodes
1268  if (_has_save_in)
1270 
1271  PARALLEL_TRY
1272  {
1273  ConstBndNodeRange & bnd_nodes = *_mesh.getBoundaryNodeRange();
1274 
1275  if (!bnd_nodes.empty())
1276  {
1277  Moose::perf_log.push("computeNodalBCs()", "Execution");
1278 
1279  for (const auto & bnode : bnd_nodes)
1280  {
1281  BoundaryID boundary_id = bnode->_bnd_id;
1282  Node * node = bnode->_node;
1283 
1284  if (node->processor_id() == processor_id())
1285  {
1286  // reinit variables in nodes
1287  _fe_problem.reinitNodeFace(node, boundary_id, 0);
1288 
1289  if (_nodal_bcs.hasActiveBoundaryObjects(boundary_id))
1290  {
1291  const auto & bcs = _nodal_bcs.getActiveBoundaryObjects(boundary_id);
1292  for (const auto & nbc : bcs)
1293  if (nbc->shouldApply())
1294  {
1295  if (kernel_type == Moose::KT_EIGEN)
1296  nbc->setBCOnEigen(true);
1297  else
1298  nbc->setBCOnEigen(false);
1299 
1300  nbc->computeResidual(residual);
1301  }
1302  }
1303  }
1304  }
1305 
1306  Moose::perf_log.pop("computeNodalBCs()", "Execution");
1307  }
1308  }
1309  PARALLEL_CATCH;
1310 
1311  residual.close();
1312  if (_Re_time)
1313  _Re_time->close();
1314  _Re_non_time->close();
1315 }
1316 
1317 void
1318 NonlinearSystemBase::getNodeDofs(dof_id_type node_id, std::vector<dof_id_type> & dofs)
1319 {
1320  const Node & node = _mesh.nodeRef(node_id);
1321  unsigned int s = number();
1322  if (node.has_dofs(s))
1323  {
1324  for (unsigned int v = 0; v < nVariables(); v++)
1325  for (unsigned int c = 0; c < node.n_comp(s, v); c++)
1326  dofs.push_back(node.dof_number(s, v, c));
1327  }
1328 }
1329 
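// findImplicitGeometricCouplingEntries() builds a symmetric dof-coupling graph
// between slave nodes and their nearest master nodes (plus node-to-node
// constraints) so that the routine below can reserve those off-diagonal Jacobian
// entries, by adding zeros, before they are written to.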
1330 void
1331 NonlinearSystemBase::findImplicitGeometricCouplingEntries(
1332  GeometricSearchData & geom_search_data, std::map<dof_id_type, std::vector<dof_id_type>> & graph)
1333 {
1334  std::map<std::pair<unsigned int, unsigned int>, NearestNodeLocator *> & nearest_node_locators =
1335  geom_search_data._nearest_node_locators;
1336 
1337  const auto & node_to_elem_map = _mesh.nodeToElemMap();
1338  for (const auto & it : nearest_node_locators)
1339  {
1340  std::vector<dof_id_type> & slave_nodes = it.second->_slave_nodes;
1341 
1342  for (const auto & slave_node : slave_nodes)
1343  {
1344  std::set<dof_id_type> unique_slave_indices;
1345  std::set<dof_id_type> unique_master_indices;
1346 
1347  auto node_to_elem_pair = node_to_elem_map.find(slave_node);
1348  if (node_to_elem_pair != node_to_elem_map.end())
1349  {
1350  const std::vector<dof_id_type> & elems = node_to_elem_pair->second;
1351 
1352  // Get the dof indices from each elem connected to the node
1353  for (const auto & cur_elem : elems)
1354  {
1355  std::vector<dof_id_type> dof_indices;
1356  dofMap().dof_indices(_mesh.elemPtr(cur_elem), dof_indices);
1357 
1358  for (const auto & dof : dof_indices)
1359  unique_slave_indices.insert(dof);
1360  }
1361  }
1362 
1363  std::vector<dof_id_type> master_nodes = it.second->_neighbor_nodes[slave_node];
1364 
1365  for (const auto & master_node : master_nodes)
1366  {
1367  auto master_node_to_elem_pair = node_to_elem_map.find(master_node);
1368  mooseAssert(master_node_to_elem_pair != node_to_elem_map.end(),
1369  "Missing entry in node to elem map");
1370  const std::vector<dof_id_type> & master_node_elems = master_node_to_elem_pair->second;
1371 
1372  // Get the dof indices from each elem connected to the node
1373  for (const auto & cur_elem : master_node_elems)
1374  {
1375  std::vector<dof_id_type> dof_indices;
1376  dofMap().dof_indices(_mesh.elemPtr(cur_elem), dof_indices);
1377 
1378  for (const auto & dof : dof_indices)
1379  unique_master_indices.insert(dof);
1380  }
1381  }
1382 
1383  for (const auto & slave_id : unique_slave_indices)
1384  for (const auto & master_id : unique_master_indices)
1385  {
1386  graph[slave_id].push_back(master_id);
1387  graph[master_id].push_back(slave_id);
1388  }
1389  }
1390  }
1391 
1392  // handle node-to-node constraints
1393  const auto & ncs = _constraints.getActiveNodalConstraints();
1394  for (const auto & nc : ncs)
1395  {
1396  std::vector<dof_id_type> master_dofs;
1397  std::vector<dof_id_type> & master_node_ids = nc->getMasterNodeId();
1398  for (const auto & node_id : master_node_ids)
1399  {
1400  Node * node = _mesh.queryNodePtr(node_id);
1401  if (node && node->processor_id() == this->processor_id())
1402  {
1403  getNodeDofs(node_id, master_dofs);
1404  }
1405  }
1406 
1407  _communicator.allgather(master_dofs);
1408 
1409  std::vector<dof_id_type> slave_dofs;
1410  std::vector<dof_id_type> & slave_node_ids = nc->getSlaveNodeId();
1411  for (const auto & node_id : slave_node_ids)
1412  {
1413  Node * node = _mesh.queryNodePtr(node_id);
1414  if (node && node->processor_id() == this->processor_id())
1415  {
1416  getNodeDofs(node_id, slave_dofs);
1417  }
1418  }
1419 
1420  _communicator.allgather(slave_dofs);
1421 
1422  for (const auto & master_id : master_dofs)
1423  for (const auto & slave_id : slave_dofs)
1424  {
1425  graph[master_id].push_back(slave_id);
1426  graph[slave_id].push_back(master_id);
1427  }
1428  }
1429 
1430  // Make every entry sorted and unique
1431  for (auto & it : graph)
1432  {
1433  std::vector<dof_id_type> & row = it.second;
1434  std::sort(row.begin(), row.end());
1435  std::vector<dof_id_type>::iterator uit = std::unique(row.begin(), row.end());
1436  row.resize(uit - row.begin());
1437  }
1438 }
1439 
1440 void
1441 NonlinearSystemBase::addImplicitGeometricCouplingEntries(SparseMatrix<Number> & jacobian,
1442  GeometricSearchData & geom_search_data)
1443 {
1444  std::map<dof_id_type, std::vector<dof_id_type>> graph;
1445 
1446  findImplicitGeometricCouplingEntries(geom_search_data, graph);
1447 
1448  for (const auto & it : graph)
1449  {
1450  dof_id_type dof = it.first;
1451  const std::vector<dof_id_type> & row = it.second;
1452 
1453  for (const auto & coupled_dof : row)
1454  jacobian.add(dof, coupled_dof, 0);
1455  }
1456 }
1457 
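// constraintJacobians() mirrors constraintResiduals() for the Jacobian: node-face
// constraints cache their slave/master blocks (zeroing overwritten slave rows via
// zero_rows), mortar FaceFaceConstraints and ElemElemConstraints add their own
// blocks, and MAT_NEW_NONZERO_ALLOCATION_ERR is disabled up front so new
// constraint entries do not abort the PETSc assembly.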
1458 void
1459 NonlinearSystemBase::constraintJacobians(SparseMatrix<Number> & jacobian, bool displaced)
1460 {
1461 #if PETSC_VERSION_LESS_THAN(3, 3, 0)
1462 #else
1464  MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(),
1465  MAT_NEW_NONZERO_ALLOCATION_ERR,
1466  PETSC_FALSE);
1468  MatSetOption(
1469  static_cast<PetscMatrix<Number> &>(jacobian).mat(), MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE);
1470 #endif
1471 
1472  std::vector<numeric_index_type> zero_rows;
1473  std::map<std::pair<unsigned int, unsigned int>, PenetrationLocator *> * penetration_locators =
1474  NULL;
1475 
1476  if (!displaced)
1477  {
1478  GeometricSearchData & geom_search_data = _fe_problem.geomSearchData();
1479  penetration_locators = &geom_search_data._penetration_locators;
1480  }
1481  else
1482  {
1483  GeometricSearchData & displaced_geom_search_data =
1484  _fe_problem.getDisplacedProblem()->geomSearchData();
1485  penetration_locators = &displaced_geom_search_data._penetration_locators;
1486  }
1487 
1488  bool constraints_applied;
1490  constraints_applied = false;
1491  for (const auto & it : *penetration_locators)
1492  {
1494  {
1495  // Reset the constraint_applied flag before each new constraint, as they need to be assembled
1496  // separately
1497  constraints_applied = false;
1498  }
1499  PenetrationLocator & pen_loc = *(it.second);
1500 
1501  std::vector<dof_id_type> & slave_nodes = pen_loc._nearest_node._slave_nodes;
1502 
1503  BoundaryID slave_boundary = pen_loc._slave_boundary;
1504 
1505  zero_rows.clear();
1506  if (_constraints.hasActiveNodeFaceConstraints(slave_boundary, displaced))
1507  {
1508  const auto & constraints =
1509  _constraints.getActiveNodeFaceConstraints(slave_boundary, displaced);
1510 
1511  for (const auto & slave_node_num : slave_nodes)
1512  {
1513  Node & slave_node = _mesh.nodeRef(slave_node_num);
1514 
1515  if (slave_node.processor_id() == processor_id())
1516  {
1517  if (pen_loc._penetration_info[slave_node_num])
1518  {
1519  PenetrationInfo & info = *pen_loc._penetration_info[slave_node_num];
1520 
1521  const Elem * master_elem = info._elem;
1522  unsigned int master_side = info._side_num;
1523 
1524  // reinit variables at the node
1525  _fe_problem.reinitNodeFace(&slave_node, slave_boundary, 0);
1526 
1529 
1530  std::vector<Point> points;
1531  points.push_back(info._closest_point);
1532 
1533  // reinit variables on the master element's face at the contact point
1534  _fe_problem.setNeighborSubdomainID(master_elem, 0);
1535  _fe_problem.reinitNeighborPhys(master_elem, master_side, points, 0);
1536  for (const auto & nfc : constraints)
1537  {
1538  nfc->_jacobian = &jacobian;
1539 
1540  if (nfc->shouldApply())
1541  {
1542  constraints_applied = true;
1543 
1544  nfc->subProblem().prepareShapes(nfc->variable().number(), 0);
1545  nfc->subProblem().prepareNeighborShapes(nfc->variable().number(), 0);
1546 
1547  nfc->computeJacobian();
1548 
1549  if (nfc->overwriteSlaveJacobian())
1550  {
1551  // Add this variable's dof's row to be zeroed
1552  zero_rows.push_back(nfc->variable().nodalDofIndex());
1553  }
1554 
1555  std::vector<dof_id_type> slave_dofs(1, nfc->variable().nodalDofIndex());
1556 
1557  // Cache the jacobian block for the slave side
1559  slave_dofs,
1560  nfc->_connected_dof_indices,
1561  nfc->variable().scalingFactor());
1562 
1563  // Cache the jacobian block for the master side
1564  if (nfc->addCouplingEntriesToJacobian())
1566  nfc->_Kne,
1567  nfc->masterVariable().dofIndicesNeighbor(),
1568  nfc->_connected_dof_indices,
1569  nfc->variable().scalingFactor());
1570 
1572  if (nfc->addCouplingEntriesToJacobian())
1574 
1575  // Do the off-diagonals next
1576  const std::vector<MooseVariable *> coupled_vars = nfc->getCoupledMooseVars();
1577  for (const auto & jvar : coupled_vars)
1578  {
1579  // Only compute jacobians for nonlinear variables
1580  if (jvar->kind() != Moose::VAR_NONLINEAR)
1581  continue;
1582 
1583  // Only compute Jacobian entries if this coupling is being used by the
1584  // preconditioner
1585  if (nfc->variable().number() == jvar->number() ||
1586  !_fe_problem.areCoupled(nfc->variable().number(), jvar->number()))
1587  continue;
1588 
1589  // Need to zero out the matrices first
1591 
1592  nfc->subProblem().prepareShapes(nfc->variable().number(), 0);
1593  nfc->subProblem().prepareNeighborShapes(jvar->number(), 0);
1594 
1595  nfc->computeOffDiagJacobian(jvar->number());
1596 
1597  // Cache the jacobian block for the slave side
1599  slave_dofs,
1600  nfc->_connected_dof_indices,
1601  nfc->variable().scalingFactor());
1602 
1603  // Cache the jacobian block for the master side
1604  if (nfc->addCouplingEntriesToJacobian())
1606  nfc->variable().dofIndicesNeighbor(),
1607  nfc->_connected_dof_indices,
1608  nfc->variable().scalingFactor());
1609 
1611  if (nfc->addCouplingEntriesToJacobian())
1613  }
1614  }
1615  }
1616  }
1617  }
1618  }
1619  }
1621  {
1622  // See if constraints were applied anywhere
1623  _communicator.max(constraints_applied);
1624 
1625  if (constraints_applied)
1626  {
1627 #ifdef LIBMESH_HAVE_PETSC
1628 // Necessary for speed
1629 #if PETSC_VERSION_LESS_THAN(3, 0, 0)
1630  MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(), MAT_KEEP_ZEROED_ROWS);
1631 #elif PETSC_VERSION_LESS_THAN(3, 1, 0)
1632  // In Petsc 3.0.0, MatSetOption has three args...the third arg
1633  // determines whether the option is set (true) or unset (false)
1634  MatSetOption(
1635  static_cast<PetscMatrix<Number> &>(jacobian).mat(), MAT_KEEP_ZEROED_ROWS, PETSC_TRUE);
1636 #else
1637  MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(),
1638  MAT_KEEP_NONZERO_PATTERN, // This is changed in 3.1
1639  PETSC_TRUE);
1640 #endif
1641 #endif
1642 
1643  jacobian.close();
1644  jacobian.zero_rows(zero_rows, 0.0);
1645  jacobian.close();
1646  _fe_problem.addCachedJacobian(jacobian, 0);
1647  jacobian.close();
1648  }
1649  }
1650  }
1652  {
1653  // See if constraints were applied anywhere
1654  _communicator.max(constraints_applied);
1655 
1656  if (constraints_applied)
1657  {
1658 #ifdef LIBMESH_HAVE_PETSC
1659 // Necessary for speed
1660 #if PETSC_VERSION_LESS_THAN(3, 0, 0)
1661  MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(), MAT_KEEP_ZEROED_ROWS);
1662 #elif PETSC_VERSION_LESS_THAN(3, 1, 0)
1663  // In Petsc 3.0.0, MatSetOption has three args...the third arg
1664  // determines whether the option is set (true) or unset (false)
1665  MatSetOption(
1666  static_cast<PetscMatrix<Number> &>(jacobian).mat(), MAT_KEEP_ZEROED_ROWS, PETSC_TRUE);
1667 #else
1668  MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(),
1669  MAT_KEEP_NONZERO_PATTERN, // This is changed in 3.1
1670  PETSC_TRUE);
1671 #endif
1672 #endif
1673 
1674  jacobian.close();
1675  jacobian.zero_rows(zero_rows, 0.0);
1676  jacobian.close();
1677  _fe_problem.addCachedJacobian(jacobian, 0);
1678  jacobian.close();
1679  }
1680  }
1681 
1682  THREAD_ID tid = 0;
1683  // go over mortar interfaces
1684  auto & ifaces = _mesh.getMortarInterfaces();
1685  for (const auto & iface : ifaces)
1686  {
1687  if (_constraints.hasActiveFaceFaceConstraints(iface->_name))
1688  {
1689  // FaceFaceConstraint objects
1690  const auto & face_constraints = _constraints.getActiveFaceFaceConstraints(iface->_name);
1691 
1692  // go over elements on that interface
1693  const std::vector<Elem *> & elems = iface->_elems;
1694  for (const auto & elem : elems)
1695  {
 1696  // for each element, process the constraints on that interface
1697  for (const auto & ffc : face_constraints)
1698  {
1700  _fe_problem.prepare(elem, tid);
1701  _fe_problem.reinitElem(elem, tid);
1702  ffc->reinit();
1703  ffc->subProblem().prepareShapes(ffc->variable().number(), tid);
1704  ffc->computeJacobian();
1706 
1707  ffc->reinitSide(Moose::Master);
1708  ffc->computeJacobianSide(Moose::Master);
1710 
1711  ffc->reinitSide(Moose::Slave);
1712  ffc->computeJacobianSide(Moose::Slave);
1714  }
1715 
1716  _fe_problem.addCachedJacobian(jacobian, tid);
1717  }
1718  }
1719  }
1720 
1721  // go over element-element constraint interface
1722  std::map<unsigned int, std::shared_ptr<ElementPairLocator>> * element_pair_locators = nullptr;
1723 
1724  if (!displaced)
1725  {
1726  GeometricSearchData & geom_search_data = _fe_problem.geomSearchData();
1727  element_pair_locators = &geom_search_data._element_pair_locators;
1728  }
1729  else
1730  {
1731  GeometricSearchData & displaced_geom_search_data =
1732  _fe_problem.getDisplacedProblem()->geomSearchData();
1733  element_pair_locators = &displaced_geom_search_data._element_pair_locators;
1734  }
1735 
1736  for (const auto & it : *element_pair_locators)
1737  {
1738  ElementPairLocator & elem_pair_loc = *(it.second);
1739 
1741  {
1742  // ElemElemConstraint objects
1743  const auto & _element_constraints = _constraints.getActiveElemElemConstraints(it.first);
1744 
1745  // go over pair elements
1746  const std::list<std::pair<const Elem *, const Elem *>> & elem_pairs =
1747  elem_pair_loc.getElemPairs();
1748  for (const auto & pr : elem_pairs)
1749  {
1750  const Elem * elem1 = pr.first;
1751  const Elem * elem2 = pr.second;
1752 
1753  if (elem1->processor_id() != processor_id())
1754  continue;
1755 
1756  const ElementPairInfo & info = elem_pair_loc.getElemPairInfo(pr);
1757 
 1758  // for each element pair, process the active element constraints
1759  for (const auto & ec : _element_constraints)
1760  {
1761  _fe_problem.setCurrentSubdomainID(elem1, tid);
1763  _fe_problem.setNeighborSubdomainID(elem2, tid);
1765 
1766  ec->subProblem().prepareShapes(ec->variable().number(), tid);
1767  ec->subProblem().prepareNeighborShapes(ec->variable().number(), tid);
1768 
1769  ec->reinit(info);
1770  ec->computeJacobian();
1773  }
1774  _fe_problem.addCachedJacobian(jacobian, tid);
1775  }
1776  }
1777  }
1778 }
1779 
1780 void
1782 {
1783  // Compute the diagonal block for scalar variables
1785  {
1786  const auto & scalars = _scalar_kernels.getActiveObjects();
1787 
1788  _fe_problem.reinitScalars(/*tid=*/0);
1789 
1790  bool have_scalar_contributions = false;
1791  for (const auto & kernel : scalars)
1792  {
1793  kernel->reinit();
1794  const std::vector<dof_id_type> & dof_indices = kernel->variable().dofIndices();
1795  const DofMap & dof_map = kernel->variable().dofMap();
1796  const dof_id_type first_dof = dof_map.first_dof();
1797  const dof_id_type end_dof = dof_map.end_dof();
1798  for (dof_id_type dof : dof_indices)
1799  {
1800  if (dof >= first_dof && dof < end_dof)
1801  {
1802  kernel->computeJacobian();
1803  _fe_problem.addJacobianOffDiagScalar(jacobian, kernel->variable().number());
1804  have_scalar_contributions = true;
1805  break;
1806  }
1807  }
1808  }
1809 
1810  if (have_scalar_contributions)
1811  _fe_problem.addJacobianScalar(jacobian);
1812  }
1813 }
1814 
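// computeJacobianInternal() assembles the full Jacobian: threaded element loops
// (diagonal-only or fully coupled depending on _fe_problem.coupling()),
// NodalKernels on blocks and boundaries, Dirac kernels, optional implicit
// geometric coupling entries, constraint Jacobians, and finally the NodalBC rows,
// whose off-diagonal blocks are only computed for variable pairs the
// preconditioner actually couples.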
1815 void
1816 NonlinearSystemBase::computeJacobianInternal(SparseMatrix<Number> & jacobian,
1817  Moose::KernelType kernel_type)
1818 {
1819 #ifdef LIBMESH_HAVE_PETSC
1820 // Necessary for speed
1821 #if PETSC_VERSION_LESS_THAN(3, 0, 0)
1822  MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(), MAT_KEEP_ZEROED_ROWS);
1823 #elif PETSC_VERSION_LESS_THAN(3, 1, 0)
1824  // In Petsc 3.0.0, MatSetOption has three args...the third arg
1825  // determines whether the option is set (true) or unset (false)
1826  MatSetOption(
1827  static_cast<PetscMatrix<Number> &>(jacobian).mat(), MAT_KEEP_ZEROED_ROWS, PETSC_TRUE);
1828 #else
1829  MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(),
1830  MAT_KEEP_NONZERO_PATTERN, // This is changed in 3.1
1831  PETSC_TRUE);
1832 #endif
1833 #if PETSC_VERSION_LESS_THAN(3, 3, 0)
1834 #else
1836  MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(),
1837  MAT_NEW_NONZERO_ALLOCATION_ERR,
1838  PETSC_FALSE);
1839 #endif
1840 
1841 #endif
1842 
1843  // jacobianSetup /////
1844  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
1845  {
1846  _kernels.jacobianSetup(tid);
1849  if (_doing_dg)
1855  }
1860 
1861  // reinit scalar variables
1862  for (unsigned int tid = 0; tid < libMesh::n_threads(); tid++)
1864 
1865  PARALLEL_TRY
1866  {
1867  ConstElemRange & elem_range = *_mesh.getActiveLocalElementRange();
1868  switch (_fe_problem.coupling())
1869  {
1870  case Moose::COUPLING_DIAG:
1871  {
1872  ComputeJacobianThread cj(_fe_problem, jacobian, kernel_type);
1873  Threads::parallel_reduce(elem_range, cj);
1874 
1875  unsigned int n_threads = libMesh::n_threads();
1876  for (unsigned int i = 0; i < n_threads;
1877  i++) // Add any Jacobian contributions still hanging around
1878  _fe_problem.addCachedJacobian(jacobian, i);
1879 
1880  // Block restricted Nodal Kernels
1882  {
1884  ConstNodeRange & range = *_mesh.getLocalNodeRange();
1885  Threads::parallel_reduce(range, cnkjt);
1886 
1887  unsigned int n_threads = libMesh::n_threads();
1888  for (unsigned int i = 0; i < n_threads;
1889  i++) // Add any cached jacobians that might be hanging around
1891  }
1892 
1893  // Boundary restricted Nodal Kernels
1895  {
1897  ConstBndNodeRange & bnd_range = *_mesh.getBoundaryNodeRange();
1898 
1899  Threads::parallel_reduce(bnd_range, cnkjt);
1900  unsigned int n_threads = libMesh::n_threads();
1901  for (unsigned int i = 0; i < n_threads;
1902  i++) // Add any cached jacobians that might be hanging around
1904  }
1905  }
1906  break;
1907 
1908  default:
1910  {
1911  ComputeFullJacobianThread cj(_fe_problem, jacobian, kernel_type);
1912  Threads::parallel_reduce(elem_range, cj);
1913  unsigned int n_threads = libMesh::n_threads();
1914 
1915  for (unsigned int i = 0; i < n_threads; i++)
1916  _fe_problem.addCachedJacobian(jacobian, i);
1917 
1918  // Block restricted Nodal Kernels
1920  {
1922  ConstNodeRange & range = *_mesh.getLocalNodeRange();
1923  Threads::parallel_reduce(range, cnkjt);
1924 
1925  unsigned int n_threads = libMesh::n_threads();
1926  for (unsigned int i = 0; i < n_threads;
1927  i++) // Add any cached jacobians that might be hanging around
1929  }
1930 
1931  // Boundary restricted Nodal Kernels
1933  {
1935  ConstBndNodeRange & bnd_range = *_mesh.getBoundaryNodeRange();
1936 
1937  Threads::parallel_reduce(bnd_range, cnkjt);
1938 
1939  unsigned int n_threads = libMesh::n_threads();
1940  for (unsigned int i = 0; i < n_threads;
1941  i++) // Add any cached jacobians that might be hanging around
1943  }
1944  }
1945  break;
1946  }
1947 
1948  computeDiracContributions(&jacobian);
1950 
1951  static bool first = true;
1952 
1953  // This adds zeroes into geometric coupling entries to ensure they stay in the matrix
1955  {
1956  first = false;
1958 
1961  _fe_problem.getDisplacedProblem()->geomSearchData());
1962  }
1963  }
1964  PARALLEL_CATCH;
1965  jacobian.close();
1966 
1967  PARALLEL_TRY
1968  {
1969  // Add in Jacobian contributions from Constraints
1971  {
1972  // Nodal Constraints
1974 
1975  // Undisplaced Constraints
1976  constraintJacobians(jacobian, false);
1977 
1978  // Displaced Constraints
1980  constraintJacobians(jacobian, true);
1981  }
1982  }
1983  PARALLEL_CATCH;
1984  jacobian.close();
1985 
 1986  // We need to close the diag_save_in variables on the aux system before NodalBCs clear the dofs on
1987  // boundary nodes
1988  if (_has_diag_save_in)
1990 
1991  PARALLEL_TRY
1992  {
1993  // Cache the information about which BCs are coupled to which
1994  // variables, so we don't have to figure it out for each node.
1995  std::map<std::string, std::set<unsigned int>> bc_involved_vars;
1996  const std::set<BoundaryID> & all_boundary_ids = _mesh.getBoundaryIDs();
1997  for (const auto & bid : all_boundary_ids)
1998  {
1999  // Get reference to all the NodalBCs for this ID. This is only
2000  // safe if there are NodalBCs there to be gotten...
2002  {
2003  const auto & bcs = _nodal_bcs.getActiveBoundaryObjects(bid);
2004  for (const auto & bc : bcs)
2005  {
2006  const std::vector<MooseVariable *> & coupled_moose_vars = bc->getCoupledMooseVars();
2007 
2008  // Create the set of "involved" MOOSE nonlinear vars, which includes all coupled vars and
2009  // the BC's own variable
2010  std::set<unsigned int> & var_set = bc_involved_vars[bc->name()];
2011  for (const auto & coupled_var : coupled_moose_vars)
2012  if (coupled_var->kind() == Moose::VAR_NONLINEAR)
2013  var_set.insert(coupled_var->number());
2014 
2015  var_set.insert(bc->variable().number());
2016  }
2017  }
2018  }
2019 
2020  // Get variable coupling list. We do all the NodalBC stuff on
2021  // thread 0... The couplingEntries() data structure determines
2022  // which variables are "coupled" as far as the preconditioner is
2023  // concerned, not what variables a boundary condition specifically
2024  // depends on.
2025  std::vector<std::pair<MooseVariable *, MooseVariable *>> & coupling_entries =
2026  _fe_problem.couplingEntries(/*_tid=*/0);
2027 
2028  // Compute Jacobians for NodalBCs
2029  ConstBndNodeRange & bnd_nodes = *_mesh.getBoundaryNodeRange();
2030  for (const auto & bnode : bnd_nodes)
2031  {
2032  BoundaryID boundary_id = bnode->_bnd_id;
2033  Node * node = bnode->_node;
2034 
2035  if (_nodal_bcs.hasActiveBoundaryObjects(boundary_id) &&
2036  node->processor_id() == processor_id())
2037  {
2038  _fe_problem.reinitNodeFace(node, boundary_id, 0);
2039 
2040  const auto & bcs = _nodal_bcs.getActiveBoundaryObjects(boundary_id);
2041  for (const auto & bc : bcs)
2042  {
2043  // Get the set of involved MOOSE vars for this BC
2044  std::set<unsigned int> & var_set = bc_involved_vars[bc->name()];
2045 
 2046  // Loop over all the variables whose Jacobian blocks are
 2047  // actually being computed and call computeOffDiagJacobian()
 2048  // for each one that is actually coupled (otherwise the
 2049  // value is zero).
2050  for (const auto & it : coupling_entries)
2051  {
2052  unsigned int ivar = it.first->number(), jvar = it.second->number();
2053 
2054  // We are only going to call computeOffDiagJacobian() if:
2055  // 1.) the BC's variable is ivar
2056  // 2.) jvar is "involved" with the BC (including jvar==ivar), and
2057  // 3.) the BC should apply.
2058  if ((bc->variable().number() == ivar) && var_set.count(jvar) && bc->shouldApply())
2059  bc->computeOffDiagJacobian(jvar);
2060  }
2061  }
2062  }
2063  } // end loop over boundary nodes
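  // Concrete case (hypothetical variables): for a NodalBC attached to u and coupled to v,
  // with couplingEntries() listing (u,u), (u,v) and (v,v), computeOffDiagJacobian() is
  // called only for (u,u) and (u,v); the (v,v) block is skipped because the BC's variable
  // is u, not v.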
2064 
 2065  // For the matrix on the right side of generalized eigenvalue problems, the corresponding
 2066  // rows are zeroed if homogeneous Dirichlet boundary conditions are used.
2067  if (kernel_type == Moose::KT_EIGEN)
2069  // Set the cached NodalBC values in the Jacobian matrix
2070  else
2072  }
2073  PARALLEL_CATCH;
2074  jacobian.close();
2075 
2076  // We need to close the save_in variables on the aux system before NodalBCs clear the dofs on
2077  // boundary nodes
2080 
2081  if (hasDiagSaveIn())
2083 }
2084 
2085 void
2086 NonlinearSystemBase::setVariableGlobalDoFs(const std::string & var_name)
2087 {
2088  AllLocalDofIndicesThread aldit(_sys.system(), {var_name});
2089  ConstElemRange & elem_range = *_mesh.getActiveLocalElementRange();
2090  Threads::parallel_reduce(elem_range, aldit);
2091  _communicator.set_union(aldit._all_dof_indices);
2092  _var_all_dof_indices.assign(aldit._all_dof_indices.begin(), aldit._all_dof_indices.end());
2093 }
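// Worked example (hypothetical dof numbers): the parallel_reduce above gathers the dof
// indices of var_name seen on each rank's local elements, and Communicator::set_union then
// merges them so every rank ends up with the same complete set. If rank 0 collects
// {0, 1, 2} and rank 1 collects {3, 4}, _var_all_dof_indices becomes {0, 1, 2, 3, 4} on
// both ranks.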
2094 
2095 void
2096 NonlinearSystemBase::computeJacobian(SparseMatrix<Number> & jacobian, Moose::KernelType kernel_type)
2097 {
2098  Moose::perf_log.push("compute_jacobian()", "Execution");
2099 
2100  Moose::enableFPE();
2101 
2102  try
2103  {
2104  jacobian.zero();
2105  computeJacobianInternal(jacobian, kernel_type);
2106  }
2107  catch (MooseException & e)
2108  {
 2109  // The buck stops here: we have already handled the exception by
 2110  // calling stopSolve(), and it is now up to PETSc to return a
 2111  // "diverged" reason during the next solve.
2112  }
2113 
2114  Moose::enableFPE(false);
2115 
2116  Moose::perf_log.pop("compute_jacobian()", "Execution");
2117 }
2118 
2119 void
2120 NonlinearSystemBase::computeJacobianBlocks(std::vector<JacobianBlock *> & blocks)
2121 {
2122  Moose::perf_log.push("compute_jacobian_block()", "Execution");
2123 
2124  Moose::enableFPE();
2125 
2126  for (unsigned int i = 0; i < blocks.size(); i++)
2127  {
2128  SparseMatrix<Number> & jacobian = blocks[i]->_jacobian;
2129 
2130 #ifdef LIBMESH_HAVE_PETSC
2131 // Necessary for speed
2132 #if PETSC_VERSION_LESS_THAN(3, 0, 0)
2133  MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(), MAT_KEEP_ZEROED_ROWS);
2134 #elif PETSC_VERSION_LESS_THAN(3, 1, 0)
 2135  // In PETSc 3.0.0, MatSetOption has three args... the third arg
 2136  // determines whether the option is set (true) or unset (false)
2137  MatSetOption(
2138  static_cast<PetscMatrix<Number> &>(jacobian).mat(), MAT_KEEP_ZEROED_ROWS, PETSC_TRUE);
2139 #else
2140  MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(),
2141  MAT_KEEP_NONZERO_PATTERN, // This is changed in 3.1
2142  PETSC_TRUE);
2143 #endif
2144 #if PETSC_VERSION_LESS_THAN(3, 3, 0)
2145 #else
2147  MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(),
2148  MAT_NEW_NONZERO_ALLOCATION_ERR,
2149  PETSC_FALSE);
2150 #endif
2151 
2152 #endif
2153 
2154  jacobian.zero();
2155  }
2156 
2157  for (unsigned int tid = 0; tid < libMesh::n_threads(); tid++)
2159 
2160  PARALLEL_TRY
2161  {
2162  ConstElemRange & elem_range = *_mesh.getActiveLocalElementRange();
2164  Threads::parallel_reduce(elem_range, cjb);
2165  }
2166  PARALLEL_CATCH;
2167 
2168  for (unsigned int i = 0; i < blocks.size(); i++)
2169  blocks[i]->_jacobian.close();
2170 
2171  for (unsigned int i = 0; i < blocks.size(); i++)
2172  {
2173  libMesh::System & precond_system = blocks[i]->_precond_system;
2174  SparseMatrix<Number> & jacobian = blocks[i]->_jacobian;
2175 
2176  unsigned int ivar = blocks[i]->_ivar;
2177  unsigned int jvar = blocks[i]->_jvar;
2178 
2179  // Dirichlet BCs
2180  std::vector<numeric_index_type> zero_rows;
2181  PARALLEL_TRY
2182  {
2183  ConstBndNodeRange & bnd_nodes = *_mesh.getBoundaryNodeRange();
2184  for (const auto & bnode : bnd_nodes)
2185  {
2186  BoundaryID boundary_id = bnode->_bnd_id;
2187  Node * node = bnode->_node;
2188 
2189  if (_nodal_bcs.hasActiveBoundaryObjects(boundary_id))
2190  {
2191  const auto & bcs = _nodal_bcs.getActiveBoundaryObjects(boundary_id);
2192 
2193  if (node->processor_id() == processor_id())
2194  {
2195  _fe_problem.reinitNodeFace(node, boundary_id, 0);
2196 
2197  for (const auto & bc : bcs)
2198  if (bc->variable().number() == ivar && bc->shouldApply())
2199  {
2200  // The first zero is for the variable number... there is only one variable in each
2201  // mini-system
2202  // The second zero only works with Lagrange elements!
2203  zero_rows.push_back(node->dof_number(precond_system.number(), 0, 0));
2204  }
2205  }
2206  }
2207  }
2208  }
2209  PARALLEL_CATCH;
2210 
2211  jacobian.close();
2212 
2213  // This zeroes the rows corresponding to Dirichlet BCs and puts a 1.0 on the diagonal
2214  if (ivar == jvar)
2215  jacobian.zero_rows(zero_rows, 1.0);
2216  else
2217  jacobian.zero_rows(zero_rows, 0.0);
2218 
2219  jacobian.close();
2220  }
2221 
2222  Moose::enableFPE(false);
2223 
2224  Moose::perf_log.pop("compute_jacobian_block()", "Execution");
2225 }
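// Note on the zero_rows() treatment above: for a dof constrained by a nodal (Dirichlet)
// BC, the diagonal block (ivar == jvar) keeps a 1.0 on the diagonal while every
// off-diagonal block gets an all-zero row, so each small preconditioning matrix stays
// consistent with a Dirichlet row of the full Jacobian.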
2226 
2227 void
2229 {
2236  _kernels.updateActive(tid);
2238  if (tid == 0)
2239  {
2245  }
2246 }
2247 
2248 Real
2249 NonlinearSystemBase::computeDamping(const NumericVector<Number> & solution,
2250  const NumericVector<Number> & update)
2251 {
2252  Moose::perf_log.push("compute_dampers()", "Execution");
2253 
2254  // Default to no damping
2255  Real damping = 1.0;
2256  bool has_active_dampers = false;
2257 
2259  {
2260  has_active_dampers = true;
2263  Threads::parallel_reduce(*_mesh.getActiveLocalElementRange(), cid);
2264  damping = std::min(cid.damping(), damping);
2265  }
2266 
2268  {
2269  has_active_dampers = true;
2272  Threads::parallel_reduce(*_mesh.getLocalNodeRange(), cndt);
2273  damping = std::min(cndt.damping(), damping);
2274  }
2275 
2277  {
2278  has_active_dampers = true;
2279  const auto & gdampers = _general_dampers.getActiveObjects();
2280  for (const auto & damper : gdampers)
2281  {
2282  Real gd_damping = damper->computeDamping(solution, update);
2283  try
2284  {
2285  damper->checkMinDamping(gd_damping);
2286  }
2287  catch (MooseException & e)
2288  {
2290  }
2291  damping = std::min(gd_damping, damping);
2292  }
2293  }
2294 
2295  _communicator.min(damping);
2296 
2297  if (has_active_dampers && damping < 1.0)
2298  _console << " Damping factor: " << damping << "\n";
2299 
2300  Moose::perf_log.pop("compute_dampers()", "Execution");
2301 
2302  return damping;
2303 }
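// Sketch (hypothetical damper class, not part of MOOSE): a GeneralDamper participates in
// the loop above by returning a factor in (0, 1] from computeDamping(solution, update),
// for example limiting the largest entry of the proposed Newton update:
//
//   Real MaxStepDamper::computeDamping(const NumericVector<Number> & /*solution*/,
//                                      const NumericVector<Number> & update)
//   {
//     const Real max_change = update.linfty_norm(); // largest proposed change
//     return (max_change > _max_step) ? _max_step / max_change : 1.0;
//   }
//
// computeDamping() then takes the minimum over all dampers and, via _communicator.min(),
// over all processors before the scaled update is applied.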
2304 
2305 void
2306 NonlinearSystemBase::computeDiracContributions(SparseMatrix<Number> * jacobian)
2307 {
2309 
2310  std::set<const Elem *> dirac_elements;
2311 
2313  {
2314  Moose::perf_log.push("computeDiracContributions()", "Execution");
2315 
2316  // TODO: Need a threading fix... but it's complicated!
2317  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); ++tid)
2318  {
2319  const auto & dkernels = _dirac_kernels.getActiveObjects(tid);
2320  for (const auto & dkernel : dkernels)
2321  {
2322  dkernel->clearPoints();
2323  dkernel->addPoints();
2324  }
2325  }
2326 
2327  ComputeDiracThread cd(_fe_problem, jacobian);
2328 
2329  _fe_problem.getDiracElements(dirac_elements);
2330 
2331  DistElemRange range(dirac_elements.begin(), dirac_elements.end(), 1);
 2332  // TODO: Make Dirac kernels work with threads!
2333  // Threads::parallel_reduce(range, cd);
2334 
2335  cd(range);
2336 
2337  Moose::perf_log.pop("computeDiracContributions()", "Execution");
2338  }
2339 
2340  if (jacobian == NULL)
2341  _Re_non_time->close();
2342 }
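// Note: the jacobian pointer doubles as a mode switch. When a matrix is passed in (as from
// computeJacobianInternal above) the Dirac kernel contributions go into the Jacobian; when
// it is NULL the same machinery accumulates residual contributions, which is why the
// non-time residual vector is closed only in that case.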
2343 
2344 NumericVector<Number> &
2346 {
2347  _need_residual_copy = true;
2348  return _residual_copy;
2349 }
2350 
2351 NumericVector<Number> &
2353 {
2354  _need_residual_ghosted = true;
2355  if (!_residual_ghosted)
2356  _residual_ghosted = &addVector("residual_ghosted", false, GHOSTED);
2357  return *_residual_ghosted;
2358 }
2359 
2360 void
2361 NonlinearSystemBase::augmentSparsity(SparsityPattern::Graph & sparsity,
2362  std::vector<dof_id_type> & n_nz,
2363  std::vector<dof_id_type> & n_oz)
2364 {
2366  {
2368 
2369  std::map<dof_id_type, std::vector<dof_id_type>> graph;
2370 
2372 
2375  graph);
2376 
2377  const dof_id_type first_dof_on_proc = dofMap().first_dof(processor_id());
2378  const dof_id_type end_dof_on_proc = dofMap().end_dof(processor_id());
2379 
2380  // The total number of dofs on and off processor
2381  const dof_id_type n_dofs_on_proc = dofMap().n_local_dofs();
2382  const dof_id_type n_dofs_not_on_proc = dofMap().n_dofs() - dofMap().n_local_dofs();
2383 
2384  for (const auto & git : graph)
2385  {
2386  dof_id_type dof = git.first;
2387  dof_id_type local_dof = dof - first_dof_on_proc;
2388 
2389  if (dof < first_dof_on_proc || dof >= end_dof_on_proc)
2390  continue;
2391 
2392  const std::vector<dof_id_type> & row = git.second;
2393 
2394  SparsityPattern::Row & sparsity_row = sparsity[local_dof];
2395 
2396  unsigned int original_row_length = sparsity_row.size();
2397 
2398  sparsity_row.insert(sparsity_row.end(), row.begin(), row.end());
2399 
2400  SparsityPattern::sort_row(
2401  sparsity_row.begin(), sparsity_row.begin() + original_row_length, sparsity_row.end());
2402 
2403  // Fix up nonzero arrays
2404  for (const auto & coupled_dof : row)
2405  {
2406  if (coupled_dof < first_dof_on_proc || coupled_dof >= end_dof_on_proc)
2407  {
2408  if (n_oz[local_dof] < n_dofs_not_on_proc)
2409  n_oz[local_dof]++;
2410  }
2411  else
2412  {
2413  if (n_nz[local_dof] < n_dofs_on_proc)
2414  n_nz[local_dof]++;
2415  }
2416  }
2417  }
2418  }
2419 }
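// Worked example (hypothetical dof numbers): suppose this rank owns dofs [100, 200) and
// the geometric-search graph couples local dof 120 to dofs 121 and 350. Then
// local_dof = 20, the row {121, 350} is merged into sparsity[20], n_nz[20] is bumped for
// the on-processor dof 121, and n_oz[20] is bumped for the off-processor dof 350 (both
// capped at the on-/off-processor dof counts).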
2420 
2421 void
2423 {
2425  {
2426  if (!_serialized_solution.initialized() || _serialized_solution.size() != _sys.n_dofs())
2427  {
2428  _serialized_solution.clear();
2429  _serialized_solution.init(_sys.n_dofs(), false, SERIAL);
2430  }
2431 
2433  }
2434 }
2435 
2436 void
2437 NonlinearSystemBase::setSolution(const NumericVector<Number> & soln)
2438 {
2439  _current_solution = &soln;
2440 
2443 }
2444 
2445 void
2446 NonlinearSystemBase::setSolutionUDot(const NumericVector<Number> & udot)
2447 {
2448  *_u_dot = udot;
2449 }
2450 
2451 NumericVector<Number> &
2453 {
2454  if (!_serialized_solution.initialized())
2455  _serialized_solution.init(_sys.n_dofs(), false, SERIAL);
2456 
2458  return _serialized_solution;
2459 }
2460 
2461 void
2462 NonlinearSystemBase::setPreconditioner(std::shared_ptr<MoosePreconditioner> pc)
2463 {
2464  if (_preconditioner.get() != nullptr)
2465  mooseError("More than one active Preconditioner detected");
2466 
2467  _preconditioner = pc;
2468 }
2469 
2470 void
2472 {
2473  _increment_vec = &_sys.add_vector("u_increment", true, GHOSTED);
2474 }
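// Note: the ghosted "u_increment" vector allocated here is the one filled by
// reinitIncrementAtQpsForDampers() and reinitIncrementAtNodeForDampers() below, giving
// element and nodal dampers access to the proposed change in their variables.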
2475 
2476 void
2478  const std::set<MooseVariable *> & damped_vars)
2479 {
2480  for (const auto & var : damped_vars)
2481  var->computeIncrementAtQps(*_increment_vec);
2482 }
2483 
2484 void
2486  const std::set<MooseVariable *> & damped_vars)
2487 {
2488  for (const auto & var : damped_vars)
2489  var->computeIncrementAtNode(*_increment_vec);
2490 }
2491 
2492 void
2493 NonlinearSystemBase::checkKernelCoverage(const std::set<SubdomainID> & mesh_subdomains) const
2494 {
2495  // Check kernel coverage of subdomains (blocks) in your mesh
2496  std::set<SubdomainID> input_subdomains;
2497  std::set<std::string> kernel_variables;
2498 
2499  bool global_kernels_exist = _kernels.hasActiveBlockObjects(Moose::ANY_BLOCK_ID);
2500  global_kernels_exist |= _scalar_kernels.hasActiveObjects();
2501  global_kernels_exist |= _nodal_kernels.hasActiveObjects();
2502 
2503  _kernels.subdomainsCovered(input_subdomains, kernel_variables);
2504  _nodal_kernels.subdomainsCovered(input_subdomains, kernel_variables);
2505  _scalar_kernels.subdomainsCovered(input_subdomains, kernel_variables);
2506  _constraints.subdomainsCovered(input_subdomains, kernel_variables);
2507 
2508  if (!global_kernels_exist)
2509  {
2510  std::set<SubdomainID> difference;
2511  std::set_difference(mesh_subdomains.begin(),
2512  mesh_subdomains.end(),
2513  input_subdomains.begin(),
2514  input_subdomains.end(),
2515  std::inserter(difference, difference.end()));
2516 
2517  if (!difference.empty())
2518  {
2519  std::stringstream missing_block_ids;
2520  std::copy(difference.begin(),
2521  difference.end(),
2522  std::ostream_iterator<unsigned int>(missing_block_ids, " "));
2523  mooseError("Each subdomain must contain at least one Kernel.\nThe following block(s) lack an "
2524  "active kernel: " +
2525  missing_block_ids.str());
2526  }
2527  }
2528 
2529  std::set<VariableName> variables(getVariableNames().begin(), getVariableNames().end());
2530 
2531  std::set<VariableName> difference;
2532  std::set_difference(variables.begin(),
2533  variables.end(),
2534  kernel_variables.begin(),
2535  kernel_variables.end(),
2536  std::inserter(difference, difference.end()));
2537 
2538  if (!difference.empty())
2539  {
2540  std::stringstream missing_kernel_vars;
2541  std::copy(difference.begin(),
2542  difference.end(),
2543  std::ostream_iterator<std::string>(missing_kernel_vars, " "));
2544  mooseError("Each variable must be referenced by at least one active Kernel.\nThe following "
2545  "variable(s) lack an active kernel: " +
2546  missing_kernel_vars.str());
2547  }
2548 }
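// Worked example (hypothetical block ids): if the mesh has subdomains {1, 2, 3}, no
// globally active (ANY_BLOCK_ID) kernels exist, and the block-restricted kernels only
// cover {1, 2}, the first set_difference leaves {3} and the error message reports block 3.
// The second check applies the same idea to variable names: any nonlinear variable not
// referenced by an active kernel, nodal kernel, scalar kernel, or constraint is reported.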
2549 
2550 bool
2552 {
2554 }
2555 
2556 void
2558 {
2559  if (pcs == "left")
2561  else if (pcs == "right")
2563  else if (pcs == "symmetric")
2565  else if (pcs == "default")
2567  else
2568  mooseError("Unknown PC side specified.");
2569 }
2570 
2571 void
2573 {
2574  if (kspnorm == "none")
2576  else if (kspnorm == "preconditioned")
2578  else if (kspnorm == "unpreconditioned")
2580  else if (kspnorm == "natural")
2582  else if (kspnorm == "default")
2584  else
2585  mooseError("Unknown ksp norm type specified.");
2586 }
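// Note (assumption about downstream use): setPCSide() and setMooseKSPNormType() translate
// the user-facing MooseEnum strings into the stored preconditioning-side and KSP norm
// choices; the PETSc support layer presumably forwards these to KSPSetPCSide() and
// KSPSetNormType() when the solver is configured.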
2587 
2588 bool
2590 {
2591  return _integrated_bcs.hasActiveBoundaryObjects(bnd_id, tid);
2592 }
2593 
2595 {
2596  return _doing_dg;
2597 }
2598 
2599 bool
2601 {
2602  return _doing_dg;
2603 }
2604 
2605 void
2606 NonlinearSystemBase::setPreviousNewtonSolution(const NumericVector<Number> & soln)
2607 {
2609  *_solution_previous_nl = soln;
2610 }