// The libMesh Finite Element Library.
// Copyright (C) 2002-2024 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner

// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.

// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

#include "libmesh/petsc_vector.h"

#ifdef LIBMESH_HAVE_PETSC

// libMesh includes
#include "libmesh/petsc_matrix.h"
#include "libmesh/dense_subvector.h"
#include "libmesh/dense_vector.h"
#include "libmesh/int_range.h"
#include "libmesh/petsc_macro.h"
#include "libmesh/wrapped_petsc.h"

// TIMPI includes
#include "timpi/op_function.h"
#include "timpi/parallel_implementation.h"
#include "timpi/standard_type.h"

// C++ includes
#include <numeric> // std::iota
namespace libMesh
{
//-----------------------------------------------------------------------
// PetscVector members

template <typename T>
T PetscVector<T>::sum () const
{
  this->_restore_array();
  libmesh_assert(this->closed());

  PetscErrorCode ierr=0;
  PetscScalar value=0.;

  ierr = VecSum (_vec, &value);
  LIBMESH_CHKERR(ierr);

  return static_cast<T>(value);
}


template <typename T>
Real PetscVector<T>::l1_norm () const
{
  this->_restore_array();
  libmesh_assert(this->closed());

  PetscErrorCode ierr=0;
  PetscReal value=0.;

  ierr = VecNorm (_vec, NORM_1, &value);
  LIBMESH_CHKERR(ierr);

  return static_cast<Real>(value);
}


template <typename T>
Real PetscVector<T>::l2_norm () const
{
  parallel_object_only();

  this->_restore_array();
  libmesh_assert(this->closed());

  PetscErrorCode ierr=0;
  PetscReal value=0.;

  ierr = VecNorm (_vec, NORM_2, &value);
  LIBMESH_CHKERR(ierr);

  return static_cast<Real>(value);
}


template <typename T>
Real PetscVector<T>::linfty_norm () const
{
  parallel_object_only();

  this->_restore_array();
  libmesh_assert(this->closed());

  PetscErrorCode ierr=0;
  PetscReal value=0.;

  ierr = VecNorm (_vec, NORM_INFINITY, &value);
  LIBMESH_CHKERR(ierr);

  return static_cast<Real>(value);
}


template <typename T>
NumericVector<T> &
PetscVector<T>::operator += (const NumericVector<T> & v)
{
  parallel_object_only();

  this->_restore_array();
  libmesh_assert(this->closed());

  this->add(1., v);

  return *this;
}


template <typename T>
NumericVector<T> &
PetscVector<T>::operator -= (const NumericVector<T> & v)
{
  parallel_object_only();

  this->_restore_array();
  libmesh_assert(this->closed());

  this->add(-1., v);

  return *this;
}

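// Note: set() and add() only stage their new values; VecSetValues()
// caches them until the vector is assembled, so both methods mark the
// vector as not closed, and close() must be called before the entries
// can be read back.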
template <typename T>
void PetscVector<T>::set (const numeric_index_type i, const T value)
{
  this->_restore_array();
  libmesh_assert_less (i, size());

  PetscErrorCode ierr=0;
  PetscInt i_val = static_cast<PetscInt>(i);
  PetscScalar petsc_value = PS(value);

  std::scoped_lock lock(this->_numeric_vector_mutex);
  ierr = VecSetValues (_vec, 1, &i_val, &petsc_value, INSERT_VALUES);
  LIBMESH_CHKERR(ierr);

  this->_is_closed = false;
}


template <typename T>
void PetscVector<T>::reciprocal()
{
  parallel_object_only();

  PetscErrorCode ierr = 0;

  // VecReciprocal has been in PETSc since at least 2.3.3 days
  ierr = VecReciprocal(_vec);
  LIBMESH_CHKERR(ierr);
}


template <typename T>
void PetscVector<T>::conjugate()
{
  parallel_object_only();

  PetscErrorCode ierr = 0;

  // We just call the PETSc VecConjugate
  ierr = VecConjugate(_vec);
  LIBMESH_CHKERR(ierr);
}


template <typename T>
void PetscVector<T>::add (const numeric_index_type i, const T value)
{
  this->_restore_array();
  libmesh_assert_less (i, size());

  PetscErrorCode ierr=0;
  PetscInt i_val = static_cast<PetscInt>(i);
  PetscScalar petsc_value = PS(value);

  std::scoped_lock lock(this->_numeric_vector_mutex);
  ierr = VecSetValues (_vec, 1, &i_val, &petsc_value, ADD_VALUES);
  LIBMESH_CHKERR(ierr);

  this->_is_closed = false;
}

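// Note: the reinterpret_cast below assumes numeric_index_type and
// PetscInt are layout-compatible integer types, the same assumption
// that numeric_petsc_cast() encapsulates elsewhere in this file.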
template <typename T>
void PetscVector<T>::add_vector (const T * v,
                                 const std::vector<numeric_index_type> & dof_indices)
{
  // If we aren't adding anything just return
  if (dof_indices.empty())
    return;

  this->_restore_array();

  PetscErrorCode ierr=0;
  const PetscInt * i_val = reinterpret_cast<const PetscInt *>(dof_indices.data());
  const PetscScalar * petsc_value = pPS(v);

  std::scoped_lock lock(this->_numeric_vector_mutex);
  ierr = VecSetValues (_vec, cast_int<PetscInt>(dof_indices.size()),
                       i_val, petsc_value, ADD_VALUES);
  LIBMESH_CHKERR(ierr);

  this->_is_closed = false;
}


template <typename T>
void PetscVector<T>::add_vector (const NumericVector<T> & v_in,
                                 const SparseMatrix<T> & A_in)
{
  parallel_object_only();

  this->_restore_array();
  // Make sure the data passed in are really of Petsc types
  const PetscVector<T> * v = cast_ptr<const PetscVector<T> *>(&v_in);
  const PetscMatrix<T> * A = cast_ptr<const PetscMatrix<T> *>(&A_in);

  PetscErrorCode ierr=0;

  // We shouldn't close() the matrix for you, as that would potentially modify the state of a const object.
  if (!A->closed())
    {
      libmesh_warning("Matrix A must be assembled before calling PetscVector::add_vector(v, A).\n"
                      "Please update your code, as this warning will become an error in a future release.");
      libmesh_deprecated();
      const_cast<PetscMatrix<T> *>(A)->close();
    }

  // The const_cast<> is not elegant, but it is required since PETSc
  // expects a non-const Mat.
  ierr = MatMultAdd(const_cast<PetscMatrix<T> *>(A)->mat(), v->_vec, _vec, _vec);
  LIBMESH_CHKERR(ierr);
}

template <typename T>
void PetscVector<T>::add_vector_transpose (const NumericVector<T> & v_in,
                                           const SparseMatrix<T> & A_in)
{
  parallel_object_only();

  this->_restore_array();
  // Make sure the data passed in are really of Petsc types
  const PetscVector<T> * v = cast_ptr<const PetscVector<T> *>(&v_in);
  const PetscMatrix<T> * A = cast_ptr<const PetscMatrix<T> *>(&A_in);

  PetscErrorCode ierr=0;

  // We shouldn't close() the matrix for you, as that would potentially modify the state of a const object.
  if (!A->closed())
    {
      libmesh_warning("Matrix A must be assembled before calling PetscVector::add_vector_transpose(v, A).\n"
                      "Please update your code, as this warning will become an error in a future release.");
      libmesh_deprecated();
      const_cast<PetscMatrix<T> *>(A)->close();
    }

  // The const_cast<> is not elegant, but it is required since PETSc
  // expects a non-const Mat.
  ierr = MatMultTransposeAdd(const_cast<PetscMatrix<T> *>(A)->mat(), v->_vec, _vec, _vec);
  LIBMESH_CHKERR(ierr);
}


template <typename T>
void PetscVector<T>::add_vector_conjugate_transpose (const NumericVector<T> & v_in,
                                                     const SparseMatrix<T> & A_in)
{
  parallel_object_only();

  this->_restore_array();
  // Make sure the data passed in are really of Petsc types
  const PetscVector<T> * v = cast_ptr<const PetscVector<T> *>(&v_in);
  const PetscMatrix<T> * A = cast_ptr<const PetscMatrix<T> *>(&A_in);

  // We shouldn't close() the matrix for you, as that would potentially modify the state of a const object.
  if (!A->closed())
    {
      libmesh_warning("Matrix A must be assembled before calling PetscVector::add_vector_conjugate_transpose(v, A).\n"
                      "Please update your code, as this warning will become an error in a future release.");
      libmesh_deprecated();
      const_cast<PetscMatrix<T> *>(A)->close();
    }

  // Store a temporary copy since MatMultHermitianTransposeAdd doesn't seem to work
  // TODO: Find out why MatMultHermitianTransposeAdd doesn't work, might be a PETSc bug?
  std::unique_ptr<NumericVector<Number>> this_clone = this->clone();

  // The const_cast<> is not elegant, but it is required since PETSc
  // expects a non-const Mat.
  PetscErrorCode ierr = MatMultHermitianTranspose(const_cast<PetscMatrix<T> *>(A)->mat(), v->_vec, _vec);
  LIBMESH_CHKERR(ierr);

  // Add the temporary copy to the matvec result
  this->add(1., *this_clone);
}

template <typename T>
void PetscVector<T>::add (const T v_in)
{
  this->_get_array(false);

  for (numeric_index_type i=0; i<_local_size; i++)
    _values[i] += PetscScalar(v_in);
}


template <typename T>
void PetscVector<T>::add (const NumericVector<T> & v)
{
  parallel_object_only();

  this->add (1., v);
}


template <typename T>
void PetscVector<T>::add (const T a_in, const NumericVector<T> & v_in)
{
  parallel_object_only();

  this->_restore_array();

  // VecAXPY doesn't support &x==&y
  if (this == &v_in)
    {
      this->scale(a_in+1);
      return;
    }

  PetscScalar a = PS(a_in);

  // Make sure the NumericVector passed in is really a PetscVector
  const PetscVector<T> * v = cast_ptr<const PetscVector<T> *>(&v_in);
  v->_restore_array();

  libmesh_assert_equal_to (this->size(), v->size());

  PetscErrorCode ierr = VecAXPY(_vec, a, v->vec());
  LIBMESH_CHKERR(ierr);

  libmesh_assert(this->comm().verify(int(this->type())));

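  // For ghosted vectors, propagate the updated owned values into the
  // ghosted copies so that the local form stays consistent.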
  if (this->type() == GHOSTED)
    VecGhostUpdateBeginEnd(this->comm(), _vec, INSERT_VALUES, SCATTER_FORWARD);

  this->_is_closed = true;
}


template <typename T>
void PetscVector<T>::insert (const T * v,
                             const std::vector<numeric_index_type> & dof_indices)
{
  if (dof_indices.empty())
    return;

  this->_restore_array();

  PetscErrorCode ierr=0;
  PetscInt * idx_values = numeric_petsc_cast(dof_indices.data());
  std::scoped_lock lock(this->_numeric_vector_mutex);
  ierr = VecSetValues (_vec, cast_int<PetscInt>(dof_indices.size()),
                       idx_values, pPS(v), INSERT_VALUES);
  LIBMESH_CHKERR(ierr);

  this->_is_closed = false;
}


template <typename T>
void PetscVector<T>::scale (const T factor_in)
{
  parallel_object_only();

  this->_restore_array();

  PetscScalar factor = PS(factor_in);

  PetscErrorCode ierr = VecScale(_vec, factor);
  LIBMESH_CHKERR(ierr);

  libmesh_assert(this->comm().verify(int(this->type())));

  if (this->type() == GHOSTED)
    VecGhostUpdateBeginEnd(this->comm(), _vec, INSERT_VALUES, SCATTER_FORWARD);
}

template <typename T>
NumericVector<T> & PetscVector<T>::operator *= (const NumericVector<T> & v)
{
  parallel_object_only();

  PetscErrorCode ierr = 0;

  const PetscVector<T> * v_vec = cast_ptr<const PetscVector<T> *>(&v);

  ierr = VecPointwiseMult(_vec, _vec, v_vec->_vec);
  LIBMESH_CHKERR(ierr);

  return *this;
}


template <typename T>
NumericVector<T> & PetscVector<T>::operator /= (const NumericVector<T> & v)
{
  parallel_object_only();

  PetscErrorCode ierr = 0;

  const PetscVector<T> * v_vec = cast_ptr<const PetscVector<T> *>(&v);

  ierr = VecPointwiseDivide(_vec, _vec, v_vec->_vec);
  LIBMESH_CHKERR(ierr);

  return *this;
}


template <typename T>
void PetscVector<T>::abs()
{
  parallel_object_only();

  this->_restore_array();

  PetscErrorCode ierr = VecAbs(_vec);
  LIBMESH_CHKERR(ierr);

  libmesh_assert(this->comm().verify(int(this->type())));

  if (this->type() == GHOSTED)
    VecGhostUpdateBeginEnd(this->comm(), _vec, INSERT_VALUES, SCATTER_FORWARD);
}

template <typename T>
T PetscVector<T>::dot (const NumericVector<T> & v_in) const
{
  parallel_object_only();

  this->_restore_array();

  // Error flag
  PetscErrorCode ierr = 0;

  // Return value
  PetscScalar value=0.;

  // Make sure the NumericVector passed in is really a PetscVector
  const PetscVector<T> * v = cast_ptr<const PetscVector<T> *>(&v_in);

  ierr = VecDot(this->_vec, v->_vec, &value);
  LIBMESH_CHKERR(ierr);

  return static_cast<T>(value);
}

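// Unlike dot(), which calls VecDot (conjugating one argument for complex
// scalars), indefinite_dot() uses VecTDot and computes the indefinite
// inner product sum_i x_i * y_i with no conjugation.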
template <typename T>
T PetscVector<T>::indefinite_dot (const NumericVector<T> & v_in) const
{
  parallel_object_only();

  this->_restore_array();

  // Error flag
  PetscErrorCode ierr = 0;

  // Return value
  PetscScalar value=0.;

  // Make sure the NumericVector passed in is really a PetscVector
  const PetscVector<T> * v = cast_ptr<const PetscVector<T> *>(&v_in);

  ierr = VecTDot(this->_vec, v->_vec, &value);
  LIBMESH_CHKERR(ierr);

  return static_cast<T>(value);
}


template <typename T>
NumericVector<T> &
PetscVector<T>::operator = (const T s_in)
{
  parallel_object_only();

  this->_restore_array();
  libmesh_assert(this->closed());

  PetscScalar s = PS(s_in);

  if (this->size() != 0)
    {
      PetscErrorCode ierr = VecSet(_vec, s);
      LIBMESH_CHKERR(ierr);

      libmesh_assert(this->comm().verify(int(this->type())));

      if (this->type() == GHOSTED)
        VecGhostUpdateBeginEnd(this->comm(), _vec, INSERT_VALUES, SCATTER_FORWARD);
    }

  return *this;
}


template <typename T>
NumericVector<T> &
PetscVector<T>::operator = (const NumericVector<T> & v_in)
{
  parallel_object_only();

  // Make sure the NumericVector passed in is really a PetscVector
  const PetscVector<T> * v = cast_ptr<const PetscVector<T> *>(&v_in);

  *this = *v;

  return *this;
}


template <typename T>
PetscVector<T> &
PetscVector<T>::operator = (const PetscVector<T> & v)
{
  parallel_object_only();

  this->_restore_array();
  v._restore_array();

  libmesh_assert_equal_to (this->size(), v.size());
  libmesh_assert_equal_to (this->local_size(), v.local_size());
  libmesh_assert (v.closed());

  PetscErrorCode ierr = 0;

  ierr = VecCopy (v._vec, this->_vec);
  LIBMESH_CHKERR(ierr);

  libmesh_assert(this->comm().verify(int(this->type())));

  if (this->type() == GHOSTED)
    VecGhostUpdateBeginEnd(this->comm(), _vec, INSERT_VALUES, SCATTER_FORWARD);

  this->_is_closed = true;

  return *this;
}


template <typename T>
NumericVector<T> &
PetscVector<T>::operator = (const std::vector<T> & v)
{
  parallel_object_only();

  this->_get_array(false);

  // Case 1: v holds the full global vector, so copy only the local
  // components into our local storage.
  if (this->size() == v.size())
    {
      numeric_index_type first = first_local_index();
      numeric_index_type last = last_local_index();
      for (numeric_index_type i=0; i<last-first; i++)
        _values[i] = PS(v[first + i]);
    }

  // Case 2: v is the same size as our local piece, so insert it
  // directly.
  else
    {
      for (numeric_index_type i=0; i<_local_size; i++)
        _values[i] = PS(v[i]);
    }

  // Make sure ghost dofs are up to date
  if (this->type() == GHOSTED)
    this->close();

  return *this;
}

template <typename T>
void PetscVector<T>::localize (NumericVector<T> & v_local_in) const
{
  parallel_object_only();

  this->_restore_array();

  // Make sure the NumericVector passed in is really a PetscVector
  PetscVector<T> * v_local = cast_ptr<PetscVector<T> *>(&v_local_in);

  libmesh_assert(v_local);
  // v_local_in should be closed
  libmesh_assert(v_local->closed());
  libmesh_assert_equal_to (v_local->size(), this->size());
  // There are three possibilities:
  // 1) v_local_in is a serial vector large enough to hold the whole world
  // 2) v_local_in is a ghosted vector
  // 3) v_local_in is a parallel vector
  // Cases 2) and 3) should be scalable
  libmesh_assert(this->size()==v_local->local_size() || this->local_size()==v_local->local_size());

  PetscErrorCode ierr = 0;

  if (v_local->type() == SERIAL && this->size() == v_local->local_size())
    {
      WrappedPetsc<VecScatter> scatter;
      ierr = VecScatterCreateToAll(_vec, scatter.get(), nullptr);
      LIBMESH_CHKERR(ierr);
      VecScatterBeginEnd(this->comm(), scatter, _vec, v_local->_vec, INSERT_VALUES, SCATTER_FORWARD);
    }
  // The two vectors have the same local size, so we just do a simple copy;
  // v_local could be either a parallel or a ghosted vector.
  else if (this->local_size() == v_local->local_size())
    {
      ierr = VecCopy(_vec,v_local->_vec);
      LIBMESH_CHKERR(ierr);
    }
  else
    {
      libmesh_error_msg("Vectors are inconsistent");
    }

  // Make sure ghost dofs are up to date.
  // We do not call "close" here, to save a global reduction.
  if (v_local->type() == GHOSTED)
    VecGhostUpdateBeginEnd(this->comm(), v_local->_vec, INSERT_VALUES, SCATTER_FORWARD);
}


template <typename T>
void PetscVector<T>::localize (NumericVector<T> & v_local_in,
                               const std::vector<numeric_index_type> & send_list) const
{
  parallel_object_only();

  libmesh_assert(this->comm().verify(int(this->type())));
  libmesh_assert(this->comm().verify(int(v_local_in.type())));

  // FIXME: Workaround for a strange bug at large-scale.
  // If we have ghosting, PETSc lets us just copy the solution, and
  // doing so avoids a segfault?
  if (v_local_in.type() == GHOSTED &&
      this->type() == PARALLEL)
    {
      v_local_in = *this;
      return;
    }

  // Normal code path begins here

  this->_restore_array();

  // Make sure the NumericVector passed in is really a PetscVector
  PetscVector<T> * v_local = cast_ptr<PetscVector<T> *>(&v_local_in);

  libmesh_assert(v_local);
  libmesh_assert_equal_to (v_local->size(), this->size());
  libmesh_assert_less_equal (send_list.size(), v_local->size());

  PetscErrorCode ierr=0;
  const numeric_index_type n_sl =
    cast_int<numeric_index_type>(send_list.size());

  std::vector<PetscInt> idx(n_sl + this->local_size());
  for (numeric_index_type i=0; i<n_sl; i++)
    idx[i] = static_cast<PetscInt>(send_list[i]);
  for (auto i : make_range(this->local_size()))
    idx[n_sl+i] = i + this->first_local_index();

  // Create the index set & scatter objects
  WrappedPetsc<IS> is;
  PetscInt * idxptr = idx.empty() ? nullptr : idx.data();
  ierr = ISCreateGeneral(this->comm().get(), n_sl+this->local_size(),
                         idxptr, PETSC_USE_POINTER, is.get());
  LIBMESH_CHKERR(ierr);

  WrappedPetsc<VecScatter> scatter;
  ierr = VecScatterCreate(_vec, is,
                          v_local->_vec, is,
                          scatter.get());
  LIBMESH_CHKERR(ierr);

  // Perform the scatter
  VecScatterBeginEnd(this->comm(), scatter, _vec, v_local->_vec, INSERT_VALUES, SCATTER_FORWARD);

  // Make sure ghost dofs are up to date
  if (v_local->type() == GHOSTED)
    v_local->close();
}

template <typename T>
void PetscVector<T>::localize (std::vector<T> & v_local,
                               const std::vector<numeric_index_type> & indices) const
{
  parallel_object_only();

  // Error code used to check the status of all PETSc function calls.
  PetscErrorCode ierr = 0;

  // Create a sequential destination Vec with the right number of entries on each proc.
  WrappedPetsc<Vec> dest;
  ierr = VecCreateSeq(PETSC_COMM_SELF, cast_int<PetscInt>(indices.size()), dest.get());
  LIBMESH_CHKERR(ierr);

  // Create an IS from the index array. PETSc does not own the index
  // memory in this case (PETSC_USE_POINTER); it is cleaned up
  // automatically by the std::vector destructor.
  PetscInt * idxptr =
    indices.empty() ? nullptr : numeric_petsc_cast(indices.data());
  WrappedPetsc<IS> is;
  ierr = ISCreateGeneral(this->comm().get(), cast_int<PetscInt>(indices.size()), idxptr,
                         PETSC_USE_POINTER, is.get());
  LIBMESH_CHKERR(ierr);

  // Create the VecScatter object. Passing LIBMESH_PETSC_NULLPTR for the
  // destination IS means "use the identity IS".
  WrappedPetsc<VecScatter> scatter;
  ierr = VecScatterCreate(_vec,
                          /*src is=*/is,
                          /*dest vec=*/dest,
                          /*dest is=*/LIBMESH_PETSC_NULLPTR,
                          scatter.get());
  LIBMESH_CHKERR(ierr);

  // Do the scatter
  VecScatterBeginEnd(this->comm(), scatter, _vec, dest, INSERT_VALUES, SCATTER_FORWARD);

  // Get access to the values stored in dest.
  PetscScalar * values;
  ierr = VecGetArray (dest, &values);
  LIBMESH_CHKERR(ierr);

  // Store values into the provided v_local. Make sure there is enough
  // space reserved and then clear out any existing entries before
  // inserting.
  v_local.reserve(indices.size());
  v_local.clear();
  v_local.insert(v_local.begin(), values, values+indices.size());

  // We are done using it, so restore the array.
  ierr = VecRestoreArray (dest, &values);
  LIBMESH_CHKERR(ierr);
}

template <typename T>
void PetscVector<T>::localize (const numeric_index_type first_local_idx,
                               const numeric_index_type last_local_idx,
                               const std::vector<numeric_index_type> & send_list)
{
  parallel_object_only();

  this->_restore_array();

  libmesh_assert_less_equal (send_list.size(), this->size());
  libmesh_assert_less_equal (last_local_idx+1, this->size());

  const numeric_index_type my_size = this->size();
  const numeric_index_type my_local_size = (last_local_idx + 1 - first_local_idx);
  PetscErrorCode ierr=0;

  // Don't bother for serial cases
  // if ((first_local_idx == 0) &&
  //     (my_local_size == my_size))
  // But we do need to stay in sync for degenerate cases
  if (this->n_processors() == 1)
    return;

  // Build a parallel vector, initialize it with the local
  // parts of (*this)
  PetscVector<T> parallel_vec(this->comm(), PARALLEL);

  parallel_vec.init (my_size, my_local_size, true, PARALLEL);

  // Copy part of *this into the parallel_vec
  {
    // Create idx, idx[i] = i+first_local_idx;
    std::vector<PetscInt> idx(my_local_size);
    std::iota (idx.begin(), idx.end(), first_local_idx);

    // Create the index set & scatter objects
    WrappedPetsc<IS> is;
    ierr = ISCreateGeneral(this->comm().get(), my_local_size,
                           my_local_size ? idx.data() : nullptr, PETSC_USE_POINTER, is.get());
    LIBMESH_CHKERR(ierr);

    WrappedPetsc<VecScatter> scatter;
    ierr = VecScatterCreate(_vec, is,
                            parallel_vec._vec, is,
                            scatter.get());
    LIBMESH_CHKERR(ierr);

    // Perform the scatter
    VecScatterBeginEnd(this->comm(), scatter, _vec, parallel_vec._vec, INSERT_VALUES, SCATTER_FORWARD);
  }

  // localize like normal
  parallel_vec.close();
  parallel_vec.localize (*this, send_list);
  this->close();
}

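// Note: this overload gathers the entire global vector onto every
// processor, costing O(size()) memory and communication per processor;
// the send_list overloads above scale better.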
template <typename T>
void PetscVector<T>::localize (std::vector<T> & v_local) const
{
  // This function must be run on all processors at once
  parallel_object_only();

  this->_restore_array();

  PetscErrorCode ierr=0;
  const PetscInt n = this->size();
  const PetscInt nl = this->local_size();
  PetscScalar * values;

  v_local.clear();
  v_local.resize(n, 0.);

  ierr = VecGetArray (_vec, &values);
  LIBMESH_CHKERR(ierr);

  numeric_index_type ioff = first_local_index();

  for (PetscInt i=0; i<nl; i++)
    v_local[i+ioff] = static_cast<T>(values[i]);

  ierr = VecRestoreArray (_vec, &values);
  LIBMESH_CHKERR(ierr);

  // Each processor contributed only its local entries above; the global
  // sum fills in the rest.
  this->comm().sum(v_local);
}

// Full specialization for Real datatypes
#ifdef LIBMESH_USE_REAL_NUMBERS

template <>
void PetscVector<Real>::localize_to_one (std::vector<Real> & v_local,
                                         const processor_id_type
                                         timpi_mpi_var(pid)) const
{
  parallel_object_only();

  this->_restore_array();

  PetscErrorCode ierr=0;
  const PetscInt n = size();
  PetscScalar * values;

  // only one processor
  if (n_processors() == 1)
    {
      v_local.resize(n);

      ierr = VecGetArray (_vec, &values);
      LIBMESH_CHKERR(ierr);

      for (PetscInt i=0; i<n; i++)
        v_local[i] = static_cast<Real>(values[i]);

      ierr = VecRestoreArray (_vec, &values);
      LIBMESH_CHKERR(ierr);
    }

  // otherwise multiple processors
#ifdef LIBMESH_HAVE_MPI
  else
    {
      if (pid == 0) // optimized version for localizing to 0
        {
          WrappedPetsc<Vec> vout;
          WrappedPetsc<VecScatter> ctx;

          ierr = VecScatterCreateToZero(_vec, ctx.get(), vout.get());
          LIBMESH_CHKERR(ierr);

          VecScatterBeginEnd(this->comm(), ctx, _vec, vout, INSERT_VALUES, SCATTER_FORWARD);

          if (processor_id() == 0)
            {
              v_local.resize(n);

              ierr = VecGetArray (vout, &values);
              LIBMESH_CHKERR(ierr);

              for (PetscInt i=0; i<n; i++)
                v_local[i] = static_cast<Real>(values[i]);

              ierr = VecRestoreArray (vout, &values);
              LIBMESH_CHKERR(ierr);
            }
        }
      else
        {
          v_local.resize(n);

          numeric_index_type ioff = this->first_local_index();
          std::vector<Real> local_values (n, 0.);

          {
            ierr = VecGetArray (_vec, &values);
            LIBMESH_CHKERR(ierr);

            const PetscInt nl = local_size();
            for (PetscInt i=0; i<nl; i++)
              local_values[i+ioff] = static_cast<Real>(values[i]);

            ierr = VecRestoreArray (_vec, &values);
            LIBMESH_CHKERR(ierr);
          }

          MPI_Reduce (local_values.data(), v_local.data(), n,
                      TIMPI::StandardType<Real>(),
                      TIMPI::OpFunction<Real>::sum(),
                      pid, this->comm().get());
        }
    }
#endif // LIBMESH_HAVE_MPI
}

#endif

// Full specialization for Complex datatypes
#ifdef LIBMESH_USE_COMPLEX_NUMBERS

template <>
void PetscVector<Complex>::localize_to_one (std::vector<Complex> & v_local,
                                            const processor_id_type pid) const
{
  parallel_object_only();

  this->_restore_array();

  PetscErrorCode ierr=0;
  const PetscInt n = size();
  const PetscInt nl = local_size();
  PetscScalar * values;

  v_local.resize(n);

  for (PetscInt i=0; i<n; i++)
    v_local[i] = 0.;

  // only one processor
  if (n_processors() == 1)
    {
      ierr = VecGetArray (_vec, &values);
      LIBMESH_CHKERR(ierr);

      for (PetscInt i=0; i<n; i++)
        v_local[i] = static_cast<Complex>(values[i]);

      ierr = VecRestoreArray (_vec, &values);
      LIBMESH_CHKERR(ierr);
    }

  // otherwise multiple processors
  else
    {
      numeric_index_type ioff = this->first_local_index();

      // Local values, acting as send buffers for MPI; initialized to
      // zero since we collect with MPI_SUM.
      std::vector<Real> real_local_values(n, 0.);
      std::vector<Real> imag_local_values(n, 0.);

      {
        ierr = VecGetArray (_vec, &values);
        LIBMESH_CHKERR(ierr);

        // provide my local share to the real and imag buffers
        for (PetscInt i=0; i<nl; i++)
          {
            real_local_values[i+ioff] = static_cast<Complex>(values[i]).real();
            imag_local_values[i+ioff] = static_cast<Complex>(values[i]).imag();
          }

        ierr = VecRestoreArray (_vec, &values);
        LIBMESH_CHKERR(ierr);
      }

      // Receive buffers for the real and imaginary parts of v_local.
      // Once MPI_Reduce() has collected all the real and imaginary
      // parts into these std::vector<Real>s, the values are copied
      // back into v_local.
      std::vector<Real> real_v_local(n);
      std::vector<Real> imag_v_local(n);

      // collect entries from other proc's in real_v_local, imag_v_local
      MPI_Reduce (real_local_values.data(),
                  real_v_local.data(), n,
                  TIMPI::StandardType<Real>(),
                  TIMPI::OpFunction<Real>::sum(),
                  pid, this->comm().get());

      MPI_Reduce (imag_local_values.data(),
                  imag_v_local.data(), n,
                  TIMPI::StandardType<Real>(),
                  TIMPI::OpFunction<Real>::sum(),
                  pid, this->comm().get());

      // copy real_v_local and imag_v_local to v_local
      for (PetscInt i=0; i<n; i++)
        v_local[i] = Complex(real_v_local[i], imag_v_local[i]);
    }
}

#endif

template <typename T>
void PetscVector<T>::pointwise_mult (const NumericVector<T> & vec1,
                                     const NumericVector<T> & vec2)
{
  parallel_object_only();

  this->_restore_array();

  // Convert arguments to PetscVector*.
  const PetscVector<T> * vec1_petsc = cast_ptr<const PetscVector<T> *>(&vec1);
  const PetscVector<T> * vec2_petsc = cast_ptr<const PetscVector<T> *>(&vec2);

  // Call PETSc function.
  PetscErrorCode ierr = VecPointwiseMult(_vec, vec1_petsc->vec(), vec2_petsc->vec());
  LIBMESH_CHKERR(ierr);

  libmesh_assert(this->comm().verify(int(this->type())));

  if (this->type() == GHOSTED)
    VecGhostUpdateBeginEnd(this->comm(), _vec, INSERT_VALUES, SCATTER_FORWARD);

  this->_is_closed = true;
}


template <typename T>
void PetscVector<T>::pointwise_divide (const NumericVector<T> & vec1,
                                       const NumericVector<T> & vec2)
{
  parallel_object_only();

  this->_restore_array();

  // Convert arguments to PetscVector*.
  const PetscVector<T> * const vec1_petsc = cast_ptr<const PetscVector<T> *>(&vec1);
  const PetscVector<T> * const vec2_petsc = cast_ptr<const PetscVector<T> *>(&vec2);

  // Call PETSc function.
  PetscErrorCode ierr = VecPointwiseDivide(_vec, vec1_petsc->vec(), vec2_petsc->vec());
  LIBMESH_CHKERR(ierr);

  libmesh_assert(this->comm().verify(int(this->type())));

  if (this->type() == GHOSTED)
    VecGhostUpdateBeginEnd(this->comm(), _vec, INSERT_VALUES, SCATTER_FORWARD);

  this->_is_closed = true;
}

template <typename T>
void PetscVector<T>::print_matlab (const std::string & name) const
{
  parallel_object_only();

  this->_restore_array();
  libmesh_assert (this->closed());

  PetscErrorCode ierr = 0;

  WrappedPetsc<PetscViewer> petsc_viewer;
  ierr = PetscViewerCreate (this->comm().get(), petsc_viewer.get());
  LIBMESH_CHKERR(ierr);

  // Create an ASCII file containing the vector
  // if a filename was provided.
  if (name != "")
    {
      ierr = PetscViewerASCIIOpen( this->comm().get(),
                                   name.c_str(),
                                   petsc_viewer.get());
      LIBMESH_CHKERR(ierr);

#if PETSC_VERSION_LESS_THAN(3,7,0)
      ierr = PetscViewerSetFormat (petsc_viewer,
                                   PETSC_VIEWER_ASCII_MATLAB);
#else
      ierr = PetscViewerPushFormat (petsc_viewer,
                                    PETSC_VIEWER_ASCII_MATLAB);
#endif

      LIBMESH_CHKERR(ierr);

      ierr = VecView (_vec, petsc_viewer);
      LIBMESH_CHKERR(ierr);
    }

  // Otherwise the vector will be dumped to the screen.
  else
    {
#if PETSC_VERSION_LESS_THAN(3,7,0)
      ierr = PetscViewerSetFormat (PETSC_VIEWER_STDOUT_WORLD,
                                   PETSC_VIEWER_ASCII_MATLAB);
#else
      ierr = PetscViewerPushFormat (PETSC_VIEWER_STDOUT_WORLD,
                                    PETSC_VIEWER_ASCII_MATLAB);
#endif

      LIBMESH_CHKERR(ierr);

      ierr = VecView (_vec, PETSC_VIEWER_STDOUT_WORLD);
      LIBMESH_CHKERR(ierr);
    }
}

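// Note: the subvector is numbered contiguously from zero: entry i of the
// subvector receives entry rows[i] of this vector via the scatter below.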
template <typename T>
void PetscVector<T>::create_subvector (NumericVector<T> & subvector,
                                       const std::vector<numeric_index_type> & rows) const
{
  parallel_object_only();

  this->_restore_array();

  // PETSc data structures
  PetscErrorCode ierr = 0;

  // Make sure the passed in subvector is really a PetscVector
  PetscVector<T> * petsc_subvector = cast_ptr<PetscVector<T> *>(&subvector);

  // If the petsc_subvector is already initialized, we assume that the
  // user has already allocated the *correct* amount of space for it.
  // If not, we use the appropriate PETSc routines to initialize it.
  if (!petsc_subvector->initialized())
    {
      // Initialize the petsc_subvector to have enough space to hold
      // the entries which will be scattered into it. Note: such an
      // init() function (where we let PETSc decide the number of local
      // entries) is not currently offered by the PetscVector
      // class. Should we differentiate here between sequential and
      // parallel vector creation based on this->n_processors() ?
      ierr = VecCreateMPI(this->comm().get(),
                          PETSC_DECIDE,                    // n_local
                          cast_int<PetscInt>(rows.size()), // n_global
                          &(petsc_subvector->_vec));
      LIBMESH_CHKERR(ierr);

      ierr = VecSetFromOptions (petsc_subvector->_vec);
      LIBMESH_CHKERR(ierr);

      // Mark the subvector as initialized
      petsc_subvector->_is_initialized = true;
    }
  else
    {
      petsc_subvector->_restore_array();
    }

  // Use iota to fill an array with the entries [0,1,2,...,rows.size()-1]
  std::vector<PetscInt> idx(rows.size());
  std::iota (idx.begin(), idx.end(), 0);

  // Construct index sets
  WrappedPetsc<IS> parent_is;
  ierr = ISCreateGeneral(this->comm().get(),
                         cast_int<PetscInt>(rows.size()),
                         numeric_petsc_cast(rows.data()),
                         PETSC_USE_POINTER,
                         parent_is.get());
  LIBMESH_CHKERR(ierr);

  WrappedPetsc<IS> subvector_is;
  ierr = ISCreateGeneral(this->comm().get(),
                         cast_int<PetscInt>(rows.size()),
                         idx.data(),
                         PETSC_USE_POINTER,
                         subvector_is.get());
  LIBMESH_CHKERR(ierr);

  // Construct the scatter object
  WrappedPetsc<VecScatter> scatter;
  ierr = VecScatterCreate(this->_vec,
                          parent_is,
                          petsc_subvector->_vec,
                          subvector_is,
                          scatter.get()); LIBMESH_CHKERR(ierr);

  // Actually perform the scatter
  VecScatterBeginEnd(this->comm(), scatter, this->_vec, petsc_subvector->_vec, INSERT_VALUES, SCATTER_FORWARD);
}

template <typename T>
void PetscVector<T>::_get_array(bool read_only) const
{
  libmesh_assert (this->initialized());

  bool initially_array_is_present = _array_is_present.load(std::memory_order_acquire);

  // If we already have a read/write array and a read-only array was
  // requested, just hand out the read/write one.
  if (initially_array_is_present && read_only && !_values_read_only)
    _read_only_values = _values;

  // If the values have already been retrieved as read-only, but a
  // read/write view is being requested, we need to restore the array
  // first and then retrieve it again.
  if (initially_array_is_present && !read_only && _values_read_only)
    {
      _restore_array();
      initially_array_is_present = false;
    }

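  // Double-checked locking: the flag is tested again under the mutex in
  // case another thread fetched the array between our first check and
  // the lock acquisition.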
  if (!initially_array_is_present)
    {
      std::scoped_lock lock(_petsc_get_restore_array_mutex);
      if (!_array_is_present.load(std::memory_order_relaxed))
        {
          PetscErrorCode ierr=0;
          if (this->type() != GHOSTED)
            {
              if (read_only)
                {
                  ierr = VecGetArrayRead(_vec, &_read_only_values);
                  _values_read_only = true;
                }
              else
                {
                  ierr = VecGetArray(_vec, &_values);
                  _values_read_only = false;
                }
              LIBMESH_CHKERR(ierr);
              _local_size = this->local_size();
            }
          else
            {
              ierr = VecGhostGetLocalForm (_vec,&_local_form);
              LIBMESH_CHKERR(ierr);

              if (read_only)
                {
                  ierr = VecGetArrayRead(_local_form, &_read_only_values);
                  _values_read_only = true;
                }
              else
                {
                  ierr = VecGetArray(_local_form, &_values);
                  _values_read_only = false;
                }
              LIBMESH_CHKERR(ierr);

              PetscInt my_local_size = 0;
              ierr = VecGetLocalSize(_local_form, &my_local_size);
              LIBMESH_CHKERR(ierr);
              _local_size = static_cast<numeric_index_type>(my_local_size);
            }

          { // cache ownership range
            PetscInt petsc_first=0, petsc_last=0;
            ierr = VecGetOwnershipRange (_vec, &petsc_first, &petsc_last);
            LIBMESH_CHKERR(ierr);
            _first = static_cast<numeric_index_type>(petsc_first);
            _last = static_cast<numeric_index_type>(petsc_last);
          }
          _array_is_present.store(true, std::memory_order_release);
        }
    }
}

template <typename T>
void PetscVector<T>::_restore_array() const
{
  libmesh_error_msg_if(_values_manually_retrieved,
                       "PetscVector values were manually retrieved but are being automatically restored!");

  libmesh_assert (this->initialized());
  if (_array_is_present.load(std::memory_order_acquire))
    {
      std::scoped_lock lock(_petsc_get_restore_array_mutex);
      if (_array_is_present.load(std::memory_order_relaxed))
        {
          PetscErrorCode ierr=0;
          if (this->type() != GHOSTED)
            {
              if (_values_read_only)
                ierr = VecRestoreArrayRead (_vec, &_read_only_values);
              else
                ierr = VecRestoreArray (_vec, &_values);

              LIBMESH_CHKERR(ierr);
              _values = nullptr;
            }
          else
            {
              if (_values_read_only)
                ierr = VecRestoreArrayRead (_local_form, &_read_only_values);
              else
                ierr = VecRestoreArray (_local_form, &_values);

              LIBMESH_CHKERR(ierr);
              _values = nullptr;
              ierr = VecGhostRestoreLocalForm (_vec,&_local_form);
              LIBMESH_CHKERR(ierr);
              _local_form = nullptr;
              _local_size = 0;
            }
          _array_is_present.store(false, std::memory_order_release);
        }
    }
}


//------------------------------------------------------------------
// Explicit instantiations
template class LIBMESH_EXPORT PetscVector<Number>;

} // namespace libMesh


#endif // #ifdef LIBMESH_HAVE_PETSC