preconditioner_amg.h
/*  ______________________________________________________________________
 *
 *  ExaDG - High-Order Discontinuous Galerkin for the Exa-Scale
 *
 *  Copyright (C) 2021 by the ExaDG authors
 *
 *  This program is free software: you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation, either version 3 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program. If not, see <https://www.gnu.org/licenses/>.
 *  ______________________________________________________________________
 */

#ifndef PRECONDITIONER_AMG
#define PRECONDITIONER_AMG

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/solver_gmres.h>
#include <deal.II/lac/trilinos_precondition.h>
#include <deal.II/lac/trilinos_sparse_matrix.h>

#ifdef DEAL_II_WITH_TRILINOS
#  include <ml_MultiLevelPreconditioner.h>
#endif

#include <exadg/solvers_and_preconditioners/multigrid/multigrid_parameters.h>
#include <exadg/solvers_and_preconditioners/preconditioners/preconditioner_base.h>
#include <exadg/solvers_and_preconditioners/solvers/iterative_solvers_dealii_wrapper.h>
#include <exadg/solvers_and_preconditioners/utilities/linear_algebra_utilities.h>
#include <exadg/utilities/print_functions.h>

namespace ExaDG
{
template<int dim, int spacedim>
std::unique_ptr<MPI_Comm, void (*)(MPI_Comm *)>
create_subcommunicator(dealii::DoFHandler<dim, spacedim> const & dof_handler)
{
  unsigned int n_locally_owned_cells = 0;
  for(auto const & cell : dof_handler.active_cell_iterators())
    if(cell->is_locally_owned())
      ++n_locally_owned_cells;

  MPI_Comm const mpi_comm = dof_handler.get_communicator();

  // In case some of the MPI ranks do not have cells, we create a
  // sub-communicator to exclude all those processes from the MPI
  // communication in the matrix-based operations and hence speed up those
  // operations. Note that we have to free the communicator again, which is
  // done by a custom deleter of the unique pointer that is run when it goes
  // out of scope.
  if(dealii::Utilities::MPI::min(n_locally_owned_cells, mpi_comm) == 0)
  {
    std::unique_ptr<MPI_Comm, void (*)(MPI_Comm *)> subcommunicator(new MPI_Comm,
                                                                    [](MPI_Comm * comm) {
                                                                      MPI_Comm_free(comm);
                                                                      delete comm;
                                                                    });
    MPI_Comm_split(mpi_comm,
                   n_locally_owned_cells > 0,
                   dealii::Utilities::MPI::this_mpi_process(mpi_comm),
                   subcommunicator.get());

    return subcommunicator;
  }
  else
  {
    std::unique_ptr<MPI_Comm, void (*)(MPI_Comm *)> communicator(new MPI_Comm,
                                                                 [](MPI_Comm * comm) {
                                                                   delete comm;
                                                                 });
    *communicator = mpi_comm;

    return communicator;
  }
}
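
/*
 * Usage sketch (hypothetical; assumes `dof_handler` is a distributed
 * dealii::DoFHandler<3>): the returned communicator either aliases the
 * global communicator or, if some ranks own no cells, a freshly split one.
 *
 *   auto comm = create_subcommunicator(dof_handler);
 *   dealii::PETScWrappers::MPI::SparseMatrix matrix;
 *   // ... initialize `matrix` on *comm; the custom deleter frees the
 *   // sub-communicator when `comm` goes out of scope.
 */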

#ifdef DEAL_II_WITH_TRILINOS
inline Teuchos::ParameterList
get_ML_parameter_list(dealii::TrilinosWrappers::PreconditionAMG::AdditionalData const & ml_data,
                      int const dimension)
{
  Teuchos::ParameterList parameter_list;

  // Slightly modified from
  // dealii::TrilinosWrappers::PreconditionAMG::AdditionalData::set_parameters().
  if(ml_data.elliptic == true)
  {
    ML_Epetra::SetDefaults("SA", parameter_list);
  }
  else
  {
    ML_Epetra::SetDefaults("NSSA", parameter_list);
    parameter_list.set("aggregation: block scaling", true);
  }
  parameter_list.set("aggregation: type", "Uncoupled");
  parameter_list.set("smoother: type", ml_data.smoother_type);
  parameter_list.set("coarse: type", ml_data.coarse_type);

  // Force re-initialization of the random seed to make ML deterministic
  // (guarded for Trilinos >= 12.4):
#  if DEAL_II_TRILINOS_VERSION_GTE(12, 4, 0)
  parameter_list.set("initialize random seed", true);
#  endif

  parameter_list.set("smoother: sweeps", static_cast<int>(ml_data.smoother_sweeps));
  parameter_list.set("cycle applications", static_cast<int>(ml_data.n_cycles));
  if(ml_data.w_cycle == true)
  {
    parameter_list.set("prec type", "MGW");
  }
  else
  {
    parameter_list.set("prec type", "MGV");
  }

  parameter_list.set("smoother: Chebyshev alpha", 10.);
  parameter_list.set("smoother: ifpack overlap", static_cast<int>(ml_data.smoother_overlap));
  parameter_list.set("aggregation: threshold", ml_data.aggregation_threshold);

  // Stop coarsening as soon as the problem size drops below `coarse: max size`;
  // that level is solved as the coarse problem.
  parameter_list.set("coarse: max size", 2000);

  // This extends the settings in
  // dealii::TrilinosWrappers::PreconditionAMG::AdditionalData::set_parameters().
  parameter_list.set("repartition: enable", 1);
  parameter_list.set("repartition: max min ratio", 1.3);
  parameter_list.set("repartition: min per proc", 300);
  parameter_list.set("repartition: partitioner", "Zoltan");
  parameter_list.set("repartition: Zoltan dimensions", dimension);

  if(ml_data.output_details)
  {
    parameter_list.set("ML output", 10);
  }
  else
  {
    parameter_list.set("ML output", 0);
  }

  return parameter_list;
}
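
/*
 * Usage sketch (hypothetical `matrix` of type
 * dealii::TrilinosWrappers::SparseMatrix, already assembled): the list
 * produced above can be handed directly to
 * dealii::TrilinosWrappers::PreconditionAMG::initialize().
 *
 *   dealii::TrilinosWrappers::PreconditionAMG::AdditionalData ml_data;
 *   ml_data.elliptic        = true;
 *   ml_data.smoother_sweeps = 2;
 *   Teuchos::ParameterList parameter_list = get_ML_parameter_list(ml_data, 3);
 *
 *   dealii::TrilinosWrappers::PreconditionAMG amg;
 *   amg.initialize(matrix, parameter_list);
 */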

template<typename Operator>
class PreconditionerML : public PreconditionerBase<double>
{
private:
  typedef dealii::LinearAlgebra::distributed::Vector<double> VectorType;

  typedef dealii::TrilinosWrappers::PreconditionAMG::AdditionalData MLData;

public:
  // distributed sparse system matrix
  dealii::TrilinosWrappers::SparseMatrix system_matrix;

private:
  dealii::TrilinosWrappers::PreconditionAMG amg;

public:
  PreconditionerML(Operator const & op, bool const initialize, MLData ml_data = MLData())
    : pde_operator(op), ml_data(ml_data)
  {
    // initialize system matrix
    pde_operator.init_system_matrix(system_matrix,
                                    op.get_matrix_free().get_dof_handler().get_communicator());

    if(initialize)
    {
      this->update();
    }
  }

  void
  vmult(VectorType & dst, VectorType const & src) const override
  {
    amg.vmult(dst, src);
  }

  void
  apply_krylov_solver_with_amg_preconditioner(VectorType &                      dst,
                                              VectorType const &                src,
                                              MultigridCoarseGridSolver const & solver_type,
                                              SolverData const &                solver_data) const
  {
    dealii::ReductionControl solver_control(solver_data.max_iter,
                                            solver_data.abs_tol,
                                            solver_data.rel_tol);

    if(solver_type == MultigridCoarseGridSolver::CG)
    {
      dealii::SolverCG<VectorType> solver(solver_control);
      solver.solve(system_matrix, dst, src, *this);
    }
    else if(solver_type == MultigridCoarseGridSolver::GMRES)
    {
      typename dealii::SolverGMRES<VectorType>::AdditionalData gmres_data;
      gmres_data.max_n_tmp_vectors     = solver_data.max_krylov_size;
      gmres_data.right_preconditioning = true;

      dealii::SolverGMRES<VectorType> solver(solver_control, gmres_data);
      solver.solve(system_matrix, dst, src, *this);
    }
    else
    {
      AssertThrow(false, dealii::ExcMessage("Not implemented."));
    }
  }

  void
  update() override
  {
    // Clear content of matrix since calculate_system_matrix() adds the result.
    system_matrix *= 0.0;

    // Re-calculate the system matrix.
    pde_operator.calculate_system_matrix(system_matrix);

    // Construct AMG preconditioner based on `Teuchos::ParameterList`.
    unsigned int const     dimension      = pde_operator.get_matrix_free().dimension;
    Teuchos::ParameterList parameter_list = get_ML_parameter_list(ml_data, dimension);

    // Add near null space basis vectors to the `Teuchos::ParameterList`.
    // If `std::vector<std::vector<bool>> constant_modes` was filled, use it;
    // otherwise, fall back to `std::vector<std::vector<double>> constant_modes_values`.
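    // Example (illustrative): for a scalar Laplace operator, the near null
    // space consists of a single constant mode, i.e. `constant_modes` holds
    // one vector with all entries true; for a vector-valued problem with
    // `dim` components, it typically holds `dim` vectors, each flagging the
    // DoFs of one component (the layout produced by deal.II's
    // DoFTools::extract_constant_modes()).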
    std::vector<std::vector<bool>>   constant_modes;
    std::vector<std::vector<double>> constant_modes_values;
    pde_operator.get_constant_modes(constant_modes, constant_modes_values);
    MPI_Comm const & mpi_comm = system_matrix.get_mpi_communicator();
    bool const       some_constant_mode_set =
      dealii::Utilities::MPI::logical_or(constant_modes.size() > 0, mpi_comm);
    if(not some_constant_mode_set)
    {
      bool const some_constant_mode_values_set =
        dealii::Utilities::MPI::logical_or(constant_modes_values.size() > 0, mpi_comm);
      AssertThrow(some_constant_mode_values_set,
                  dealii::ExcMessage(
                    "Neither `constant_modes` nor `constant_modes_values` were provided. "
                    "AMG setup requires near null space basis vectors."));
      ml_data.constant_modes_values = constant_modes_values;
    }
    else
    {
      ml_data.constant_modes = constant_modes;
    }

    // Hand the near null space basis vectors over to ML.
    // `ptr_operator_modes` must stay alive until amg.initialize() has been called.
    std::unique_ptr<Epetra_MultiVector> ptr_operator_modes;
    ml_data.set_operator_null_space(parameter_list,
                                    ptr_operator_modes,
                                    system_matrix.trilinos_matrix());

    // Initialize with the `Teuchos::ParameterList`.
    amg.initialize(system_matrix, parameter_list);

    this->update_needed = false;
  }

private:
  // reference to matrix-free operator
  Operator const & pde_operator;

  MLData ml_data;
};
#endif
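
/*
 * Usage sketch (hypothetical operator type `LaplaceOperator<3, double>`
 * providing init_system_matrix(), calculate_system_matrix(), and
 * get_constant_modes() as used above):
 *
 *   PreconditionerML<LaplaceOperator<3, double>> preconditioner(laplace_operator,
 *                                                               true);
 *   preconditioner.vmult(dst, src); // apply one AMG cycle
 */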

#ifdef DEAL_II_WITH_PETSC
/*
 * Wrapper class for BoomerAMG from Hypre
 */
template<typename Operator, typename Number>
class PreconditionerBoomerAMG : public PreconditionerBase<Number>
{
private:
  typedef dealii::LinearAlgebra::distributed::Vector<Number> VectorType;

  typedef dealii::PETScWrappers::PreconditionBoomerAMG::AdditionalData BoomerData;

  // subcommunicator; declared before the matrix to ensure that it gets
  // deleted after the matrix and preconditioner depending on it
  std::unique_ptr<MPI_Comm, void (*)(MPI_Comm *)> subcommunicator;

public:
  // distributed sparse system matrix
  dealii::PETScWrappers::MPI::SparseMatrix system_matrix;

  // amg preconditioner for access by PETSc solver
  dealii::PETScWrappers::PreconditionBoomerAMG amg;

  PreconditionerBoomerAMG(Operator const & op,
                          bool const       initialize,
                          BoomerData       boomer_data = BoomerData())
    : subcommunicator(
        create_subcommunicator(op.get_matrix_free().get_dof_handler(op.get_dof_index()))),
      pde_operator(op),
      boomer_data(boomer_data)
  {
    // initialize system matrix
    pde_operator.init_system_matrix(system_matrix, *subcommunicator);

    if(initialize)
    {
      this->update();
    }
  }

  ~PreconditionerBoomerAMG()
  {
    if(system_matrix.m() > 0)
    {
      PetscErrorCode ierr = VecDestroy(&petsc_vector_dst);
      AssertThrow(ierr == 0, dealii::ExcPETScError(ierr));
      ierr = VecDestroy(&petsc_vector_src);
      AssertThrow(ierr == 0, dealii::ExcPETScError(ierr));
    }
  }

  void
  vmult(VectorType & dst, VectorType const & src) const override
  {
    if(system_matrix.m() > 0)
      apply_petsc_operation(dst,
                            src,
                            petsc_vector_dst,
                            petsc_vector_src,
                            [&](dealii::PETScWrappers::VectorBase &       petsc_dst,
                                dealii::PETScWrappers::VectorBase const & petsc_src) {
                              amg.vmult(petsc_dst, petsc_src);
                            });
  }

  void
  apply_krylov_solver_with_amg_preconditioner(VectorType &                      dst,
                                              VectorType const &                src,
                                              MultigridCoarseGridSolver const & solver_type,
                                              SolverData const &                solver_data) const
  {
    apply_petsc_operation(dst,
                          src,
                          system_matrix.get_mpi_communicator(),
                          [&](dealii::PETScWrappers::VectorBase &       petsc_dst,
                              dealii::PETScWrappers::VectorBase const & petsc_src) {
                            dealii::ReductionControl solver_control(solver_data.max_iter,
                                                                    solver_data.abs_tol,
                                                                    solver_data.rel_tol);

                            if(solver_type == MultigridCoarseGridSolver::CG)
                            {
                              dealii::PETScWrappers::SolverCG solver(solver_control);
                              solver.solve(system_matrix, petsc_dst, petsc_src, amg);
                            }
                            else if(solver_type == MultigridCoarseGridSolver::GMRES)
                            {
                              dealii::PETScWrappers::SolverGMRES solver(solver_control);
                              solver.solve(system_matrix, petsc_dst, petsc_src, amg);
                            }
                            else
                            {
                              AssertThrow(false, dealii::ExcMessage("Not implemented."));
                            }
                          });
  }

  void
  update() override
  {
    // clear content of matrix since the next calculate_system_matrix calls
    // add their result; since we might run this on a sub-communicator, we
    // skip the processes that do not participate in the matrix and have size
    // zero
    if(system_matrix.m() > 0)
      system_matrix = 0.0;

    calculate_preconditioner();

    this->update_needed = false;
  }

private:
  void
  calculate_preconditioner()
  {
    // calculate_matrix in case the current MPI rank participates in the PETSc communicator
    if(system_matrix.m() > 0)
    {
      pde_operator.calculate_system_matrix(system_matrix);

      amg.initialize(system_matrix, boomer_data);

      // get vector partitioner
      dealii::LinearAlgebra::distributed::Vector<typename Operator::value_type> vector;
      pde_operator.initialize_dof_vector(vector);
      VecCreateMPI(system_matrix.get_mpi_communicator(),
                   vector.get_partitioner()->locally_owned_size(),
                   PETSC_DETERMINE,
                   &petsc_vector_dst);
      VecCreateMPI(system_matrix.get_mpi_communicator(),
                   vector.get_partitioner()->locally_owned_size(),
                   PETSC_DETERMINE,
                   &petsc_vector_src);
    }
  }

  // reference to MultigridOperator
  Operator const & pde_operator;

  BoomerData boomer_data;

  // PETSc vector objects to avoid re-allocation in every vmult() operation;
  // initialized to nullptr so that the destructor is safe even if update()
  // was never called
  mutable Vec petsc_vector_src = nullptr;
  mutable Vec petsc_vector_dst = nullptr;
};
#endif
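
/*
 * Usage sketch (hypothetical; same operator interface as for
 * PreconditionerML, with the additional requirement of get_dof_index() and
 * initialize_dof_vector()):
 *
 *   PreconditionerBoomerAMG<LaplaceOperator<3, double>, double>
 *     preconditioner(laplace_operator, true);
 *   preconditioner.vmult(dst, src); // apply one BoomerAMG cycle
 */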

/*
 * Unified AMG preconditioner that dispatches to PreconditionerBoomerAMG
 * (PETSc/Hypre) or PreconditionerML (Trilinos), depending on AMGData::amg_type.
 */
template<typename Operator, typename Number>
class PreconditionerAMG : public PreconditionerBase<Number>
{
private:
  typedef typename PreconditionerBase<Number>::VectorType VectorType;

public:
  PreconditionerAMG(Operator const & pde_operator, bool const initialize, AMGData const & data)
  {
    (void)pde_operator;
    (void)initialize;
    this->data = data;

    if(data.amg_type == AMGType::BoomerAMG)
    {
#ifdef DEAL_II_WITH_PETSC
      preconditioner_boomer =
        std::make_shared<PreconditionerBoomerAMG<Operator, Number>>(pde_operator,
                                                                    initialize,
                                                                    data.boomer_data);
#else
      AssertThrow(false, dealii::ExcMessage("deal.II is not compiled with PETSc!"));
#endif
    }
    else if(data.amg_type == AMGType::ML)
    {
#ifdef DEAL_II_WITH_TRILINOS
      preconditioner_ml =
        std::make_shared<PreconditionerML<Operator>>(pde_operator, initialize, data.ml_data);
#else
      AssertThrow(false, dealii::ExcMessage("deal.II is not compiled with Trilinos!"));
#endif
    }
    else
    {
      AssertThrow(false, dealii::ExcNotImplemented());
    }
  }

  void
  vmult(VectorType & dst, VectorType const & src) const final
  {
    if(data.amg_type == AMGType::BoomerAMG)
    {
#ifdef DEAL_II_WITH_PETSC
      preconditioner_boomer->vmult(dst, src);
#else
      AssertThrow(false, dealii::ExcMessage("deal.II is not compiled with PETSc!"));
#endif
    }
    else if(data.amg_type == AMGType::ML)
    {
#ifdef DEAL_II_WITH_TRILINOS
      // PreconditionerML operates in double precision, so convert the
      // vectors if Number is a different precision.
      apply_function_in_double_precision(
        dst,
        src,
        [&](dealii::LinearAlgebra::distributed::Vector<double> &       dst_double,
            dealii::LinearAlgebra::distributed::Vector<double> const & src_double) {
          preconditioner_ml->vmult(dst_double, src_double);
        });
#else
      AssertThrow(false, dealii::ExcMessage("deal.II is not compiled with Trilinos!"));
#endif
    }
    else
    {
      AssertThrow(false, dealii::ExcNotImplemented());
    }
  }

  void
  apply_krylov_solver_with_amg_preconditioner(VectorType &                      dst,
                                              VectorType const &                src,
                                              MultigridCoarseGridSolver const & solver_type,
                                              SolverData const &                solver_data) const
  {
    if(data.amg_type == AMGType::BoomerAMG)
    {
#ifdef DEAL_II_WITH_PETSC
      std::shared_ptr<PreconditionerBoomerAMG<Operator, Number>> preconditioner =
        std::dynamic_pointer_cast<PreconditionerBoomerAMG<Operator, Number>>(preconditioner_boomer);

      preconditioner->apply_krylov_solver_with_amg_preconditioner(dst,
                                                                  src,
                                                                  solver_type,
                                                                  solver_data);
#else
      AssertThrow(false, dealii::ExcMessage("deal.II is not compiled with PETSc!"));
#endif
    }
    else if(data.amg_type == AMGType::ML)
    {
#ifdef DEAL_II_WITH_TRILINOS
      std::shared_ptr<PreconditionerML<Operator>> preconditioner =
        std::dynamic_pointer_cast<PreconditionerML<Operator>>(preconditioner_ml);

      apply_function_in_double_precision(
        dst,
        src,
        [&](dealii::LinearAlgebra::distributed::Vector<double> &       dst_double,
            dealii::LinearAlgebra::distributed::Vector<double> const & src_double) {
          preconditioner->apply_krylov_solver_with_amg_preconditioner(dst_double,
                                                                      src_double,
                                                                      solver_type,
                                                                      solver_data);
        });
#else
      AssertThrow(false, dealii::ExcMessage("deal.II is not compiled with Trilinos!"));
#endif
    }
    else
    {
      AssertThrow(false, dealii::ExcNotImplemented());
    }
  }

  void
  update() final
  {
    if(data.amg_type == AMGType::BoomerAMG)
    {
#ifdef DEAL_II_WITH_PETSC
      preconditioner_boomer->update();
#else
      AssertThrow(false, dealii::ExcMessage("deal.II is not compiled with PETSc!"));
#endif
    }
    else if(data.amg_type == AMGType::ML)
    {
#ifdef DEAL_II_WITH_TRILINOS
      preconditioner_ml->update();
#else
      AssertThrow(false, dealii::ExcMessage("deal.II is not compiled with Trilinos!"));
#endif
    }
    else
    {
      AssertThrow(false, dealii::ExcNotImplemented());
    }

    this->update_needed = false;
  }

private:
  AMGData data;

  std::shared_ptr<PreconditionerBase<Number>> preconditioner_boomer;

  std::shared_ptr<PreconditionerBase<double>> preconditioner_ml;
};
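
/*
 * Usage sketch (hypothetical AMGData setup; `laplace_operator` as above):
 *
 *   AMGData data;
 *   data.amg_type = AMGType::BoomerAMG; // or AMGType::ML
 *   PreconditionerAMG<LaplaceOperator<3, double>, double>
 *     preconditioner(laplace_operator, true, data);
 *
 *   // apply as coarse-grid solver with a CG iteration on top:
 *   preconditioner.apply_krylov_solver_with_amg_preconditioner(
 *     dst, src, MultigridCoarseGridSolver::CG, SolverData());
 */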

} // namespace ExaDG

#endif /* PRECONDITIONER_AMG */