#ifndef PRECONDITIONER_AMG
#define PRECONDITIONER_AMG

#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
#include <deal.II/lac/trilinos_precondition.h>
#include <deal.II/lac/trilinos_sparse_matrix.h>

#ifdef DEAL_II_WITH_TRILINOS
#  include <ml_MultiLevelPreconditioner.h>
#endif

#include <exadg/solvers_and_preconditioners/multigrid/multigrid_parameters.h>
#include <exadg/solvers_and_preconditioners/preconditioners/preconditioner_base.h>
#include <exadg/solvers_and_preconditioners/solvers/iterative_solvers_dealii_wrapper.h>
#include <exadg/solvers_and_preconditioners/utilities/linear_algebra_utilities.h>
#include <exadg/utilities/print_functions.h>

namespace ExaDG
{
template<int dim, int spacedim>
std::unique_ptr<MPI_Comm, void (*)(MPI_Comm *)>
create_subcommunicator(dealii::DoFHandler<dim, spacedim> const & dof_handler)
{
  unsigned int n_locally_owned_cells = 0;
  for(auto const & cell : dof_handler.active_cell_iterators())
    if(cell->is_locally_owned())
      ++n_locally_owned_cells;

  MPI_Comm const mpi_comm = dof_handler.get_communicator();

  // If some ranks do not own any cells, create a sub-communicator containing only the
  // ranks with locally owned cells, so that the matrix-based AMG setup does not involve
  // empty processes.
  if(dealii::Utilities::MPI::min(n_locally_owned_cells, mpi_comm) == 0)
  {
    std::unique_ptr<MPI_Comm, void (*)(MPI_Comm *)> subcommunicator(new MPI_Comm,
                                                                    [](MPI_Comm * comm) {
                                                                      MPI_Comm_free(comm);
                                                                      delete comm;
                                                                    });
    MPI_Comm_split(mpi_comm,
                   n_locally_owned_cells > 0,
                   dealii::Utilities::MPI::this_mpi_process(mpi_comm),
                   subcommunicator.get());

    return subcommunicator;
  }
  else
  {
    // All ranks own cells: return the original communicator. The deleter only frees the
    // MPI_Comm object itself, since the communicator is not owned by this function.
    std::unique_ptr<MPI_Comm, void (*)(MPI_Comm *)> communicator(new MPI_Comm,
                                                                 [](MPI_Comm * comm) {
                                                                   delete comm;
                                                                 });
    *communicator = mpi_comm;

    return communicator;
  }
}
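/*
 * Usage sketch (illustration only, not part of the original interface): the
 * sub-communicator is typically created from the DoFHandler underlying the matrix-free
 * operator and then used to initialize the distributed system matrix, e.g.
 * (dof_handler and pde_operator are assumed names):
 *
 *   auto subcommunicator = create_subcommunicator(dof_handler);
 *   dealii::PETScWrappers::MPI::SparseMatrix system_matrix;
 *   pde_operator.init_system_matrix(system_matrix, *subcommunicator);
 *
 * Ranks without locally owned cells end up in a separate communicator and therefore do
 * not participate in the matrix-based AMG setup.
 */
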
#ifdef DEAL_II_WITH_TRILINOS
inline Teuchos::ParameterList
get_ML_parameter_list(dealii::TrilinosWrappers::PreconditionAMG::AdditionalData const & ml_data,
                      unsigned int const                                                dimension)
{
  Teuchos::ParameterList parameter_list;

  // start from the ML defaults for elliptic ("SA") or non-symmetric ("NSSA") problems
  if(ml_data.elliptic == true)
  {
    ML_Epetra::SetDefaults("SA", parameter_list);
  }
  else
  {
    ML_Epetra::SetDefaults("NSSA", parameter_list);
    parameter_list.set("aggregation: block scaling", true);
  }

  parameter_list.set("aggregation: type", "Uncoupled");
  parameter_list.set("smoother: type", ml_data.smoother_type);
  parameter_list.set("coarse: type", ml_data.coarse_type);

  // force re-initialization of the random seed to make ML deterministic
# if DEAL_II_TRILINOS_VERSION_GTE(12, 4, 0)
  parameter_list.set("initialize random seed", true);
# endif

  parameter_list.set("smoother: sweeps", static_cast<int>(ml_data.smoother_sweeps));
  parameter_list.set("cycle applications", static_cast<int>(ml_data.n_cycles));
  if(ml_data.w_cycle == true)
  {
    parameter_list.set("prec type", "MGW");
  }
  else
  {
    parameter_list.set("prec type", "MGV");
  }

  parameter_list.set("smoother: Chebyshev alpha", 10.);
  parameter_list.set("smoother: ifpack overlap", static_cast<int>(ml_data.smoother_overlap));
  parameter_list.set("aggregation: threshold", ml_data.aggregation_threshold);

  parameter_list.set("coarse: max size", 2000);

  // repartition the coarse problems to reduce communication
  parameter_list.set("repartition: enable", 1);
  parameter_list.set("repartition: max min ratio", 1.3);
  parameter_list.set("repartition: min per proc", 300);
  parameter_list.set("repartition: partitioner", "Zoltan");
  parameter_list.set("repartition: Zoltan dimensions", static_cast<int>(dimension));

  if(ml_data.output_details)
  {
    parameter_list.set("ML output", 10);
  }
  else
  {
    parameter_list.set("ML output", 0);
  }

  return parameter_list;
}
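/*
 * Illustration only: the parameter list assembled above is consumed by
 * dealii::TrilinosWrappers::PreconditionAMG::initialize() inside PreconditionerML::update()
 * below. It could equally be handed to ML directly, e.g. (matrix is an assumed name):
 *
 *   Teuchos::ParameterList parameters = get_ML_parameter_list(ml_data, dim);
 *   ML_Epetra::MultiLevelPreconditioner ml_prec(matrix.trilinos_matrix(), parameters);
 */
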
template<typename Operator>
class PreconditionerML : public PreconditionerBase<double>
{
private:
  typedef dealii::LinearAlgebra::distributed::Vector<double> VectorType;

  typedef dealii::TrilinosWrappers::PreconditionAMG::AdditionalData MLData;

public:
  // distributed sparse system matrix
  dealii::TrilinosWrappers::SparseMatrix system_matrix;

  // AMG preconditioner
  dealii::TrilinosWrappers::PreconditionAMG amg;

  PreconditionerML(Operator const & op, bool const initialize, MLData ml_data = MLData())
    : pde_operator(op), ml_data(ml_data)
  {
    // initialize the system matrix with the parallel sparsity pattern
    pde_operator.init_system_matrix(system_matrix,
                                    op.get_matrix_free().get_dof_handler().get_communicator());

    if(initialize)
      this->update();
  }

  void
  vmult(VectorType & dst, VectorType const & src) const override
  {
    amg.vmult(dst, src);
  }
  void
  apply_krylov_solver_with_amg_preconditioner(VectorType &                      dst,
                                              VectorType const &                src,
                                              MultigridCoarseGridSolver const & solver_type,
                                              SolverData const &                solver_data) const
  {
    dealii::ReductionControl solver_control(solver_data.max_iter,
                                            solver_data.abs_tol,
                                            solver_data.rel_tol);

    if(solver_type == MultigridCoarseGridSolver::CG)
    {
      dealii::SolverCG<VectorType> solver(solver_control);
      solver.solve(system_matrix, dst, src, *this);
    }
    else if(solver_type == MultigridCoarseGridSolver::GMRES)
    {
      typename dealii::SolverGMRES<VectorType>::AdditionalData gmres_data;
      gmres_data.max_n_tmp_vectors     = solver_data.max_krylov_size;
      gmres_data.right_preconditioning = true;

      dealii::SolverGMRES<VectorType> solver(solver_control, gmres_data);
      solver.solve(system_matrix, dst, src, *this);
    }
    else
    {
      AssertThrow(false, dealii::ExcMessage("Not implemented."));
    }
  }
  void
  update() override
  {
    // clear the matrix content since calculate_system_matrix() adds its result
    system_matrix *= 0.0;

    // re-calculate the system matrix
    pde_operator.calculate_system_matrix(system_matrix);

    // assemble the ML parameter list
    unsigned int const     dimension      = pde_operator.get_matrix_free().dimension;
    Teuchos::ParameterList parameter_list = get_ML_parameter_list(ml_data, dimension);

    // provide near null space basis vectors (constant modes) to the AMG setup
    std::vector<std::vector<bool>>   constant_modes;
    std::vector<std::vector<double>> constant_modes_values;
    pde_operator.get_constant_modes(constant_modes, constant_modes_values);
    MPI_Comm const & mpi_comm = system_matrix.get_mpi_communicator();
    bool const       some_constant_mode_set =
      dealii::Utilities::MPI::logical_or(constant_modes.size() > 0, mpi_comm);
    if(not some_constant_mode_set)
    {
      bool const some_constant_mode_values_set =
        dealii::Utilities::MPI::logical_or(constant_modes_values.size() > 0, mpi_comm);
      AssertThrow(some_constant_mode_values_set,
                  dealii::ExcMessage(
                    "Neither `constant_modes` nor `constant_modes_values` were provided. "
                    "AMG setup requires near null space basis vectors."));
      ml_data.constant_modes_values = constant_modes_values;
    }
    else
    {
      ml_data.constant_modes = constant_modes;
    }

    std::unique_ptr<Epetra_MultiVector> ptr_operator_modes;
    ml_data.set_operator_null_space(parameter_list,
                                    ptr_operator_modes,
                                    system_matrix.trilinos_matrix());

    // (re-)initialize the AMG preconditioner
    amg.initialize(system_matrix, parameter_list);

    this->update_needed = false;
  }

private:
  // reference to the underlying matrix-free operator
  Operator const & pde_operator;

  MLData ml_data;
};
#endif
#ifdef DEAL_II_WITH_PETSC
template<typename Operator, typename Number>
class PreconditionerBoomerAMG : public PreconditionerBase<Number>
{
private:
  typedef dealii::LinearAlgebra::distributed::Vector<Number> VectorType;

  typedef dealii::PETScWrappers::PreconditionBoomerAMG::AdditionalData BoomerData;

  // subcommunicator; declared before the matrix so that it is destroyed after the
  // matrix and the preconditioner that depend on it
  std::unique_ptr<MPI_Comm, void (*)(MPI_Comm *)> subcommunicator;

public:
  // distributed sparse system matrix
  dealii::PETScWrappers::MPI::SparseMatrix system_matrix;

  // BoomerAMG preconditioner, also accessed by the PETSc Krylov solvers below
  dealii::PETScWrappers::PreconditionBoomerAMG amg;

  PreconditionerBoomerAMG(Operator const & op,
                          bool const       initialize,
                          BoomerData       boomer_data = BoomerData())
    : subcommunicator(
        create_subcommunicator(op.get_matrix_free().get_dof_handler(op.get_dof_index()))),
      pde_operator(op),
      boomer_data(boomer_data)
  {
    // initialize the system matrix on the subcommunicator
    pde_operator.init_system_matrix(system_matrix, *subcommunicator);

    if(initialize)
      this->update();
  }

  ~PreconditionerBoomerAMG()
  {
    if(system_matrix.m() > 0)
    {
      PetscErrorCode ierr = VecDestroy(&petsc_vector_dst);
      AssertThrow(ierr == 0, dealii::ExcPETScError(ierr));
      ierr = VecDestroy(&petsc_vector_src);
      AssertThrow(ierr == 0, dealii::ExcPETScError(ierr));
    }
  }
  void
  vmult(VectorType & dst, VectorType const & src) const override
  {
    if(system_matrix.m() > 0)
      apply_petsc_operation(dst,
                            src,
                            petsc_vector_dst,
                            petsc_vector_src,
                            [&](dealii::PETScWrappers::VectorBase &       petsc_dst,
                                dealii::PETScWrappers::VectorBase const & petsc_src) {
                              amg.vmult(petsc_dst, petsc_src);
                            });
  }
  void
  apply_krylov_solver_with_amg_preconditioner(VectorType &                      dst,
                                              VectorType const &                src,
                                              MultigridCoarseGridSolver const & solver_type,
                                              SolverData const &                solver_data) const
  {
    apply_petsc_operation(dst,
                          src,
                          system_matrix.get_mpi_communicator(),
                          [&](dealii::PETScWrappers::VectorBase &       petsc_dst,
                              dealii::PETScWrappers::VectorBase const & petsc_src) {
                            dealii::ReductionControl solver_control(solver_data.max_iter,
                                                                    solver_data.abs_tol,
                                                                    solver_data.rel_tol);

                            if(solver_type == MultigridCoarseGridSolver::CG)
                            {
                              dealii::PETScWrappers::SolverCG solver(solver_control);
                              solver.solve(system_matrix, petsc_dst, petsc_src, amg);
                            }
                            else if(solver_type == MultigridCoarseGridSolver::GMRES)
                            {
                              dealii::PETScWrappers::SolverGMRES solver(solver_control);
                              solver.solve(system_matrix, petsc_dst, petsc_src, amg);
                            }
                            else
                            {
                              AssertThrow(false, dealii::ExcMessage("Not implemented."));
                            }
                          });
  }
  void
  update() override
  {
    // clear the matrix since calculate_system_matrix() adds its result; ranks that are
    // not part of the subcommunicator hold an empty matrix and skip this step
    if(system_matrix.m() > 0)
      system_matrix = 0.0;

    calculate_preconditioner();

    this->update_needed = false;
  }

private:
  void
  calculate_preconditioner()
  {
    // assemble the matrix and set up BoomerAMG only on ranks of the subcommunicator
    if(system_matrix.m() > 0)
    {
      pde_operator.calculate_system_matrix(system_matrix);

      amg.initialize(system_matrix, boomer_data);

      // create PETSc vectors matching the parallel layout of the operator; they are
      // reused in every vmult() to avoid repeated allocation
      dealii::LinearAlgebra::distributed::Vector<typename Operator::value_type> vector;
      pde_operator.initialize_dof_vector(vector);
      VecCreateMPI(system_matrix.get_mpi_communicator(),
                   vector.get_partitioner()->locally_owned_size(),
                   PETSC_DETERMINE,
                   &petsc_vector_dst);
      VecCreateMPI(system_matrix.get_mpi_communicator(),
                   vector.get_partitioner()->locally_owned_size(),
                   PETSC_DETERMINE,
                   &petsc_vector_src);
    }
  }

  // reference to the underlying matrix-free operator
  Operator const & pde_operator;

  BoomerData boomer_data;

  // PETSc vector objects reused in every vmult() operation
  mutable Vec petsc_vector_src;
  mutable Vec petsc_vector_dst;
};
#endif
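/*
 * Unified AMG preconditioner that dispatches to PreconditionerBoomerAMG (PETSc/Hypre)
 * or PreconditionerML (Trilinos ML) depending on AMGData::amg_type.
 *
 * Minimal usage sketch (illustration only; the operator type and the AMGData defaults
 * are assumptions):
 *
 *   AMGData data;
 *   data.amg_type = AMGType::ML;
 *   PreconditionerAMG<LaplaceOperator, float> preconditioner(laplace_operator,
 *                                                             true, // initialize immediately
 *                                                             data);
 *   preconditioner.vmult(dst, src);
 */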
template<typename Operator, typename Number>
class PreconditionerAMG : public PreconditionerBase<Number>
{
private:
  typedef typename PreconditionerBase<Number>::VectorType VectorType;

public:
  PreconditionerAMG(Operator const & pde_operator, bool const initialize, AMGData const & data)
    : data(data)
  {
    if(data.amg_type == AMGType::BoomerAMG)
    {
#ifdef DEAL_II_WITH_PETSC
      preconditioner_boomer =
        std::make_shared<PreconditionerBoomerAMG<Operator, Number>>(pde_operator,
                                                                    initialize,
                                                                    data.boomer_data);
#else
      AssertThrow(false, dealii::ExcMessage("deal.II is not compiled with PETSc!"));
#endif
    }
    else if(data.amg_type == AMGType::ML)
    {
#ifdef DEAL_II_WITH_TRILINOS
      preconditioner_ml =
        std::make_shared<PreconditionerML<Operator>>(pde_operator, initialize, data.ml_data);
#else
      AssertThrow(false, dealii::ExcMessage("deal.II is not compiled with Trilinos!"));
#endif
    }
    else
    {
      AssertThrow(false, dealii::ExcNotImplemented());
    }
  }
  void
  vmult(VectorType & dst, VectorType const & src) const final
  {
    if(data.amg_type == AMGType::BoomerAMG)
    {
#ifdef DEAL_II_WITH_PETSC
      preconditioner_boomer->vmult(dst, src);
#else
      AssertThrow(false, dealii::ExcMessage("deal.II is not compiled with PETSc!"));
#endif
    }
    else if(data.amg_type == AMGType::ML)
    {
#ifdef DEAL_II_WITH_TRILINOS
      // the Trilinos ML preconditioner operates in double precision
      apply_function_in_double_precision(
        dst,
        src,
        [&](dealii::LinearAlgebra::distributed::Vector<double> &       dst_double,
            dealii::LinearAlgebra::distributed::Vector<double> const & src_double) {
          preconditioner_ml->vmult(dst_double, src_double);
        });
#else
      AssertThrow(false, dealii::ExcMessage("deal.II is not compiled with Trilinos!"));
#endif
    }
    else
    {
      AssertThrow(false, dealii::ExcNotImplemented());
    }
  }
  void
  apply_krylov_solver_with_amg_preconditioner(VectorType &                      dst,
                                              VectorType const &                src,
                                              MultigridCoarseGridSolver const & solver_type,
                                              SolverData const &                solver_data) const
  {
    if(data.amg_type == AMGType::BoomerAMG)
    {
#ifdef DEAL_II_WITH_PETSC
      std::shared_ptr<PreconditionerBoomerAMG<Operator, Number>> preconditioner =
        std::dynamic_pointer_cast<PreconditionerBoomerAMG<Operator, Number>>(preconditioner_boomer);

      preconditioner->apply_krylov_solver_with_amg_preconditioner(dst,
                                                                  src,
                                                                  solver_type,
                                                                  solver_data);
#else
      AssertThrow(false, dealii::ExcMessage("deal.II is not compiled with PETSc!"));
#endif
    }
    else if(data.amg_type == AMGType::ML)
    {
#ifdef DEAL_II_WITH_TRILINOS
      std::shared_ptr<PreconditionerML<Operator>> preconditioner =
        std::dynamic_pointer_cast<PreconditionerML<Operator>>(preconditioner_ml);

      apply_function_in_double_precision(
        dst,
        src,
        [&](dealii::LinearAlgebra::distributed::Vector<double> &       dst_double,
            dealii::LinearAlgebra::distributed::Vector<double> const & src_double) {
          preconditioner->apply_krylov_solver_with_amg_preconditioner(dst_double,
                                                                      src_double,
                                                                      solver_type,
                                                                      solver_data);
        });
#else
      AssertThrow(false, dealii::ExcMessage("deal.II is not compiled with Trilinos!"));
#endif
    }
    else
    {
      AssertThrow(false, dealii::ExcNotImplemented());
    }
  }
  void
  update() final
  {
    if(data.amg_type == AMGType::BoomerAMG)
    {
#ifdef DEAL_II_WITH_PETSC
      preconditioner_boomer->update();
#else
      AssertThrow(false, dealii::ExcMessage("deal.II is not compiled with PETSc!"));
#endif
    }
    else if(data.amg_type == AMGType::ML)
    {
#ifdef DEAL_II_WITH_TRILINOS
      preconditioner_ml->update();
#else
      AssertThrow(false, dealii::ExcMessage("deal.II is not compiled with Trilinos!"));
#endif
    }
    else
    {
      AssertThrow(false, dealii::ExcNotImplemented());
    }

    this->update_needed = false;
  }

private:
  AMGData data;

  std::shared_ptr<PreconditionerBase<Number>> preconditioner_boomer;

  std::shared_ptr<PreconditionerBase<double>> preconditioner_ml;
};

} // namespace ExaDG

#endif /* PRECONDITIONER_AMG */