coarse_grid_solvers.h
/*  ______________________________________________________________________
 *
 *  ExaDG - High-Order Discontinuous Galerkin for the Exa-Scale
 *
 *  Copyright (C) 2021 by the ExaDG authors
 *
 *  This program is free software: you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation, either version 3 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program. If not, see <https://www.gnu.org/licenses/>.
 *  ______________________________________________________________________
 */

#ifndef INCLUDE_SOLVERS_AND_PRECONDITIONERS_MGCOARSEGRIDSOLVERS_H_
#define INCLUDE_SOLVERS_AND_PRECONDITIONERS_MGCOARSEGRIDSOLVERS_H_

// deal.II
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/solver_control.h>
#include <deal.II/lac/solver_gmres.h>
#include <deal.II/multigrid/mg_base.h>
#include <deal.II/numerics/vector_tools_mean_value.h>

// ExaDG
#include <exadg/solvers_and_preconditioners/multigrid/smoothers/chebyshev_smoother.h>
#include <exadg/solvers_and_preconditioners/preconditioners/block_jacobi_preconditioner.h>
#include <exadg/solvers_and_preconditioners/preconditioners/jacobi_preconditioner.h>
#include <exadg/solvers_and_preconditioners/preconditioners/preconditioner_amg.h>
#include <exadg/solvers_and_preconditioners/preconditioners/preconditioner_base.h>
#include <exadg/solvers_and_preconditioners/solvers/iterative_solvers_dealii_wrapper.h>
#include <exadg/solvers_and_preconditioners/solvers/solver_data.h>
#include <exadg/solvers_and_preconditioners/utilities/petsc_operation.h>

namespace ExaDG
{
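/*
 * Abstract base class for coarse-grid solvers: deriving from
 * dealii::MGCoarseGridBase makes the solvers usable by deal.II's multigrid
 * implementation, while update() allows re-initialization whenever the
 * underlying operator changes.
 */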
template<typename Operator>
class CoarseGridSolverBase
  : public dealii::MGCoarseGridBase<
      dealii::LinearAlgebra::distributed::Vector<typename Operator::value_type>>
{
public:
  virtual ~CoarseGridSolverBase(){};

  virtual void
  update() = 0;
};

enum class KrylovSolverType
{
  CG,
  GMRES
};

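/*
 * Coarse-grid solver based on a Krylov method (CG or GMRES), optionally
 * preconditioned by point-Jacobi, block-Jacobi, or AMG. If AMG is selected,
 * the Krylov solver operates in double precision on the assembled coarse
 * system matrix; otherwise, it operates directly on the multigrid-level
 * operator in precision MultigridNumber.
 */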
template<typename Operator>
class MGCoarseKrylov : public CoarseGridSolverBase<Operator>
{
public:
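  // AMG preconditioners are set up and applied in double precision,
  // independent of the (possibly lower) precision of the multigrid levels.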
  typedef double NumberAMG;

  typedef typename Operator::value_type MultigridNumber;

  typedef dealii::LinearAlgebra::distributed::Vector<MultigridNumber> VectorType;

  typedef dealii::LinearAlgebra::distributed::Vector<NumberAMG> VectorTypeAMG;

  struct AdditionalData
  {
    // Constructor.
    AdditionalData()
      : solver_type(KrylovSolverType::CG),
        solver_data(SolverData(1e4, 1.e-12, 1.e-3, 100)),
        operator_is_singular(false),
        preconditioner(MultigridCoarseGridPreconditioner::None),
        amg_data(AMGData())
    {
    }

    // Type of Krylov solver
    KrylovSolverType solver_type;

    // Solver data
    SolverData solver_data;

    // In the case of a singular operator (with constant vectors spanning the
    // nullspace), the rhs vector has to be projected onto the space of vectors
    // with zero mean prior to solving the coarse-grid problem.
    bool operator_is_singular;

    // Preconditioner
    MultigridCoarseGridPreconditioner preconditioner;

    // Configuration of AMG settings
    AMGData amg_data;
  };

  MGCoarseKrylov(Operator const &       pde_operator_in,
                 bool const             initialize,
                 AdditionalData const & additional_data,
                 MPI_Comm const &       comm)
    : pde_operator(pde_operator_in), additional_data(additional_data), mpi_comm(comm)
  {
    if(additional_data.preconditioner == MultigridCoarseGridPreconditioner::PointJacobi)
    {
      preconditioner = std::make_shared<JacobiPreconditioner<Operator>>(pde_operator, initialize);

      std::shared_ptr<JacobiPreconditioner<Operator>> jacobi =
        std::dynamic_pointer_cast<JacobiPreconditioner<Operator>>(preconditioner);
      AssertDimension(jacobi->get_size_of_diagonal(), pde_operator.m());
    }
    else if(additional_data.preconditioner == MultigridCoarseGridPreconditioner::BlockJacobi)
    {
      preconditioner =
        std::make_shared<BlockJacobiPreconditioner<Operator>>(pde_operator, initialize);
    }
    else if(additional_data.preconditioner == MultigridCoarseGridPreconditioner::AMG)
    {
      if(additional_data.amg_data.amg_type == AMGType::ML)
      {
#ifdef DEAL_II_WITH_TRILINOS
        preconditioner_amg = std::make_shared<PreconditionerML<Operator, NumberAMG>>(
          pde_operator, initialize, additional_data.amg_data.ml_data);
#else
        AssertThrow(false, dealii::ExcMessage("deal.II is not compiled with Trilinos!"));
#endif
      }
      else if(additional_data.amg_data.amg_type == AMGType::BoomerAMG)
      {
#ifdef DEAL_II_WITH_PETSC
        preconditioner_amg = std::make_shared<PreconditionerBoomerAMG<Operator, NumberAMG>>(
          pde_operator, initialize, additional_data.amg_data.boomer_data);
#else
        AssertThrow(false, dealii::ExcMessage("deal.II is not compiled with PETSc!"));
#endif
      }
      else
      {
        AssertThrow(false, dealii::ExcNotImplemented());
      }
    }
    else
    {
      AssertThrow(
        additional_data.preconditioner == MultigridCoarseGridPreconditioner::None or
          additional_data.preconditioner == MultigridCoarseGridPreconditioner::PointJacobi or
          additional_data.preconditioner == MultigridCoarseGridPreconditioner::BlockJacobi or
          additional_data.preconditioner == MultigridCoarseGridPreconditioner::AMG,
        dealii::ExcMessage(
          "Specified preconditioner for Krylov coarse-grid solver not implemented."));
    }
  }

  void
  update() final
  {
    if(additional_data.preconditioner == MultigridCoarseGridPreconditioner::None)
    {
      // do nothing
    }
    else if(additional_data.preconditioner == MultigridCoarseGridPreconditioner::PointJacobi or
            additional_data.preconditioner == MultigridCoarseGridPreconditioner::BlockJacobi)
    {
      preconditioner->update();
    }
    else if(additional_data.preconditioner == MultigridCoarseGridPreconditioner::AMG)
    {
      preconditioner_amg->update();
    }
    else
    {
      AssertThrow(false, dealii::ExcMessage("Not implemented."));
    }
  }

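  // Solve the coarse-grid problem for the given right-hand side src; this is
  // the interface invoked by deal.II's multigrid algorithm through
  // dealii::MGCoarseGridBase.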
  void
  operator()(unsigned int const, VectorType & dst, VectorType const & src) const final
  {
    VectorType r(src);
    if(additional_data.operator_is_singular)
      dealii::VectorTools::subtract_mean_value(r);

    if(additional_data.preconditioner == MultigridCoarseGridPreconditioner::AMG)
    {
      if(additional_data.amg_data.amg_type == AMGType::ML)
      {
#ifdef DEAL_II_WITH_TRILINOS
        // create temporary vectors of type NumberAMG (double)
        VectorTypeAMG dst_tri;
        dst_tri.reinit(dst, false);
        VectorTypeAMG src_tri;
        src_tri.reinit(r, true);
        src_tri.copy_locally_owned_data_from(r);

        std::shared_ptr<PreconditionerML<Operator, NumberAMG>> coarse_operator =
          std::dynamic_pointer_cast<PreconditionerML<Operator, NumberAMG>>(preconditioner_amg);

        dealii::ReductionControl solver_control(additional_data.solver_data.max_iter,
                                                additional_data.solver_data.abs_tol,
                                                additional_data.solver_data.rel_tol);

        if(additional_data.solver_type == KrylovSolverType::CG)
        {
          dealii::SolverCG<VectorTypeAMG> solver(solver_control);
          solver.solve(coarse_operator->system_matrix, dst_tri, src_tri, *preconditioner_amg);
        }
        else if(additional_data.solver_type == KrylovSolverType::GMRES)
        {
          typename dealii::SolverGMRES<VectorTypeAMG>::AdditionalData gmres_data;
          gmres_data.max_n_tmp_vectors     = additional_data.solver_data.max_krylov_size;
          gmres_data.right_preconditioning = true;

          dealii::SolverGMRES<VectorTypeAMG> solver(solver_control, gmres_data);
          solver.solve(coarse_operator->system_matrix, dst_tri, src_tri, *preconditioner_amg);
        }
        else
        {
          AssertThrow(false, dealii::ExcMessage("Not implemented."));
        }

        // convert back: NumberAMG (double) -> MultigridNumber (typically float)
        dst.copy_locally_owned_data_from(dst_tri);
#endif
      }
      else if(additional_data.amg_data.amg_type == AMGType::BoomerAMG)
      {
#ifdef DEAL_II_WITH_PETSC
        apply_petsc_operation(
          dst,
          src,
          std::dynamic_pointer_cast<PreconditionerBoomerAMG<Operator, NumberAMG>>(
            preconditioner_amg)
            ->system_matrix.get_mpi_communicator(),
          [&](dealii::PETScWrappers::VectorBase &       petsc_dst,
              dealii::PETScWrappers::VectorBase const & petsc_src) {
            std::shared_ptr<PreconditionerBoomerAMG<Operator, NumberAMG>> coarse_operator =
              std::dynamic_pointer_cast<PreconditionerBoomerAMG<Operator, NumberAMG>>(
                preconditioner_amg);

            dealii::ReductionControl solver_control(additional_data.solver_data.max_iter,
                                                    additional_data.solver_data.abs_tol,
                                                    additional_data.solver_data.rel_tol);

            if(additional_data.solver_type == KrylovSolverType::CG)
            {
              dealii::PETScWrappers::SolverCG solver(solver_control);
              solver.solve(coarse_operator->system_matrix,
                           petsc_dst,
                           petsc_src,
                           coarse_operator->amg);
            }
            else if(additional_data.solver_type == KrylovSolverType::GMRES)
            {
              dealii::PETScWrappers::SolverGMRES solver(solver_control);
              solver.solve(coarse_operator->system_matrix,
                           petsc_dst,
                           petsc_src,
                           coarse_operator->amg);
            }
            else
            {
              AssertThrow(false, dealii::ExcMessage("Not implemented."));
            }
          });
#endif
      }
      else
      {
        AssertThrow(false, dealii::ExcNotImplemented());
      }
    }
    else
    {
      std::shared_ptr<Krylov::SolverBase<VectorType>> solver;

      if(additional_data.solver_type == KrylovSolverType::CG)
      {
        Krylov::SolverDataCG solver_data;
        solver_data.max_iter             = additional_data.solver_data.max_iter;
        solver_data.solver_tolerance_abs = additional_data.solver_data.abs_tol;
        solver_data.solver_tolerance_rel = additional_data.solver_data.rel_tol;

        if(additional_data.preconditioner == MultigridCoarseGridPreconditioner::None)
        {
          solver_data.use_preconditioner = false;
        }
        else if(additional_data.preconditioner == MultigridCoarseGridPreconditioner::PointJacobi or
                additional_data.preconditioner == MultigridCoarseGridPreconditioner::BlockJacobi)
        {
          solver_data.use_preconditioner = true;
        }
        else
        {
          AssertThrow(false, dealii::ExcMessage("Not implemented."));
        }

        solver.reset(
          new Krylov::SolverCG<Operator, PreconditionerBase<MultigridNumber>, VectorType>(
            pde_operator, *preconditioner, solver_data));
      }
      else if(additional_data.solver_type == KrylovSolverType::GMRES)
      {
        Krylov::SolverDataGMRES solver_data;

        solver_data.max_iter             = additional_data.solver_data.max_iter;
        solver_data.solver_tolerance_abs = additional_data.solver_data.abs_tol;
        solver_data.solver_tolerance_rel = additional_data.solver_data.rel_tol;
        solver_data.max_n_tmp_vectors    = additional_data.solver_data.max_krylov_size;

        if(additional_data.preconditioner == MultigridCoarseGridPreconditioner::None)
        {
          solver_data.use_preconditioner = false;
        }
        else if(additional_data.preconditioner == MultigridCoarseGridPreconditioner::PointJacobi or
                additional_data.preconditioner == MultigridCoarseGridPreconditioner::BlockJacobi)
        {
          solver_data.use_preconditioner = true;
        }
        else
        {
          AssertThrow(false, dealii::ExcMessage("Not implemented."));
        }

        solver.reset(
          new Krylov::SolverGMRES<Operator, PreconditionerBase<MultigridNumber>, VectorType>(
            pde_operator, *preconditioner, solver_data, mpi_comm));
      }
      else
      {
        AssertThrow(false, dealii::ExcMessage("Not implemented."));
      }

      // Note that the preconditioner has already been updated
      solver->solve(dst, src);
    }
  }

private:
  Operator const & pde_operator;

  std::shared_ptr<PreconditionerBase<MultigridNumber>> preconditioner;

  // we need a separate object here because the AMG preconditioners need double precision
  std::shared_ptr<PreconditionerBase<NumberAMG>> preconditioner_amg;

  AdditionalData additional_data;

  MPI_Comm const mpi_comm;
};

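/*
 * Coarse-grid solver that solves the coarse problem approximately by a single
 * Chebyshev iteration of high degree, where the degree is derived from the
 * Chebyshev error bound so that a given relative tolerance is reached.
 */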
template<typename Operator>
class MGCoarseChebyshev : public CoarseGridSolverBase<Operator>
{
public:
  typedef typename Operator::value_type MultigridNumber;

  typedef dealii::LinearAlgebra::distributed::Vector<MultigridNumber> VectorType;

  typedef dealii::PreconditionChebyshev<Operator, VectorType, dealii::DiagonalMatrix<VectorType>>
    DealiiChebyshev;

  MGCoarseChebyshev(Operator const &                          coarse_operator_in,
                    bool const                                initialize_preconditioner_in,
                    double const                              relative_tolerance_in,
                    MultigridCoarseGridPreconditioner const & preconditioner,
                    bool const                                operator_is_singular_in)
    : coarse_operator(coarse_operator_in),
      relative_tolerance(relative_tolerance_in),
      operator_is_singular(operator_is_singular_in)
  {
    AssertThrow(preconditioner == MultigridCoarseGridPreconditioner::PointJacobi,
                dealii::ExcMessage(
                  "Only PointJacobi preconditioner implemented for Chebyshev coarse-grid solver."));

    if(initialize_preconditioner_in)
    {
      update();
    }
  }

  void
  update() final
  {
    // use a Chebyshev smoother of high degree to solve the coarse-grid problem approximately
    typename DealiiChebyshev::AdditionalData dealii_additional_data;

    std::shared_ptr<dealii::DiagonalMatrix<VectorType>> diagonal_matrix =
      std::make_shared<dealii::DiagonalMatrix<VectorType>>();
    VectorType & diagonal_vector = diagonal_matrix->get_vector();

    coarse_operator.initialize_dof_vector(diagonal_vector);
    coarse_operator.calculate_inverse_diagonal(diagonal_vector);

    std::pair<double, double> eigenvalues =
      compute_eigenvalues(coarse_operator, diagonal_vector, operator_is_singular);

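    // safety factor applied to the eigenvalue estimates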
    double const factor = 1.1;

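    // Setting the smoothing range to (a bound on) the condition number means
    // that the entire spectrum is targeted, so that the Chebyshev iteration
    // acts as an approximate solver rather than as a smoother.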
    dealii_additional_data.preconditioner  = diagonal_matrix;
    dealii_additional_data.max_eigenvalue  = factor * eigenvalues.second;
    dealii_additional_data.smoothing_range = eigenvalues.second / eigenvalues.first * factor;

    double const sigma = (1. - std::sqrt(1. / dealii_additional_data.smoothing_range)) /
                         (1. + std::sqrt(1. / dealii_additional_data.smoothing_range));

    // estimate the number of Chebyshev iterations needed to reach the specified
    // relative solver tolerance
    double const eps = relative_tolerance;

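    // After n iterations, the Chebyshev error bound is
    //   ||e_n||_A <= 2 sigma^n / (1 + sigma^(2n)) ||e_0||_A
    //             =  1 / cosh(n * log(1/sigma)) ||e_0||_A ,
    // so requiring a reduction by eps yields
    //   n = acosh(1/eps) / log(1/sigma)
    //     = log(1/eps + sqrt(1/eps^2 - 1)) / log(1/sigma).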
    dealii_additional_data.degree = static_cast<unsigned int>(
      std::log(1. / eps + std::sqrt(1. / eps / eps - 1.)) / std::log(1. / sigma));
    dealii_additional_data.eig_cg_n_iterations = 0;

    chebyshev_smoother = std::make_shared<DealiiChebyshev>();
    chebyshev_smoother->initialize(coarse_operator, dealii_additional_data);
  }

  void
  operator()(unsigned int const level, VectorType & dst, VectorType const & src) const final
  {
    AssertThrow(chebyshev_smoother.get() != nullptr,
                dealii::ExcMessage("MGCoarseChebyshev: chebyshev_smoother is not initialized."));

    AssertThrow(level == 0, dealii::ExcNotImplemented());

    chebyshev_smoother->vmult(dst, src);
  }

private:
  Operator const & coarse_operator;
  double const     relative_tolerance;
  bool const       operator_is_singular;

  std::shared_ptr<DealiiChebyshev> chebyshev_smoother;
};

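/*
 * Coarse-grid solver that applies a single AMG cycle (Trilinos ML or PETSc
 * BoomerAMG) as an approximate solver, converting between the multigrid
 * precision and the double precision required by AMG.
 */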
template<typename Operator>
class MGCoarseAMG : public CoarseGridSolverBase<Operator>
{
private:
  typedef double NumberAMG;

  typedef dealii::LinearAlgebra::distributed::Vector<NumberAMG> VectorTypeAMG;

  typedef dealii::LinearAlgebra::distributed::Vector<typename Operator::value_type>
    VectorTypeMultigrid;

public:
  MGCoarseAMG(Operator const & op, bool const initialize, AMGData data = AMGData())
  {
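    // silence unused-parameter warnings in case deal.II is configured without
    // Trilinos and PETSc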
    (void)op;
    (void)data;

    if(data.amg_type == AMGType::BoomerAMG)
    {
#ifdef DEAL_II_WITH_PETSC
      amg_preconditioner =
        std::make_shared<PreconditionerBoomerAMG<Operator, NumberAMG>>(op,
                                                                       initialize,
                                                                       data.boomer_data);
#else
      AssertThrow(false, dealii::ExcMessage("deal.II is not compiled with PETSc!"));
#endif
    }
    else if(data.amg_type == AMGType::ML)
    {
#ifdef DEAL_II_WITH_TRILINOS
      amg_preconditioner =
        std::make_shared<PreconditionerML<Operator, NumberAMG>>(op, initialize, data.ml_data);
#else
      AssertThrow(false, dealii::ExcMessage("deal.II is not compiled with Trilinos!"));
#endif
    }
    else
    {
      AssertThrow(false, dealii::ExcNotImplemented());
    }
  }

  void
  update() final
  {
    amg_preconditioner->update();
  }

  void
  operator()(unsigned int const /*level*/,
             VectorTypeMultigrid &       dst,
             VectorTypeMultigrid const & src) const final
  {
    // create temporary vectors of type VectorTypeAMG (double)
    VectorTypeAMG dst_amg;
    dst_amg.reinit(dst, false);
    VectorTypeAMG src_amg;
    src_amg.reinit(src, true);

    // convert: VectorTypeMultigrid -> VectorTypeAMG
    src_amg.copy_locally_owned_data_from(src);

    // apply AMG as approximate solver
    amg_preconditioner->vmult(dst_amg, src_amg);

    // convert: VectorTypeAMG -> VectorTypeMultigrid
    dst.copy_locally_owned_data_from(dst_amg);
  }

private:
  std::shared_ptr<PreconditionerBase<NumberAMG>> amg_preconditioner;
};
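
/*
 * Usage sketch (illustrative only): names such as LevelOperator,
 * coarse_level_operator, and mpi_comm are placeholders for objects provided
 * by the surrounding multigrid setup.
 *
 *   MGCoarseKrylov<LevelOperator>::AdditionalData additional_data;
 *   additional_data.solver_type    = KrylovSolverType::CG;
 *   additional_data.preconditioner = MultigridCoarseGridPreconditioner::PointJacobi;
 *
 *   std::shared_ptr<CoarseGridSolverBase<LevelOperator>> coarse_solver =
 *     std::make_shared<MGCoarseKrylov<LevelOperator>>(coarse_level_operator,
 *                                                     true, // initialize
 *                                                     additional_data,
 *                                                     mpi_comm);
 */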

} // namespace ExaDG

#endif /* INCLUDE_SOLVERS_AND_PRECONDITIONERS_MGCOARSEGRIDSOLVERS_H_ */