preconditioner_amg.h
/* ______________________________________________________________________
 *
 * ExaDG - High-Order Discontinuous Galerkin for the Exa-Scale
 *
 * Copyright (C) 2021 by the ExaDG authors
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 * ______________________________________________________________________
 */

#ifndef PRECONDITIONER_AMG
#define PRECONDITIONER_AMG

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
#include <deal.II/lac/trilinos_precondition.h>
#include <deal.II/lac/trilinos_sparse_matrix.h>

#include <exadg/solvers_and_preconditioners/multigrid/multigrid_parameters.h>
#include <exadg/solvers_and_preconditioners/preconditioners/preconditioner_base.h>
#include <exadg/solvers_and_preconditioners/utilities/petsc_operation.h>
#include <exadg/utilities/print_functions.h>

namespace ExaDG
{
template<int dim, int spacedim>
std::unique_ptr<MPI_Comm, void (*)(MPI_Comm *)>
create_subcommunicator(dealii::DoFHandler<dim, spacedim> const & dof_handler)
{
  unsigned int n_locally_owned_cells = 0;
  for(auto const & cell : dof_handler.active_cell_iterators())
    if(cell->is_locally_owned())
      ++n_locally_owned_cells;

  MPI_Comm const mpi_comm = dof_handler.get_communicator();

  // In case some of the MPI ranks do not have cells, we create a
  // sub-communicator to exclude all those processes from the MPI
  // communication in the matrix-based operations and hence speed up those
  // operations. Note that we have to free the communicator again, which is
  // done by a custom deleter of the unique pointer that is run when it goes
  // out of scope.
  if(dealii::Utilities::MPI::min(n_locally_owned_cells, mpi_comm) == 0)
  {
    std::unique_ptr<MPI_Comm, void (*)(MPI_Comm *)> subcommunicator(new MPI_Comm,
                                                                    [](MPI_Comm * comm) {
                                                                      MPI_Comm_free(comm);
                                                                      delete comm;
                                                                    });
    MPI_Comm_split(mpi_comm,
                   n_locally_owned_cells > 0,
                   dealii::Utilities::MPI::this_mpi_process(mpi_comm),
                   subcommunicator.get());

    return subcommunicator;
  }
  else
  {
    // all ranks own cells: simply wrap the original communicator; the
    // deleter only frees the heap allocation, not the communicator itself
    std::unique_ptr<MPI_Comm, void (*)(MPI_Comm *)> communicator(new MPI_Comm,
                                                                 [](MPI_Comm * comm) {
                                                                   delete comm;
                                                                 });
    *communicator = mpi_comm;

    return communicator;
  }
}
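
/*
 * Usage sketch (illustrative only; `dof_handler` is a placeholder for the
 * DoFHandler of the discretization, not part of this file):
 *
 *   auto sub_comm = create_subcommunicator(dof_handler);
 *   // ... matrix-based operations restricted to ranks that own cells ...
 *   // when sub_comm goes out of scope, its custom deleter frees the MPI
 *   // communicator (only if a true sub-communicator was created)
 */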

#ifdef DEAL_II_WITH_TRILINOS
template<typename Operator, typename Number>
class PreconditionerML : public PreconditionerBase<Number>
{
private:
  typedef dealii::LinearAlgebra::distributed::Vector<Number> VectorType;

  typedef dealii::TrilinosWrappers::PreconditionAMG::AdditionalData MLData;

public:
  // distributed sparse system matrix
  dealii::TrilinosWrappers::SparseMatrix system_matrix;

private:
  dealii::TrilinosWrappers::PreconditionAMG amg;

public:
  PreconditionerML(Operator const & op, bool const initialize, MLData ml_data = MLData())
    : pde_operator(op), ml_data(ml_data)
  {
    // initialize system matrix
    pde_operator.init_system_matrix(system_matrix,
                                    op.get_matrix_free().get_dof_handler().get_communicator());

    if(initialize)
    {
      this->update();
    }
  }

  dealii::TrilinosWrappers::SparseMatrix const &
  get_system_matrix()
  {
    return system_matrix;
  }

  void
  vmult(VectorType & dst, VectorType const & src) const override
  {
    amg.vmult(dst, src);
  }

  void
  update() override
  {
    // clear content of matrix since calculate_system_matrix() adds the result
    system_matrix *= 0.0;

    // re-calculate matrix
    pde_operator.calculate_system_matrix(system_matrix);

    // initialize Trilinos' AMG
    amg.initialize(system_matrix, ml_data);

    this->update_needed = false;
  }

private:
  // reference to matrix-free operator
  Operator const & pde_operator;

  MLData ml_data;
};
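
/*
 * Usage sketch (hypothetical names; assumes `laplace_operator` is a
 * matrix-free operator providing init_system_matrix() and
 * calculate_system_matrix(), and `dst`/`src` are distributed vectors):
 *
 *   dealii::TrilinosWrappers::PreconditionAMG::AdditionalData ml_data;
 *   PreconditionerML<LaplaceOperator, double> precond(laplace_operator,
 *                                                     true, // initialize
 *                                                     ml_data);
 *   precond.vmult(dst, src); // applies the AMG cycle
 */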
#endif

#ifdef DEAL_II_WITH_PETSC
/*
 * Wrapper class for BoomerAMG from Hypre
 */
template<typename Operator, typename Number>
class PreconditionerBoomerAMG : public PreconditionerBase<Number>
{
private:
  typedef dealii::LinearAlgebra::distributed::Vector<Number> VectorType;

  typedef dealii::PETScWrappers::PreconditionBoomerAMG::AdditionalData BoomerData;

  // raw PETSc vector handle; required for the pre-allocated work vectors
  // declared at the end of this class
  typedef Vec VectorTypePETSc;

  // subcommunicator; declared before the matrix to ensure that it gets
  // deleted after the matrix and preconditioner depending on it
  std::unique_ptr<MPI_Comm, void (*)(MPI_Comm *)> subcommunicator;

public:
  // distributed sparse system matrix
  dealii::PETScWrappers::MPI::SparseMatrix system_matrix;

  // amg preconditioner for access by PETSc solver
  dealii::PETScWrappers::PreconditionBoomerAMG amg;

  PreconditionerBoomerAMG(Operator const & op,
                          bool const initialize,
                          BoomerData boomer_data = BoomerData())
    : subcommunicator(
        create_subcommunicator(op.get_matrix_free().get_dof_handler(op.get_dof_index()))),
      pde_operator(op),
      boomer_data(boomer_data)
  {
    // initialize system matrix
    pde_operator.init_system_matrix(system_matrix, *subcommunicator);

    if(initialize)
    {
      this->update();
    }
  }

  ~PreconditionerBoomerAMG()
  {
    if(system_matrix.m() > 0)
    {
      PetscErrorCode ierr = VecDestroy(&petsc_vector_dst);
      AssertThrow(ierr == 0, dealii::ExcPETScError(ierr));
      ierr = VecDestroy(&petsc_vector_src);
      AssertThrow(ierr == 0, dealii::ExcPETScError(ierr));
    }
  }

  dealii::PETScWrappers::MPI::SparseMatrix const &
  get_system_matrix()
  {
    return system_matrix;
  }

  void
  vmult(VectorType & dst, VectorType const & src) const override
  {
    if(system_matrix.m() > 0)
      apply_petsc_operation(dst,
                            src,
                            petsc_vector_dst,
                            petsc_vector_src,
                            [&](dealii::PETScWrappers::VectorBase &       petsc_dst,
                                dealii::PETScWrappers::VectorBase const & petsc_src) {
                              amg.vmult(petsc_dst, petsc_src);
                            });
  }

  void
  update() override
  {
    // clear content of matrix since calculate_system_matrix() adds its
    // result; since we might run this on a sub-communicator, we skip the
    // processes that do not participate in the matrix and hence hold a
    // matrix of size zero
    if(system_matrix.m() > 0)
      system_matrix = 0.0;

    calculate_preconditioner();

    this->update_needed = false;
  }

private:
  void
  calculate_preconditioner()
  {
    // calculate the matrix in case the current MPI rank participates in the
    // PETSc communicator
    if(system_matrix.m() > 0)
    {
      pde_operator.calculate_system_matrix(system_matrix);

      amg.initialize(system_matrix, boomer_data);

      // get vector partitioner
      dealii::LinearAlgebra::distributed::Vector<typename Operator::value_type> vector;
      pde_operator.initialize_dof_vector(vector);
      VecCreateMPI(system_matrix.get_mpi_communicator(),
                   vector.get_partitioner()->locally_owned_size(),
                   PETSC_DETERMINE,
                   &petsc_vector_dst);
      VecCreateMPI(system_matrix.get_mpi_communicator(),
                   vector.get_partitioner()->locally_owned_size(),
                   PETSC_DETERMINE,
                   &petsc_vector_src);
    }
  }

  // reference to matrix-free operator
  Operator const & pde_operator;

  BoomerData boomer_data;

  // PETSc vector objects to avoid re-allocation in every vmult() operation
  mutable VectorTypePETSc petsc_vector_src;
  mutable VectorTypePETSc petsc_vector_dst;
};
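
/*
 * Usage sketch (hypothetical names; `laplace_operator`, `dst`, `src` are
 * placeholders). Construction builds the sub-communicator and the PETSc
 * matrix; with initialize = true it also assembles the matrix and sets up
 * BoomerAMG on the participating ranks:
 *
 *   dealii::PETScWrappers::PreconditionBoomerAMG::AdditionalData boomer_data;
 *   PreconditionerBoomerAMG<LaplaceOperator, double> precond(laplace_operator,
 *                                                            true, // initialize
 *                                                            boomer_data);
 *   precond.vmult(dst, src);
 */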
#endif

/*
 * Unified AMG preconditioner that selects between PreconditionerBoomerAMG
 * (PETSc/Hypre) and PreconditionerML (Trilinos) depending on AMGData::amg_type.
 */
template<typename Operator, typename Number>
class PreconditionerAMG : public PreconditionerBase<Number>
{
private:
  typedef double NumberAMG;
  typedef typename PreconditionerBase<Number>::VectorType VectorType;
  typedef dealii::LinearAlgebra::distributed::Vector<NumberAMG> VectorTypeAMG;

public:
  PreconditionerAMG(Operator const & pde_operator, bool const initialize, AMGData const & data)
  {
    // the casts avoid unused-parameter warnings in case deal.II is compiled
    // without both PETSc and Trilinos
    (void)pde_operator;
    (void)initialize;
    (void)data;

    if(data.amg_type == AMGType::BoomerAMG)
    {
#ifdef DEAL_II_WITH_PETSC
      preconditioner_amg =
        std::make_shared<PreconditionerBoomerAMG<Operator, double>>(pde_operator,
                                                                    initialize,
                                                                    data.boomer_data);
#else
      AssertThrow(false, dealii::ExcMessage("deal.II is not compiled with PETSc!"));
#endif
    }
    else if(data.amg_type == AMGType::ML)
    {
#ifdef DEAL_II_WITH_TRILINOS
      preconditioner_amg = std::make_shared<PreconditionerML<Operator, double>>(pde_operator,
                                                                                initialize,
                                                                                data.ml_data);
#else
      AssertThrow(false, dealii::ExcMessage("deal.II is not compiled with Trilinos!"));
#endif
    }
    else
    {
      AssertThrow(false, dealii::ExcNotImplemented());
    }
  }

  void
  vmult(VectorType & dst, VectorType const & src) const final
  {
    if constexpr(std::is_same_v<Number, NumberAMG>)
    {
      preconditioner_amg->vmult(dst, src);
    }
    else
    {
      // create temporary vectors of type NumberAMG
      VectorTypeAMG dst_amg;
      dst_amg.reinit(dst, false);
      VectorTypeAMG src_amg;
      src_amg.reinit(src, true);
      src_amg = src;

      preconditioner_amg->vmult(dst_amg, src_amg);

      // convert: NumberAMG -> Number
      dst.copy_locally_owned_data_from(dst_amg);
    }
  }

private:
  void
  update() final
  {
    preconditioner_amg->update();

    this->update_needed = false;
  }

  std::shared_ptr<PreconditionerBase<NumberAMG>> preconditioner_amg;
};
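
/*
 * Usage sketch (hypothetical names; `laplace_operator`, `dst`, `src` are
 * placeholders; AMGData and AMGType come from multigrid_parameters.h). The
 * wrapper runs the AMG backend in double precision and converts the vectors
 * if Number is a different type, e.g. float:
 *
 *   AMGData data;
 *   data.amg_type = AMGType::BoomerAMG; // or AMGType::ML
 *   PreconditionerAMG<LaplaceOperator, float> precond(laplace_operator, true, data);
 *   precond.vmult(dst, src); // dst, src are float vectors
 */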

} // namespace ExaDG

#endif