diff --git a/demos/demo_references.bib b/demos/demo_references.bib index 5792042258..035a681015 100644 --- a/demos/demo_references.bib +++ b/demos/demo_references.bib @@ -394,3 +394,40 @@ @Article{Farrell2015 pages = {A2026--A2045}, doi = {10.1137/140984798}, } + +@article{Becker2001, + author = {Roland Becker and Rolf Rannacher}, + title = {An optimal control approach to a posteriori error estimation in finite element methods}, + journal = {Acta Numerica}, + volume = {10}, + pages = {1--102}, + year = {2001}, + doi = {10.1017/S0962492901000010}, +} + +@article{Rognes2010, +author = {Rognes, Marie E. and Logg, Anders}, +title = {Automated goal-oriented error control {I}: stationary variational problems}, +journal = {SIAM Journal on Scientific Computing}, +volume = {35}, +number = {3}, +pages = {C173--C193}, +year = {2013}, +doi = {10.1137/10081962X}, +} + +@inbook{Endtmayer2024, + ISBN={9780443294501}, + DOI={10.1016/bs.aams.2024.08.003}, + booktitle={Error Control, + Adaptive Discretizations, + and Applications, + Part 2}, + title={A posteriori single- and multi-goal error control and adaptivity for partial differential equations}, + author={Bernhard Endtmayer and Ulrich Langer and Thomas Richter and Andreas Schafelner and Thomas Wick}, + series={Advances in Applied Mechanics}, + volume={59}, + publisher={Elsevier}, + year={2024}, + pages={19--108} } + diff --git a/demos/goal_based_adaptivity_bvp/goal_based_adaptivity_bvp.py.rst b/demos/goal_based_adaptivity_bvp/goal_based_adaptivity_bvp.py.rst new file mode 100644 index 0000000000..8e099b26d0 --- /dev/null +++ b/demos/goal_based_adaptivity_bvp/goal_based_adaptivity_bvp.py.rst @@ -0,0 +1,135 @@ +Goal-based adaptivity for stationary boundary value problems +============================================================ + +.. 
rst-class:: emphasis + + The dual-weighted residual (DWR) method is a technique for designing global and local + error estimators for the error in a goal functional :math:`J(u)`, where :math:`u` + is the solution of a partial differential equation. Deriving the DWR method for + a specific problem usually involves substantial expertise, in deriving the appropriate + adjoint equation, residual formulation, etc. In this demo we show how the DWR method + can be automatically implemented in Firedrake for stationary boundary value problems. + + The demo was contributed by `Patrick Farrell `__, based on the MSc project of + `Joseph Flood `__. + +The dual-weighted residual (DWR) method :cite:`Becker2001` is a technique for designing global and local error estimators for the error in a goal functional :math:`J(u)`. While implementing DWR by hand involves substantial expertise, the high-level symbolic UFL representation of the problem to solve permits the *automation* of DWR :cite:`Rognes2010`. + +In this demo we demonstrate how to automatically apply DWR to a nonlinear stationary boundary-value problem, the :math:`p`-Laplacian: + +.. math:: + -\nabla \cdot \left( |\nabla u|^{p-2} \nabla u \right) = f \text{ in } \Omega, \quad u = 0 \text{ on } \partial \Omega. + +We solve the problem on a unit square with known analytical solution, so that we can compute effectivity indices of our error estimates. +Since we will be adapting the mesh, :doc:`we must build the domain with Netgen `. 
+Notice that we can set very coarse tolerances on the nonlinear solvers, as DWR also provides an estimate of the algebraic solver error: :: + + from firedrake import * + from netgen.occ import * + square = WorkPlane().Rectangle(1, 1).Face().bc("all") + square.edges.Max(Y).name = "top" + geo = OCCGeometry(square, dim=2) + ngmesh = geo.GenerateMesh(maxh=0.1) + mesh = Mesh(ngmesh) + + degree = 3 + V = FunctionSpace(mesh, "CG", degree) + (x, y) = SpatialCoordinate(mesh) + + p = Constant(5) + u_exact = x*(1-x)*y*(1-y)*exp(2*pi*x)*cos(pi*y) + f = -div(inner(grad(u_exact), grad(u_exact))**((p-2)/2) * grad(u_exact)) + + # Since the problem is highly nonlinear, for the purposes of this demo we will + # cheat and pick our initial guess really close to the exact solution. + u = Function(V, name="Solution") + u.interpolate(0.99*u_exact) + + v = TestFunction(V) + F = (inner(inner(grad(u), grad(u))**((p-2)/2) * grad(u), grad(v)) * dx(degree=degree+10) + - inner(f, v) * dx(degree=degree+10) + ) + bcs = DirichletBC(V, u_exact, "on_boundary") + solver_parameters = { + "snes_monitor": None, + "snes_atol": 1e-6, + "snes_rtol": 1e-1, # very coarse! + "snes_linesearch_monitor": None, + "snes_linesearch_type": "l2", + "snes_linesearch_maxlambda": 1} + +To apply goal-based adaptivity, we need a goal functional. For this we will employ the integral of the normal derivative of the solution on the top boundary: :: + + top = tuple(i + 1 for (i, name) in enumerate(ngmesh.GetRegionNames(codim=1)) if name == "top") + n = FacetNormal(mesh) + J = inner(grad(u), n)*ds(top) + +We now specify options for how the goal-based adaptivity should proceed. We choose to use an expensive/robust approach, +where the adjoint solution is approximated in a higher-degree function space, and where both the adjoint and primal residuals +are employed for the error estimate. 
This requires four solves on every grid (primal and adjoint solutions with degree :math:`p` +and :math:`p+1`), and gives a provably efficient and reliable error estimator under a saturation assumption up to a term that is cubic in the error :cite:`Endtmayer2024`. +It is possible to employ cheaper and more practical approximations by setting the options for the :code:`GoalAdaptiveNonlinearVariationalSolver` +appropriately, as discussed below. :: + + goal_adaptive_options = { + "max_iterations": 100, + "use_adjoint_residual": True, + "dual_low_method": "solve", + "primal_low_method": "solve", + "dorfler_alpha": 0.5, + "dual_extra_degree": 1, + "run_name": "p-laplace", + "output_dir": "./output", + } + +We then solve the problem, passing the goal functional :math:`J` and our specified tolerance. We also pass the exact solution, so that +the DWR automation can compute effectivity indices, but this is not generally required: :: + + tolerance = 1e-4 + problem = NonlinearVariationalProblem(F, u, bcs) + + adaptive_solver = GoalAdaptiveNonlinearVariationalSolver(problem, J, tolerance, + goal_adaptive_options=goal_adaptive_options, + primal_solver_parameters=solver_parameters, + exact_solution=u_exact) + adaptive_solution, error_estimate = adaptive_solver.solve() + +The initial error in the goal functional is :math:`-3.5 \times 10^{-2}`. The solver terminates with the goal functional computed to :math:`10^{-4}` after 4 refinements. Each nonlinear solve only required one Newton iteration. The error estimates :math:`\eta` are very accurate: their effectivity indices + +.. 
math:: + + I = \frac{\eta}{J(u) - J(u_h)} + +are very close to one throughout, except in the final step: + ++-----------------------+-------------------------------+ +| Number of refinements | Effectivity index :math:`I` | ++=======================+===============================+ +| 0 | 1.0034 | ++-----------------------+-------------------------------+ +| 1 | 1.0354 | ++-----------------------+-------------------------------+ +| 2 | 1.0749 | ++-----------------------+-------------------------------+ +| 3 | 1.0406 | ++-----------------------+-------------------------------+ +| 4 | 4.1940 | ++-----------------------+-------------------------------+ + +The effectivity index in the final step is larger than one because the solver error is larger than the discretisation error. (The code prints a warning to refine the solver tolerances if this happens, which in this case we can safely ignore.) + +Changing the tolerance to :math:`10^{-8}` takes 40 refinements. The resulting mesh is plotted below. The mesh resolution is adaptively concentrated at the top boundary, since the goal functional is localised there. + +.. image:: mesh_40.png + :align: center + :width: 60% + +We now discuss more practical variants. The configuration above solves four PDEs per adaptive step (primal and adjoint, degree :math:`p` and :math:`p+1`). Changing `goal_adaptive_options` to `{"use_adjoint_residual": False, "dual_low_method": "interpolate"}` instead only solves two PDEs per adaptive step (primal at degree :math:`p`, and adjoint at degree :math:`p+1`), and is thus much faster. For this problem with tolerance :math:`10^{-4}` this barely makes a difference to the effectivity indices: most are around 1, with only one step where :math:`I \approx 1.25`. We therefore recommend this as the default settings for production use. + +:demo:`A Python script version of this demo can be found here +`. + +.. rubric:: References + +.. 
bibliography:: demo_references.bib + :filter: docname in docnames diff --git a/demos/goal_based_adaptivity_bvp/mesh_40.png b/demos/goal_based_adaptivity_bvp/mesh_40.png new file mode 100644 index 0000000000..f8d8d056ae Binary files /dev/null and b/demos/goal_based_adaptivity_bvp/mesh_40.png differ diff --git a/docs/source/advanced_tut.rst b/docs/source/advanced_tut.rst index 5007d12f4b..152649e38e 100644 --- a/docs/source/advanced_tut.rst +++ b/docs/source/advanced_tut.rst @@ -35,3 +35,4 @@ element systems. Steady Boussinesq problem with integral constraints. Steady multicomponent flow -- microfluidic mixing of hydrocarbons. Deflation techniques for computing multiple solutions of nonlinear problems. + Goal-based adaptivity for stationary boundary value problems. diff --git a/firedrake/__init__.py b/firedrake/__init__.py index e0009c2b0e..689c902b93 100644 --- a/firedrake/__init__.py +++ b/firedrake/__init__.py @@ -169,6 +169,10 @@ def init_petsc(): from firedrake.matrix import ( # noqa: F401 MatrixBase, Matrix, ImplicitMatrix, AssembledMatrix ) +from firedrake.adaptive_variational_solver import ( # noqa: F401 + GoalAdaptiveNonlinearVariationalSolver +) + # Set default log level set_log_level(WARNING) diff --git a/firedrake/adaptive_variational_solver.py b/firedrake/adaptive_variational_solver.py new file mode 100644 index 0000000000..d418c13015 --- /dev/null +++ b/firedrake/adaptive_variational_solver.py @@ -0,0 +1,593 @@ +from firedrake.petsc import PETSc +from firedrake.function import Function +from firedrake.functionspace import FunctionSpace, TensorFunctionSpace +from firedrake.ufl_expr import TestFunction, TrialFunction, derivative, action, adjoint +from firedrake.variational_solver import (NonlinearVariationalProblem, NonlinearVariationalSolver, + LinearVariationalProblem, LinearVariationalSolver) +from firedrake.solving import solve +from firedrake.output import VTKFile +from firedrake.logging import RED + +from firedrake.preconditioners.pmg import PMGPC 
+from firedrake.mg.utils import get_level +from firedrake.mg.ufl_utils import refine +from firedrake.mg.adaptive_hierarchy import AdaptiveMeshHierarchy +from firedrake.mg.adaptive_transfer_manager import AdaptiveTransferManager + +from finat.ufl import BrokenElement, FiniteElement +from ufl import avg, dx, ds, dS, inner, as_vector, replace +import ufl +import numpy as np + + +__all__ = ["GoalAdaptiveNonlinearVariationalSolver"] + + +class GoalAdaptiveOptions: + def __init__(self, + dorfler_alpha: float = 0.5, + max_iterations: int = 10, + output_dir: str = "./output", + run_name: str = "default", + primal_extra_degree: int = 1, + dual_extra_degree: int = 1, + cell_residual_extra_degree: int = 1, + facet_residual_extra_degree: int = 1, + use_adjoint_residual: bool = False, + uniform_refinement: bool = False, + primal_low_method: str = "interpolate", + dual_low_method: str = "interpolate", + write_solution: str = "all", + write_solution_frequency: int | None = None, + verbose: bool = True, + ): + self.dorfler_alpha = dorfler_alpha # Dorfler marking threshold + self.max_iterations = max_iterations + self.output_dir = output_dir + self.run_name = run_name + self.primal_extra_degree = primal_extra_degree + self.dual_extra_degree = dual_extra_degree + self.cell_residual_extra_degree = cell_residual_extra_degree + self.facet_residual_extra_degree = facet_residual_extra_degree + self.use_adjoint_residual = use_adjoint_residual # For switching between primal and primal + adjoint residuals + self.uniform_refinement = uniform_refinement + self.primal_low_method = primal_low_method + self.dual_low_method = dual_low_method + self.write_solution = write_solution # options: "all", "first_and_last", "by iteration", None + self.write_solution_frequency = write_solution_frequency + self.verbose = verbose + + # Solver parameters + sp_cell = { + "mat_type": "matfree", + "snes_type": "ksponly", + "ksp_type": "cg", + "pc_type": "jacobi", + } + sp_facet = { + "snes_type": "ksponly", + 
"ksp_type": "cg", + "pc_type": "jacobi", + } + + +class GoalAdaptiveNonlinearVariationalSolver: + """Solves a nonlinear variational problem to minimise the error in a + user-specified goal functional. We do this by adaptively refining the mesh + based on the solution to a dual problem - which links the goal functional + to the PDE. + + Parameters + ---------- + problem + The variational formulation of the PDE defined on the coarse mesh + goal_functional + The goal functional defined in terms of the solution to the PDE + goal_adaptive_options + An options dictionary to construct a :class:`GoalAdaptiveOptions` + primal_solver_parameters + A dictionary of solver parameters for the primal problem + dual_solver_parameters + A dictionary of solver parameters for the dual problem. + Defaults to `primal_solver_parameters`. + primal_solver_kwargs + Keyword arguments for the primal :class:`~.NonlinearVariationalSolver` + dual_solver_kwargs + Keyword arguments for the primal :class:`~.LinearVariationalSolver` + exact_solution + The exact solution to the problem (optional). + If supplied, it is used to calculate the efficiency of the error estimate + exact_goal + The exact value for the goal functional (optional). + If supplied, it is used to calculate the efficiency of the error estimate. 
+ """ + def __init__(self, + problem: NonlinearVariationalProblem, + goal_functional: ufl.BaseForm, + tolerance: float, + goal_adaptive_options: dict | None = None, + primal_solver_parameters: dict | None = None, + dual_solver_parameters: dict | None = None, + primal_solver_kwargs: dict | None = None, + dual_solver_kwargs: dict | None = None, + exact_solution: ufl.classes.Expr | None = None, + exact_goal: ufl.classes.Expr | None = None, + ): + if goal_adaptive_options is None: + goal_adaptive_options = {} + + if not (isinstance(goal_functional, ufl.BaseForm) and len(goal_functional.arguments()) == 0): + raise ValueError("goal_functional must be a 0-form") + self.problem = problem + self.goal_functional = goal_functional + self.tolerance = tolerance + self.options = GoalAdaptiveOptions(**goal_adaptive_options) + self.sp_primal = primal_solver_parameters + self.sp_dual = dual_solver_parameters if dual_solver_parameters is not None else primal_solver_parameters + self.primal_solver_kwargs = primal_solver_kwargs if primal_solver_kwargs is not None else {} + self.dual_solver_kwargs = dual_solver_kwargs if dual_solver_kwargs is not None else {} + self.u_exact = as_mixed(exact_solution) if isinstance(exact_solution, (tuple, list)) else exact_solution + self.goal_exact = exact_goal + + # Set up an AdaptiveMeshHierarchy for every mesh of the problem + V = problem.u.function_space() + meshes = set(V.mesh()) + meshes.add(V.mesh()) + for mesh in meshes: + mh, level = get_level(mesh) + if mh is None: + amh = AdaptiveMeshHierarchy(mesh) + else: + amh = AdaptiveMeshHierarchy(mh[0]) + for m in mh[1:level+1]: + amh.add_mesh(m) + + self.atm = AdaptiveTransferManager() + self.base_levels = len(amh) + + # Data storage + self.Ndofs_vec = [] + self.eta_vec = [] + self.etah_vec = [] + self.eta_cell_sum_vec = [] + self.eff1_vec = [] + self.eff2_vec = [] + self.eff3_vec = [] + self.u_high = None + + def solve_primal(self): + F = self.problem.F + u = self.problem.u + bcs = self.problem.bcs 
+ V = self.problem.u.function_space() + self.Ndofs_vec.append(V.dim()) + + def solve_uh(): + self.print(f'Solving primal (degree: {V.ufl_element().degree()}, dofs: {V.dim()}) ...') + solver = NonlinearVariationalSolver(self.problem, solver_parameters=self.sp_primal, + **self.primal_solver_kwargs) + solver.set_transfer_manager(self.atm) + solver.solve() + self.primal_solver = solver + + if self.options.use_adjoint_residual: + if self.options.primal_low_method == "solve": + solve_uh() + + # Now solve in higher-order space + high_degree = V.ufl_element().degree() + self.options.primal_extra_degree + V_high = reconstruct_degree(V, high_degree) + u_high = Function(V_high, name="high_order_solution") + u_high.interpolate(u) + + v_old, = F.arguments() + v_high = TestFunction(V_high) + F_high = replace(F, {v_old: v_high, u: u_high}) + bcs_high = [bc.reconstruct(V=V_high, indices=bc._indices) for bc in bcs] + problem_high = NonlinearVariationalProblem(F_high, u_high, bcs_high) + + self.print(f"Solving primal with higher order for error estimate (degree: {high_degree}, dofs: {V_high.dim()}) ...") + solver = NonlinearVariationalSolver(problem_high, solver_parameters=self.sp_primal, + **self.primal_solver_kwargs) + solver.set_transfer_manager(self.atm) + solver.solve() + self.primal_solver = solver + + self.u_high = u_high + + if self.options.primal_low_method == "solve": + pass + elif self.options.primal_low_method == "project": + u.project(u_high) + elif self.options.primal_low_method == "interpolate": + u.interpolate(u_high) + else: + raise ValueError(f"Unrecognised primal_low_method {self.options.primal_low_method}") + u_err = u_high - u + else: + solve_uh() + u_err = None + return u_err + + def solve_dual(self): + + def solve_zh(z): + bcs = self.problem.bcs + J = self.goal_functional + F = self.problem.F + u = self.problem.u + + Z = z.function_space() + self.print(f"Solving dual (degree: {Z.ufl_element().degree()}, dofs: {Z.dim()}) ...") + + Fz = residual(F, 
TestFunction(Z)) + dF = derivative(Fz, u, TrialFunction(Z)) + dJ = derivative(J, u, TestFunction(Z)) + a = adjoint(dF) + + if self.u_high is not None: + a = replace(a, {u: self.u_high}) + dJ = replace(dJ, {u: self.u_high}) + + bcs_dual = [bc.reconstruct(V=Z, indices=bc._indices, g=0) for bc in bcs] + problem = LinearVariationalProblem(a, dJ, z, bcs_dual) + solver = LinearVariationalSolver(problem, solver_parameters=self.sp_dual, **self.dual_solver_kwargs) + solver.set_transfer_manager(self.atm) + solver.solve() + + # Higher-order dual solution + V = self.problem.u.function_space() + dual_degree = V.ufl_element().degree() + self.options.dual_extra_degree + V_dual = reconstruct_degree(V, dual_degree) + z = Function(V_dual, name="dual_high_order_solution") + solve_zh(z) + + # Lower-order dual solution + z_lo = Function(V, name="dual_low_order_solution") + if self.options.dual_low_method == "solve": + z_lo.interpolate(z) + solve_zh(z_lo) + elif self.options.dual_low_method == "project": + z_lo.project(z) + elif self.options.dual_low_method == "interpolate": + z_lo.interpolate(z) + else: + raise ValueError(f"Unrecognised dual_low_method {self.options.dual_low_method}") + z_err = z - z_lo + self.z = z + return z_lo, z_err + + def estimate_error(self, u_err, z_lo, z_err): + """Compute error estimate""" + from firedrake.assemble import assemble + J = self.goal_functional + F = self.problem.F + u = self.problem.u + + # Primal contribution to error estimator + primal_err = assemble(residual(F, -z_err)) + + # Dual contribution to error estimator + if self.options.use_adjoint_residual: + Z = z_lo.function_space() + dF = derivative(F, u, TrialFunction(Z)) + dJ = derivative(J, u) + G = action(adjoint(dF), z_lo) - dJ + + dual_err = assemble(residual(G, -u_err)) + discretisation_error = 0.5 * (primal_err + dual_err) + else: + discretisation_error = primal_err + + # Estimate of solver error + solver_error = assemble(residual(F, -z_lo)) + + if abs(solver_error) > 
abs(discretisation_error): + self.print(RED % 'Warning: solver error estimate greater than discretisation error estimate, refine solver tolerances') + + # Final error estimate + eta_h = discretisation_error + solver_error + self.etah_vec.append(eta_h) + + Juh = assemble(J) + self.print(f'{"Computed goal J(uh):":40s}{Juh:15.12f}') + self.Juh = Juh + if self.goal_exact is not None: + Ju = assemble(self.goal_exact) + elif self.u_exact is not None: + Ju = assemble(replace(J, {u: self.u_exact})) + else: + Ju = None + + if Ju is not None: + eta = Ju - Juh + self.eta_vec.append(eta) + self.print(f'{"Exact goal J(u):":40s}{Ju: 15.12f}') + self.print(f'{"True error, J(u) - J(u_h):":40s}{eta: 15.12e}') + else: + eta = None + + if self.options.use_adjoint_residual: + self.print(f'{"Primal error, rho(u_h; z-z_h):":40s}{primal_err: 15.12e}') + self.print(f'{"Dual error, rho*(z_h; u-u_h):":40s}{dual_err: 15.12e}') + self.print(f'{"Difference":40s}{abs(primal_err-dual_err):19.12e}') + self.print(f'{"Discretisation error, 0.5(rho + rho*)":40s}{discretisation_error: 15.12e}') + else: + self.print(f'{"Discretisation error, rho(u_h; z-z_h)":40s}{discretisation_error: 15.12e}') + self.print(f'{"Solver error, rho(u_h; z_h):":40s}{solver_error: 15.12e}') + self.print(f'{"Final error estimate:":40s}{eta_h: 15.12e}') + return eta_h, eta + + def compute_error_indicators(self, u_err, z_lo, z_err): + """Compute cell and facet residuals R_cell, R_facet""" + from firedrake.assemble import assemble + J = self.goal_functional + F = self.problem.F + u = self.problem.u + V = u.function_space() + + mesh = V.mesh().unique() + dim = mesh.topological_dimension + cell = mesh.ufl_cell() + variant = "integral" + degree = V.ufl_element().degree() + cell_residual_degree = degree + self.options.cell_residual_extra_degree + facet_residual_degree = degree + self.options.facet_residual_extra_degree + # ------------------------------- Primal residual ------------------------------- + # Cell bubbles + B = 
FunctionSpace(mesh, "B", dim+1, variant=variant) # Bubble function space + bubbles = Function(B).assign(1) # Bubbles + # DG space on cell interiors + if V.value_shape == (): + DG = FunctionSpace(mesh, "DG", cell_residual_degree, variant=variant) + else: + DG = TensorFunctionSpace(mesh, "DG", cell_residual_degree, variant=variant, shape=V.value_shape) + uc = TrialFunction(DG) + vc = TestFunction(DG) + ac = inner(uc, bubbles*vc)*dx + Lc = residual(F, bubbles*vc) + # solve for Rcell + Rcell = Function(DG) + solve(ac == Lc, Rcell, solver_parameters=self.options.sp_cell) + + # Facet bubbles + FB = FunctionSpace(mesh, "FB", dim, variant=variant) + cones = Function(FB).assign(1) + # Broken facet bubble space + el = BrokenElement(FiniteElement("FB", cell=cell, degree=facet_residual_degree+dim, variant=variant)) + if V.value_shape == (): + Q = FunctionSpace(mesh, el) + else: + Q = TensorFunctionSpace(mesh, el, shape=V.value_shape) + Qtest = TestFunction(Q) + Qtrial = TrialFunction(Q) + Lf = residual(F, Qtest) - inner(Rcell, Qtest)*dx + af = both(inner(Qtrial/cones, Qtest))*dS + inner(Qtrial/cones, Qtest)*ds + # solve for Rfacet + Rhat = Function(Q) + solve(af == Lf, Rhat, solver_parameters=self.options.sp_facet) + Rfacet = Rhat/cones + + # Primal error indicators + DG0 = FunctionSpace(mesh, "DG", degree=0) + test = TestFunction(DG0) + + eta_primal = assemble( + inner(inner(Rcell, z_err), test)*dx + + + inner(avg(inner(Rfacet, z_err)), both(test))*dS + + + inner(inner(Rfacet, z_err), test)*ds + ) + with eta_primal.dat.vec as evec: + evec.abs() + + # ------------------------------- Adjoint residual ------------------------------- + if self.options.use_adjoint_residual: + # r*(v) = J'(u)[v] - A'_u(u)[v, z] since F = A(u;v) - L(v) + dF = derivative(F, u, TrialFunction(V)) + dJ = derivative(J, u, TestFunction(V)) + rstar = action(adjoint(dF), z_lo) - dJ + + # dual: project r* -> Rcell*, Rfacet* + Lc_star = residual(rstar, bubbles*vc) + Rcell_star = Function(DG) + solve(ac == 
Lc_star, Rcell_star, solver_parameters=self.options.sp_cell) + + Lf_star = residual(rstar, Qtest) - inner(Rcell_star, Qtest)*dx + Rhat_star = Function(Q) + solve(af == Lf_star, Rhat_star, solver_parameters=self.options.sp_facet) + Rfacet_star = Rhat_star/cones + + eta_dual = assemble( + inner(inner(Rcell_star, u_err), test)*dx + + inner(avg(inner(Rfacet_star, u_err)), both(test))*dS + + inner(inner(Rfacet_star, u_err), test)*ds + ) + with eta_dual.dat.vec as evec: + evec.abs() + eta_cell = assemble(0.5*(eta_primal + eta_dual)) + + with eta_primal.dat.vec as evec: + self.eta_primal_total = abs(evec.sum()) + with eta_dual.dat.vec as evec: + self.eta_dual_total = abs(evec.sum()) + self.print(f'{"Sum of primal refinement indicators:":40s}{self.eta_primal_total: 15.12e}') + self.print(f'{"Sum of dual refinement indicators:":40s}{self.eta_dual_total: 15.12e}') + else: + eta_cell = eta_primal + + return eta_cell + + def compute_efficiency_indices(self, eta_cell, eta_h, eta): + """Compute efficiency indices""" + with eta_cell.dat.vec as evec: + eta_cell_total = abs(evec.sum()) + + self.eta_cell_sum_vec.append(eta_cell_total) + self.print(f'{"Sum of refinement indicators:":40s}{eta_cell_total: 15.12e}') + + if self.u_exact is not None or self.goal_exact is not None: + eff1 = abs(eta_h / eta) + eff2 = abs(eta_cell_total / eta) + self.eff1_vec.append(eff1) + self.eff2_vec.append(eff2) + self.print(f'{"Effectivity index:":40s}{eff1: 15.12f}') + self.print(f'{"Localisation efficiency:":40s}{eff2: 15.12f}') + else: + eff3 = eta_cell_total / eta_h + self.eff3_vec.append(eff3) + self.print(f'{"Localisation efficiency:":40s}{eff3: 15.12f}') + + def set_adaptive_cell_markers(self, eta_cell): + """Mark cells for refinement (Dorfler marking)""" + # NOTE this is not quite Dorfler marking + # For Dorfler marking we need to implement a parallel sort + markers = Function(eta_cell.function_space()) + for m, e in zip(markers.subfunctions, eta_cell.subfunctions): + with e.dat.vec_ro as evec: 
+ _, emax = evec.max() + threshold = self.options.dorfler_alpha * emax + m.dat.data_wo[e.dat.data_ro > threshold] = 1 + return markers + + def set_uniform_cell_markers(self): + """Uniform marking for comparison tests""" + V = self.problem.u.function_space() + mesh = V.mesh().unique() + markers_space = FunctionSpace(mesh, "DG", 0) + markers = Function(markers_space) + markers.assign(1) + return markers + + def refine_problem(self, markers): + """Adaptively refine the mesh and rediscretise the problem on the refined mesh""" + for marker in markers.subfunctions: + mesh = marker.function_space().mesh() + new_mesh = mesh.refine_marked_elements(marker) + amh, _ = get_level(mesh) + amh.add_mesh(new_mesh) + # Reconstruct MeshSequence with the refined meshes + mesh = self.problem.u.function_space().mesh() + if len(mesh) > 1: + new_mesh = type(mesh)([get_level(m)[0][-1] for m in mesh]) + amh, _ = get_level(mesh) + amh.add_mesh(new_mesh) + + coef_map = {} + self.problem = refine(self.problem, refine, coefficient_mapping=coef_map) + self.goal_functional = refine(self.goal_functional, refine, coefficient_mapping=coef_map) + if self.u_exact is not None: + self.u_exact = refine(self.u_exact, refine, coefficient_mapping=coef_map) + + def solve(self): + """Run the adaptive refinement loop: SOLVE -> ESTIMATE -> MARK -> REFINE. + + Returns + ------- + Tuple[Function, float] + A tuple with the solution on the finest level of the hierarchy + and the error estimate. + + """ + for it in range(self.options.max_iterations): + try: + self.step(it=it) + except StopIteration: + break + u_out = self.u_high if self.u_high is not None else self.problem.u + error_estimate = self.etah_vec[-1] + self.u_high = None + return u_out, error_estimate + + def step(self, it=None): + """Compute one step of SOLVE -> ESTIMATE -> MARK -> REFINE + + Returns + ------- + Tuple[Function, float] + A tuple with the solution on the finest level of the hierarchy + and the error estimate. 
+ + """ + if it is None: + V = self.problem.u.function_space() + _, it = get_level(V.mesh()) + self.print(f"---------------------------- [MESH LEVEL {it}] ----------------------------") + # SOLVE + u_err = self.solve_primal() + u_out = self.u_high if self.u_high is not None else self.problem.u + z_lo, z_err = self.solve_dual() + self.write_solution(it) + # ESTIMATE + eta_h, eta = self.estimate_error(u_err, z_lo, z_err) + if abs(eta_h) < self.tolerance: + self.print("Error estimate below tolerance, finished.") + raise StopIteration + elif it == self.options.max_iterations - 1: + self.print(f"Maximum iteration ({self.options.max_iterations}) reached. Exiting.") + raise StopIteration + # MARK + if self.options.uniform_refinement: + self.print("Refining uniformly") + markers = self.set_uniform_cell_markers() + else: + self.print("Computing local refinement indicators eta_K ...") + eta_cell = self.compute_error_indicators(u_err, z_lo, z_err) + self.compute_efficiency_indices(eta_cell, eta_h, eta) + markers = self.set_adaptive_cell_markers(eta_cell) + # REFINE + self.print("Transferring problem to new mesh ...") + self.refine_problem(markers) + return u_out, eta_h + + def print(self, *args, **kwargs): + if self.options.verbose: + PETSc.Sys.Print(*args, **kwargs) + + def write_solution(self, it): + should_write = False + write_solution = self.options.write_solution + if write_solution == "all": + should_write = True + elif write_solution == "first_and_last": + if it == 0 or it == self.options.max_iterations: + should_write = True + elif self.options.write_solution_frequency is not None: + freq = int(self.options.write_solution_frequency) + if freq <= 0: + raise ValueError("write_solution_frequency must be a positive integer") + should_write = (it % freq == 0) + if should_write: + output_dir = self.options.output_dir + run_name = self.options.run_name + prefix = f"{output_dir}/{run_name}/{run_name}" + self.print("Writing (primal) solution ...") + 
VTKFile(f"{prefix}_solution_{it}.pvd").write(*self.problem.u.subfunctions) + self.print("Writing (dual) solution ...") + VTKFile(f"{prefix}_dual_solution_{it}.pvd").write(*self.z.subfunctions) + + +def residual(F, test): + """Replace the argument of a linear Form""" + v, = F.arguments() + return replace(F, {v: test}) + + +def both(u): + """Add u on both sides of a facet""" + return u("+") + u("-") + + +def as_mixed(exprs): + """Flatten a list of ufl.Expr objects into a vector""" + return as_vector([e[idx] for e in exprs for idx in np.ndindex(e.ufl_shape)]) + + +def reconstruct_degree(V, degree): + """Reconstruct a FunctionSpace with a different degree""" + return V.reconstruct(element=PMGPC.reconstruct_degree(V.ufl_element(), degree)) + + +def reconstruct_bcs(bcs, V): + """Reconstruct a list of bcs on a different FunctionSpace""" + bcs = [bc.reconstruct(V=V, indices=bc._indices) for bc in bcs] + return bcs diff --git a/firedrake/mg/adaptive_hierarchy.py b/firedrake/mg/adaptive_hierarchy.py index fc6c2f728a..5c8ea2dc96 100644 --- a/firedrake/mg/adaptive_hierarchy.py +++ b/firedrake/mg/adaptive_hierarchy.py @@ -21,7 +21,7 @@ class AdaptiveMeshHierarchy(HierarchyBase): """ def __init__(self, base_mesh: MeshGeometry, nested: bool = True): self.meshes = [] - self._meshes = [] + self._meshes = self.meshes self.nested = nested self.add_mesh(base_mesh) @@ -35,7 +35,6 @@ def add_mesh(self, mesh: MeshGeometry): The mesh to be added to the finest level. 
""" level = len(self.meshes) - self._meshes.append(mesh) self.meshes.append(mesh) set_level(mesh, self, level) diff --git a/firedrake/mg/ufl_utils.py b/firedrake/mg/ufl_utils.py index d5a26a36e6..4b997d07b4 100644 --- a/firedrake/mg/ufl_utils.py +++ b/firedrake/mg/ufl_utils.py @@ -122,7 +122,7 @@ def coarsen_formsum(form, self, coefficient_mapping=None): @coarsen.register(firedrake.DirichletBC) def coarsen_bc(bc, self, coefficient_mapping=None): V = self(bc.function_space(), self, coefficient_mapping=coefficient_mapping) - val = self(bc.function_arg, self, coefficient_mapping=coefficient_mapping) + val = self(bc._original_arg, self, coefficient_mapping=coefficient_mapping) subdomain = bc.sub_domain return type(bc)(V, val, subdomain) @@ -145,18 +145,30 @@ def coarsen_equation_bc(ebc, self, coefficient_mapping=None): @coarsen.register(firedrake.functionspaceimpl.WithGeometryBase) def coarsen_function_space(V, self, coefficient_mapping=None): - if hasattr(V, "_coarse"): + # Handle MixedFunctionSpace : V.reconstruct requires MeshSequence. + mesh = V.mesh() if V.index is None else V.parent.mesh() + new_mesh = self(mesh, self) + if self == coarsen and hasattr(V, "_coarse") and V._coarse.mesh() == new_mesh: return V._coarse - - V_fine = V - # Handle MixedFunctionSpace : V_fine.reconstruct requires MeshSequence. 
- fine_mesh = V_fine.mesh() if V_fine.index is None else V_fine.parent.mesh() - mesh_coarse = self(fine_mesh, self) - name = f"coarse_{V.name}" if V.name else None - V_coarse = V_fine.reconstruct(mesh=mesh_coarse, name=name) - V_coarse._fine = V_fine - V_fine._coarse = V_coarse - return V_coarse + elif self == refine and hasattr(V, "_fine") and V._fine.mesh() == new_mesh: + return V._fine + name = V.name + if name is not None: + level_increment = -1 if self == coarsen else 1 + try: + name, prev_level = name.split("_level_") + except ValueError: + prev_level = 0 + level = int(prev_level) + level_increment + name = f"{name}_level_{level}" + V_new = V.reconstruct(mesh=new_mesh, name=name) + if self == coarsen: + V_new._fine = V + V._coarse = V_new + elif self == refine: + V_new._coarse = V + V._fine = V_new + return V_new @coarsen.register(firedrake.Cofunction) @@ -168,7 +180,16 @@ def coarsen_function(expr, self, coefficient_mapping=None): if new is None: Vf = expr.function_space() Vc = self(Vf, self) - new = firedrake.Function(Vc, name=f"coarse_{expr.name()}") + name = expr.name() + if name is not None: + level_increment = -1 if self == coarsen else 1 + try: + name, prev_level = name.split("_level_") + except ValueError: + prev_level = 0 + level = int(prev_level) + level_increment + name = f"{name}_level_{level}" + new = firedrake.Function(Vc, name=name) manager = get_transfer_manager(Vf.dm) if is_dual(expr): manager.restrict(expr, new) @@ -180,8 +201,14 @@ def coarsen_function(expr, self, coefficient_mapping=None): @coarsen.register(firedrake.NonlinearVariationalProblem) def coarsen_nlvp(problem, self, coefficient_mapping=None): - if hasattr(problem, "_coarse"): - return problem._coarse + # Have we done this already? 
+ mh, _ = utils.get_level(problem.u.function_space().mesh()) + if self == coarsen and hasattr(problem, "_coarse"): + if mh is utils.get_level(problem._coarse.u.function_space().mesh())[0]: + return problem._coarse + elif self == refine and hasattr(problem, "_fine"): + if mh is utils.get_level(problem._fine.u.function_space().mesh())[0]: + return problem._fine def inject_on_restrict(fine, restriction, rscale, injection, coarse): manager = get_transfer_manager(fine) @@ -222,10 +249,13 @@ def inject_on_restrict(fine, restriction, rscale, injection, coarse): Jp = self(problem.Jp, self, coefficient_mapping=coefficient_mapping) u = coefficient_mapping[problem.u_restrict] - fine = problem + orig = problem problem = firedrake.NonlinearVariationalProblem(F, u, bcs=bcs, J=J, Jp=Jp, is_linear=problem.is_linear, form_compiler_parameters=problem.form_compiler_parameters) - fine._coarse = problem + if self == coarsen: + orig._coarse = problem + elif self == refine: + orig._fine = problem return problem @@ -261,9 +291,8 @@ def coarsen_snescontext(context, self, coefficient_mapping=None): coefficient_mapping = {} # Have we already done this? 
- coarse = context._coarse - if coarse is not None: - return coarse + if self == coarsen and context._coarse is not None: + return context._coarse problem = self(context._problem, self, coefficient_mapping=coefficient_mapping) appctx = context.appctx @@ -387,6 +416,44 @@ def coarsen_slate_tensor_op(tensor, self, coefficient_mapping=None): return type(tensor)(*children) +@singledispatch +def refine(expr, self, coefficient_mapping=None): + return coarsen(expr, self, coefficient_mapping=coefficient_mapping) # fallback to original + + +@refine.register(ufl.Mesh) +@refine.register(ufl.MeshSequence) +def refine_mesh(mesh, self, coefficient_mapping=None): + hierarchy, level = utils.get_level(mesh) + if hierarchy is None: + raise CoarseningError("No mesh hierarchy available") + return hierarchy[level + 1] + + +@refine.register(firedrake.Cofunction) +@refine.register(firedrake.Function) +def refine_function(expr, self, coefficient_mapping=None): + if coefficient_mapping is None: + coefficient_mapping = {} + new = coefficient_mapping.get(expr) + if new is None: + Vf = expr.function_space() + Vc = self(Vf, self) + name = expr.name() + if name is not None: + level_increment = -1 if self == coarsen else 1 + try: + name, prev_level = name.split("_level_") + except ValueError: + prev_level = 0 + level = int(prev_level) + level_increment + name = f"{name}_level_{level}" + new = firedrake.Function(Vc, name=name) + new.interpolate(expr) + coefficient_mapping[expr] = new + return new + + class Interpolation(object): def __init__(self, Vcoarse, Vfine, manager, cbcs=None, fbcs=None): self.cprimal = firedrake.Function(Vcoarse) diff --git a/tests/firedrake/demos/test_demos_run.py b/tests/firedrake/demos/test_demos_run.py index 7202ddbc5f..c33e8796ac 100644 --- a/tests/firedrake/demos/test_demos_run.py +++ b/tests/firedrake/demos/test_demos_run.py @@ -52,7 +52,8 @@ Demo(("saddle_point_pc", "saddle_point_systems"), ["hypre", "mumps"]), Demo(("fast_diagonalisation", 
"fast_diagonalisation_poisson"), ["mumps"]), Demo(('vlasov_poisson_1d', 'vp1d'), []), - Demo(('shape_optimization', 'shape_optimization'), ["adjoint", "vtk"]) + Demo(('shape_optimization', 'shape_optimization'), ["adjoint", "vtk"]), + Demo(('goal_based_adaptivity_bvp', 'goal_based_adaptivity_bvp'), ["netgen"]), ] PARALLEL_DEMOS = [ Demo(("full_waveform_inversion", "full_waveform_inversion"), ["adjoint"]),