r"""Solves a grad-div system in 2D with Dirichlet boundary conditions using a PINN. .. math:: -\nabla (\nabla \cdot u) + u & = f in \Omega \times M \\ u & = g on \partial \Omega \times M where :math:`u: \Omega \times M \to \mathbb{R}^2` is the unknown function, :math:`\Omega \subset \mathbb{R}^2` is the spatial domain and :math:`M \subset \mathbb{R}` is the parametric domain. The equation is solved on a square domain; strong boundary conditions are used. Three training strategies are compared: standard PINNs, PINNs with energy natural gradient preconditioning and PINNs with Anagram preconditioning. """ from typing import Callable, Tuple import matplotlib.pyplot as plt import torch from scimba_torch.approximation_space.abstract_space import AbstractApproxSpace from scimba_torch.approximation_space.nn_space import NNxSpace from scimba_torch.domain.meshless_domain.domain_2d import Square2D from scimba_torch.integration.monte_carlo import DomainSampler, TensorizedSampler from scimba_torch.integration.monte_carlo_parameters import UniformParametricSampler from scimba_torch.neural_nets.coordinates_based_nets.mlp import GenericMLP from scimba_torch.numerical_solvers.elliptic_pde.pinns import ( AnagramPinnsElliptic, NaturalGradientPinnsElliptic, PinnsElliptic, ) from scimba_torch.optimizers.optimizers_data import OptimizerData from scimba_torch.plots.plots_nd import plot_abstract_approx_spaces from scimba_torch.utils.scimba_tensors import LabelTensor, MultiLabelTensor from scimba_torch.utils.typing_protocols import VarArgCallable class GradDiv2D: def __init__( self, space: AbstractApproxSpace, f: Callable, g: Callable, **kwargs, ): self.space = space self.f = f self.g = g self.linear = True self.residual_size = 2 self.bc_residual_size = 2 def grad( self, w: torch.Tensor | MultiLabelTensor, y: torch.Tensor | LabelTensor, ) -> torch.Tensor | Tuple[torch.Tensor, ...]: return self.space.grad(w, y) def rhs( self, w: MultiLabelTensor, x: LabelTensor, mu: LabelTensor ) -> Tuple[torch.Tensor]: return self.f(x, mu) def bc_rhs( self, w: MultiLabelTensor, x: LabelTensor, n: LabelTensor, mu: LabelTensor ) -> Tuple[torch.Tensor]: return self.g(x, mu) def operator( self, w: MultiLabelTensor, xs: LabelTensor, mu: LabelTensor ) -> Tuple[torch.Tensor]: x, y = xs.get_components() u, v = w.get_components() u_x, u_y = self.grad(u, xs) u_xx, u_xy = self.grad(u_x, xs) v_x, v_y = self.grad(v, xs) v_yx, v_yy = self.grad(v_y, xs) return u_xx + v_yx + u, u_xy + v_yy + v def restrict_to_component(self, i: int, func): return lambda *args: func(*args)[i : i + 1, ...] def functional_operator( self, func: VarArgCallable, x: torch.Tensor, mu: torch.Tensor, theta: torch.Tensor, ) -> torch.Tensor: uv_x = func(x, mu, theta) grad_u = self.restrict_to_component(0, torch.func.jacrev(func, 0)) grad_v = self.restrict_to_component(1, torch.func.jacrev(func, 0)) hessian_u = torch.func.jacrev(grad_u, 0)(x, mu, theta).squeeze() hessian_v = torch.func.jacrev(grad_v, 0)(x, mu, theta).squeeze() res = hessian_u[..., 0] + hessian_v[..., 1] + uv_x return res # Dirichlet conditions def bc_operator( self, w: MultiLabelTensor, x: LabelTensor, n: LabelTensor, mu: LabelTensor ) -> Tuple[torch.Tensor]: u, v = w.get_components() return u, v def functional_operator_bc( self, func: VarArgCallable, x: torch.Tensor, n: torch.Tensor, mu: torch.Tensor, theta: torch.Tensor, ) -> torch.Tensor: return func(x, mu, theta) def exact_solution(xs: LabelTensor, mu: LabelTensor) -> torch.Tensor: x, y = xs.get_components() alpha = mu.get_components() return torch.cat( ( torch.sin(2.0 * torch.pi * x) * torch.sin(2.0 * torch.pi * y), alpha * torch.sin(2.0 * torch.pi * x) * torch.sin(2.0 * torch.pi * y), ), dim=-1, ) def f_rhs(xs: LabelTensor, mu: LabelTensor) -> Tuple[torch.Tensor]: x, y = xs.get_components() alpha = mu.get_components() PI = torch.pi cos_x = torch.cos(2.0 * PI * x) cos_y = torch.cos(2.0 * PI * y) sin_x = torch.sin(2.0 * PI * x) sin_y = torch.sin(2.0 * PI * y) f1 = (1 - 4 * PI**2) * sin_x * sin_y + 4 * PI**2 * alpha * cos_x * cos_y f2 = (1 - 4 * PI**2 * alpha) * sin_x * sin_y + 4 * PI**2 * cos_x * cos_y return f1, f2 def f_bc(x: LabelTensor, mu: LabelTensor) -> Tuple[torch.Tensor]: x = x.get_components() return torch.zeros_like(x), torch.zeros_like(x) def post_processing(inputs: torch.Tensor, xs: LabelTensor, mu: LabelTensor): x, y = xs.get_components() # _ = mu.get_components() phi = x * (x - 1.0) * y * (y - 1.0) return inputs * phi def functional_post_processing( func, x: torch.Tensor, mu: torch.Tensor, theta: torch.Tensor ) -> torch.Tensor: phi = x[0] * (x[0] - 1.0) * x[1] * (x[1] - 1.0) return func(x, mu, theta) * phi domain_mu = [(0.75, 0.75)] domain_x = Square2D([(0.0, 1), (0.0, 1)], is_main_domain=True) sampler = TensorizedSampler( [DomainSampler(domain_x), UniformParametricSampler(domain_mu)] ) space = NNxSpace( 2, 1, GenericMLP, domain_x, sampler, layer_sizes=[64], post_processing=post_processing, ) pde = GradDiv2D(space, f_rhs, f_bc) opt_1 = { "name": "adam", "optimizer_args": {"lr": 1.8e-2, "betas": (0.9, 0.999)}, } opt_2 = { "name": "lbfgs", "switch_at_epoch_ratio": 0.7, } pinns = PinnsElliptic( pde, bc_type="strong", optimizers=OptimizerData(opt_1, opt_2), one_loss_by_equation=True, ) solving_mode = "load" # can be "new" for solve from scratch, # "resume" for load from file and continue solving, # or "load" for load from file load_post_fix = "pinns" # the post_fix of the file where to load the pinn save_post_fix = "pinns" # the post_fix of the file where to save the pinn new_solving = solving_mode == "new" resume_solving = solving_mode == "resume" try_load = resume_solving or (not new_solving) load_ok = try_load and pinns.load(__file__, load_post_fix) solve = (not load_ok) or (resume_solving or new_solving) if solve: pinns.solve(epochs=1000, n_collocation=3000, verbose=True) pinns.save(__file__, save_post_fix) pinns.space.load_from_best_approx() plot_abstract_approx_spaces( pinns.space, domain_x, domain_mu, loss=pinns.losses, residual=pde, # solution=exact_solution, error=exact_solution, draw_contours=True, n_drawn_contours=20, title="solving GradDiv2D with PinnsElliptic", ) plt.show() space2 = NNxSpace( 2, 1, GenericMLP, domain_x, sampler, layer_sizes=[64], post_processing=post_processing, ) pde2 = GradDiv2D(space2, f_rhs, f_bc) pinns2 = NaturalGradientPinnsElliptic( pde2, bc_type="strong", one_loss_by_equation=True, matrix_regularization=1e-6, functional_post_processing=functional_post_processing, ) solving_mode = "load" # can be "new" for solve from scratch, # "resume" for load from file and continue solving, # or "load" for load from file load_post_fix = "pinns_ENG" # the post_fix of the file where to load the pinn save_post_fix = "pinns_ENG" # the post_fix of the file where to save the pinn new_solving = solving_mode == "new" resume_solving = solving_mode == "resume" try_load = resume_solving or (not new_solving) load_ok = try_load and pinns2.load(__file__, load_post_fix) solve = (not load_ok) or (resume_solving or new_solving) if solve: pinns2.solve(epochs=200, n_collocation=3000, verbose=True) pinns2.save(__file__, save_post_fix) plot_abstract_approx_spaces( ( pinns.space, pinns2.space, ), domain_x, domain_mu, loss=( pinns.losses, pinns2.losses, ), residual=( pde, pde2, ), error=exact_solution, draw_contours=True, n_drawn_contours=20, title="solving GradDiv2D with PinnsElliptic", titles=("no preconditioning", "ENG preconditioning"), ) plt.show() space3 = NNxSpace( 2, 1, GenericMLP, domain_x, sampler, layer_sizes=[64], post_processing=post_processing, ) pde3 = GradDiv2D(space3, f_rhs, f_bc) pinns3 = AnagramPinnsElliptic( pde3, bc_type="strong", one_loss_by_equation=True, svd_threshold=5e-3, functional_post_processing=functional_post_processing, ) solving_mode = "load" # can be "new" for solve from scratch, # "resume" for load from file and continue solving, # or "load" for load from file load_post_fix = "pinns_Anagram" # the post_fix of the file where to load the pinn save_post_fix = "pinns_Anagram" # the post_fix of the file where to save the pinn new_solving = solving_mode == "new" resume_solving = solving_mode == "resume" try_load = resume_solving or (not new_solving) load_ok = try_load and pinns3.load(__file__, load_post_fix) solve = (not load_ok) or (resume_solving or new_solving) if solve: pinns3.solve(epochs=200, n_collocation=3000, verbose=True) pinns3.save(__file__, save_post_fix) plot_abstract_approx_spaces( ( pinns2.space, pinns3.space, ), domain_x, domain_mu, loss=( pinns2.losses, pinns3.losses, ), residual=( pde2, pde3, ), # solution=exact_solution, error=exact_solution, draw_contours=True, n_drawn_contours=20, title="solving GradDiv2D with PinnsElliptic", titles=("ENG preconditioning", "Anagram preconditioning"), ) plt.show()