r"""Solves a grad-div system in 2D with Dirichlet boundary conditions using a PINN. .. math:: -\nabla (\nabla \cdot u) + u & = f in \Omega \times M \\ u & = g on \partial \Omega \times M where :math:`u: \Omega \times M \to \mathbb{R}^2` is the unknown function, :math:`\Omega \subset \mathbb{R}^2` is the spatial domain and :math:`M \subset \mathbb{R}` is the parametric domain. The equation is solved on a square domain; weak boundary conditions are used. Three training strategies are compared: standard PINNs, PINNs with energy natural gradient preconditioning, PINNs with Anagram preconditioning and PINNs with Nystrom Natural Gradient preconditioning. """ from typing import Callable, Tuple import matplotlib.pyplot as plt import torch from scimba_torch.approximation_space.abstract_space import AbstractApproxSpace from scimba_torch.approximation_space.nn_space import NNxSpace from scimba_torch.domain.meshless_domain.domain_2d import Square2D from scimba_torch.integration.monte_carlo import DomainSampler, TensorizedSampler from scimba_torch.integration.monte_carlo_parameters import UniformParametricSampler from scimba_torch.neural_nets.coordinates_based_nets.mlp import GenericMLP from scimba_torch.numerical_solvers.elliptic_pde.pinns import ( NaturalGradientPinnsElliptic, PinnsElliptic, ) from scimba_torch.physical_models.elliptic_pde.abstract_elliptic_pde import ( StrongFormEllipticPDE, ) from scimba_torch.plots.plots_nd import plot_abstract_approx_spaces from scimba_torch.utils.scimba_tensors import LabelTensor, MultiLabelTensor from scimba_torch.utils.typing_protocols import VarArgCallable class GradDiv2D(StrongFormEllipticPDE): def __init__( self, space: AbstractApproxSpace, f: Callable, g: Callable, **kwargs, ): super().__init__( space, linear=True, residual_size=2, bc_residual_size=2, **kwargs ) self.space = space self.f = f self.g = g def grad( self, w: torch.Tensor | MultiLabelTensor, y: torch.Tensor | LabelTensor, ) -> torch.Tensor | Tuple[torch.Tensor, ...]: return self.space.grad(w, y) def rhs( self, w: MultiLabelTensor, x: LabelTensor, mu: LabelTensor ) -> Tuple[torch.Tensor]: return self.f(x, mu) def bc_rhs( self, w: MultiLabelTensor, x: LabelTensor, n: LabelTensor, mu: LabelTensor ) -> Tuple[torch.Tensor]: return self.g(x, mu) def operator( self, w: MultiLabelTensor, xs: LabelTensor, mu: LabelTensor ) -> Tuple[torch.Tensor]: x, y = xs.get_components() u, v = w.get_components() u_x, u_y = self.grad(u, xs) u_xx, u_xy = self.grad(u_x, xs) v_x, v_y = self.grad(v, xs) v_yx, v_yy = self.grad(v_y, xs) return u_xx + v_yx + u, u_xy + v_yy + v def restrict_to_component(self, i: int, func): return lambda *args: func(*args)[i : i + 1, ...] 
    def functional_operator(
        self,
        func: VarArgCallable,
        x: torch.Tensor,
        mu: torch.Tensor,
        theta: torch.Tensor,
    ) -> torch.Tensor:
        uv_x = func(x, mu, theta)
        grad_u = self.restrict_to_component(0, torch.func.jacrev(func, 0))
        grad_v = self.restrict_to_component(1, torch.func.jacrev(func, 0))
        hessian_u = torch.func.jacrev(grad_u, 0)(x, mu, theta).squeeze()
        hessian_v = torch.func.jacrev(grad_v, 0)(x, mu, theta).squeeze()
        res = hessian_u[..., 0] + hessian_v[..., 1] + uv_x
        return res

    # Dirichlet conditions
    def bc_operator(
        self, w: MultiLabelTensor, x: LabelTensor, n: LabelTensor, mu: LabelTensor
    ) -> Tuple[torch.Tensor]:
        u, v = w.get_components()
        return u, v

    def functional_operator_bc(
        self,
        func: VarArgCallable,
        x: torch.Tensor,
        n: torch.Tensor,
        mu: torch.Tensor,
        theta: torch.Tensor,
    ) -> torch.Tensor:
        return func(x, mu, theta)


def exact_solution(xs: LabelTensor, mu: LabelTensor) -> torch.Tensor:
    x, y = xs.get_components()
    alpha = mu.get_components()
    return torch.cat(
        (
            torch.sin(2.0 * torch.pi * x) * torch.sin(2.0 * torch.pi * y),
            alpha * torch.sin(2.0 * torch.pi * x) * torch.sin(2.0 * torch.pi * y),
        ),
        dim=-1,
    )


def f_rhs(xs: LabelTensor, mu: LabelTensor) -> Tuple[torch.Tensor]:
    # Right-hand side manufactured from exact_solution above.
    x, y = xs.get_components()
    alpha = mu.get_components()
    PI = torch.pi
    cos_x = torch.cos(2.0 * PI * x)
    cos_y = torch.cos(2.0 * PI * y)
    sin_x = torch.sin(2.0 * PI * x)
    sin_y = torch.sin(2.0 * PI * y)
    f1 = (1 - 4 * PI**2) * sin_x * sin_y + 4 * PI**2 * alpha * cos_x * cos_y
    f2 = (1 - 4 * PI**2 * alpha) * sin_x * sin_y + 4 * PI**2 * cos_x * cos_y
    return f1, f2


def f_bc(xs: LabelTensor, mu: LabelTensor) -> Tuple[torch.Tensor]:
    x, _ = xs.get_components()
    return torch.zeros_like(x), torch.zeros_like(x)


bc_weight = 10.0
# in_weights = [3., 1.]

domain_mu = [(0.75, 0.75 + 1e-4)]
domain_x = Square2D([(0.0, 1), (0.0, 1)], is_main_domain=True)
sampler = TensorizedSampler(
    [DomainSampler(domain_x), UniformParametricSampler(domain_mu)]
)
space = NNxSpace(2, 1, GenericMLP, domain_x, sampler, layer_sizes=[64])
pde = GradDiv2D(space, f_rhs, f_bc)

opt_1 = {
    "name": "adam",
    "optimizer_args": {"lr": 1.8e-2, "betas": (0.9, 0.999)},
}
opt_2 = {
    "name": "lbfgs",
    "switch_at_epoch_ratio": 0.7,
}

pinns = PinnsElliptic(
    pde,
    # in_weights=in_weights,
    bc_type="weak",
    bc_weight=bc_weight,
    optimizers=[opt_1, opt_2],
    one_loss_by_equation=True,
)

solving_mode = "load"  # can be "new" to solve from scratch,
# "resume" to load from file and continue solving,
# or "load" to load from file
load_post_fix = "pinns"  # suffix of the file to load the PINN from
save_post_fix = "pinns"  # suffix of the file to save the PINN to

new_solving = solving_mode == "new"
resume_solving = solving_mode == "resume"
try_load = resume_solving or (not new_solving)
load_ok = try_load and pinns.load(__file__, load_post_fix)
solve = (not load_ok) or (resume_solving or new_solving)

if solve:
    pinns.solve(
        epochs=1000,
        n_collocation=3000,
        n_bc_collocation=1000,
        verbose=False,
    )
    pinns.save(__file__, save_post_fix)

print(f"Training time: {pinns.training_time:.2f} seconds")
print(f"Best Loss : {pinns.best_loss:.2e}")

# plot_abstract_approx_spaces(
#     pinns.space,
#     domain_x,
#     domain_mu,
#     loss=pinns.losses,
#     residual=pde,
#     # solution=exact_solution,
#     error=exact_solution,
#     draw_contours=True,
#     n_drawn_contours=20,
#     title="solving GradDiv2D with PinnsElliptic",
# )
#
# plt.show()

space2 = NNxSpace(2, 1, GenericMLP, domain_x, sampler, layer_sizes=[64])
pde2 = GradDiv2D(space2, f_rhs, f_bc)
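
# The solver below preconditions each parameter update with (an approximation
# of) the Gram matrix of the residual Jacobian: the energy natural gradient.
# A minimal sketch of the idea follows -- NOT scimba_torch's implementation;
# `jacobian`, `grad` and `eps` are illustrative names -- where one ENG-style
# step solves a regularized normal system. The `matrix_regularization`
# argument presumably plays the role of `eps`.
def _eng_step_sketch(
    jacobian: torch.Tensor,  # (n_residuals, n_params) Jacobian of the residual
    grad: torch.Tensor,  # (n_params,) gradient of the loss
    eps: float = 1e-6,  # Tikhonov regularization of the Gram matrix
) -> torch.Tensor:
    gram = jacobian.T @ jacobian + eps * torch.eye(jacobian.shape[1])
    return torch.linalg.solve(gram, grad)  # preconditioned update direction
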
bc_type="weak", bc_weight=bc_weight, one_loss_by_equation=True, matrix_regularization=1e-6, ) solving_mode = "load" # can be "new" for solve from scratch, # "resume" for load from file and continue solving, # or "load" for load from file load_post_fix = "pinns_ENG" # the post_fix of the file where to load the pinn save_post_fix = "pinns_ENG" # the post_fix of the file where to save the pinn new_solving = solving_mode == "new" resume_solving = solving_mode == "resume" try_load = resume_solving or (not new_solving) load_ok = try_load and pinns2.load(__file__, load_post_fix) solve = (not load_ok) or (resume_solving or new_solving) if solve: pinns2.solve( epochs=200, n_collocation=3000, n_bc_collocation=1000, verbose=False, ) pinns2.save(__file__, save_post_fix) print(f"Training time: {pinns2.training_time:.2f} seconds") print(f"Best Loss : {pinns2.best_loss:.2e}") # plot_abstract_approx_spaces( # ( # pinns.space, # pinns2.space, # ), # domain_x, # domain_mu, # loss=( # pinns.losses, # pinns2.losses, # ), # residual=( # pde, # pde2, # ), # error=exact_solution, # draw_contours=True, # n_drawn_contours=20, # title="solving GradDiv2D with PinnsElliptic", # titles=("no preconditioning", "ENG preconditioning"), # ) # # plt.show() space3 = NNxSpace(2, 1, GenericMLP, domain_x, sampler, layer_sizes=[64]) pde3 = GradDiv2D(space3, f_rhs, f_bc) pinns3 = NaturalGradientPinnsElliptic( pde3, # in_weights=in_weights, bc_type="weak", bc_weight=bc_weight, one_loss_by_equation=True, ng_algo="ANaGRAM", svd_threshold=5e-2, ) solving_mode = "load" # can be "new" for solve from scratch, # "resume" for load from file and continue solving, # or "load" for load from file load_post_fix = "pinns_Anagram" # the post_fix of the file where to load the pinn save_post_fix = "pinns_Anagram" # the post_fix of the file where to save the pinn new_solving = solving_mode == "new" resume_solving = solving_mode == "resume" try_load = resume_solving or (not new_solving) load_ok = try_load and pinns3.load(__file__, load_post_fix) solve = (not load_ok) or (resume_solving or new_solving) if solve: pinns3.solve( epochs=200, n_collocation=3000, n_bc_collocation=1000, verbose=False, ) pinns3.save(__file__, save_post_fix) print(f"Training time: {pinns3.training_time:.2f} seconds") print(f"Best Loss : {pinns3.best_loss:.2e}") # plot_abstract_approx_spaces( # ( # pinns2.space, # pinns3.space, # ), # domain_x, # domain_mu, # loss=( # pinns2.losses, # pinns3.losses, # ), # residual=( # pde2, # pde3, # ), # # solution=exact_solution, # error=exact_solution, # draw_contours=True, # n_drawn_contours=20, # title="solving GradDiv2D with PinnsElliptic", # titles=("ENG preconditioning", "Anagram preconditioning"), # ) # # plt.show() torch.manual_seed(0) space4 = NNxSpace(2, 1, GenericMLP, domain_x, sampler, layer_sizes=[64]) pde4 = GradDiv2D(space4, f_rhs, f_bc) pinns4 = NaturalGradientPinnsElliptic( pde4, bc_type="weak", bc_weight=bc_weight, one_loss_by_equation=True, ng_algo="Nyström", ) solving_mode = "new" # can be "new" for solve from scratch, # "resume" for load from file and continue solving, # or "load" for load from file load_post_fix = "pinns_NNG" # the post_fix of the file where to load the pinn save_post_fix = "pinns_NNG" # the post_fix of the file where to save the pinn new_solving = solving_mode == "new" resume_solving = solving_mode == "resume" try_load = resume_solving or (not new_solving) load_ok = try_load and pinns4.load(__file__, load_post_fix) solve = (not load_ok) or (resume_solving or new_solving) if solve: pinns4.solve( 
pinns4 = NaturalGradientPinnsElliptic(
    pde4,
    bc_type="weak",
    bc_weight=bc_weight,
    one_loss_by_equation=True,
    ng_algo="Nyström",
)

solving_mode = "new"  # can be "new" to solve from scratch,
# "resume" to load from file and continue solving,
# or "load" to load from file
load_post_fix = "pinns_NNG"  # suffix of the file to load the PINN from
save_post_fix = "pinns_NNG"  # suffix of the file to save the PINN to

new_solving = solving_mode == "new"
resume_solving = solving_mode == "resume"
try_load = resume_solving or (not new_solving)
load_ok = try_load and pinns4.load(__file__, load_post_fix)
solve = (not load_ok) or (resume_solving or new_solving)

if solve:
    pinns4.solve(
        epochs=200,
        n_collocation=3000,
        n_bc_collocation=1000,
        verbose=False,
    )
    pinns4.save(__file__, save_post_fix)

print(f"Training time: {pinns4.training_time:.2f} seconds")
print(f"Best Loss : {pinns4.best_loss:.2e}")

plot_abstract_approx_spaces(
    (
        pinns2.space,
        pinns4.space,
    ),
    domain_x,
    domain_mu,
    loss=(
        pinns2.losses,
        pinns4.losses,
    ),
    residual=(
        pde2,
        pde4,
    ),
    # solution=exact_solution,
    error=exact_solution,
    draw_contours=True,
    n_drawn_contours=20,
    title="solving GradDiv2D with PinnsElliptic",
    titles=("ENG preconditioning", "Nyström NG preconditioning"),
)

plt.show()
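
# Side-by-side recap of the four strategies, reusing the training_time and
# best_loss attributes printed above (the labels are just display strings).
for label, solver in (
    ("no preconditioning", pinns),
    ("ENG", pinns2),
    ("ANaGRAM", pinns3),
    ("Nyström NG", pinns4),
):
    print(
        f"{label:>20s}: best loss {solver.best_loss:.2e}, "
        f"training time {solver.training_time:.2f} s"
    )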