r"""Solves the 2D inhomogeneous Helmholtz PDE with homogeneous Dirichlet BCs using PINNs. .. math:: \delta u + k^2 u & = f in \Omega \\ u & = g on \partial \Omega where :math:`x = (x_1, x_2) \in \Omega = (0, 1) \times (0, 1)`, :math:`g(x_1, x_2) = \sinh(x_1) + \cosh(x_2)` and :math:`k = 2`. The boundary conditions are enforced strongly. The neural network used is a simple MLP (Multilayer Perceptron) with Fourier features, and the optimization is done using either Adam, Natural Gradient Descent, or Anagram. """ from typing import Callable import matplotlib.pyplot as plt import torch from scimba_torch.approximation_space.abstract_space import AbstractApproxSpace from scimba_torch.approximation_space.nn_space import NNxSpace from scimba_torch.domain.meshless_domain.domain_2d import Square2D from scimba_torch.integration.monte_carlo import DomainSampler, TensorizedSampler from scimba_torch.integration.monte_carlo_parameters import UniformParametricSampler from scimba_torch.neural_nets.coordinates_based_nets.features import FourierMLP from scimba_torch.numerical_solvers.elliptic_pde.pinns import ( AnagramPinnsElliptic, NaturalGradientPinnsElliptic, PinnsElliptic, ) from scimba_torch.optimizers.optimizers_data import OptimizerData from scimba_torch.plots.plots_nd import plot_abstract_approx_spaces from scimba_torch.utils.scimba_tensors import LabelTensor, MultiLabelTensor from scimba_torch.utils.typing_protocols import VarArgCallable torch.manual_seed(0) class HelmholtzDirichlet: def __init__( self, space: AbstractApproxSpace, spatial_dim: int, f: Callable, g: Callable, **kwargs, ): self.space = space self.spatial_dim = spatial_dim self.f = f self.g = g self.linear = True self.residual_size = 1 self.bc_residual_size = 1 def grad( self, w: torch.Tensor | MultiLabelTensor, y: torch.Tensor | LabelTensor, ) -> torch.Tensor | tuple[torch.Tensor, ...]: return self.space.grad(w, y) def rhs(self, w: MultiLabelTensor, x: LabelTensor, mu: LabelTensor) -> torch.Tensor: return self.f(x, mu) def bc_rhs( self, w: MultiLabelTensor, x: LabelTensor, n: LabelTensor, mu: LabelTensor ) -> LabelTensor: return self.g(x, mu) def operator( self, w: MultiLabelTensor, x: LabelTensor, mu: LabelTensor ) -> torch.Tensor: u = w.get_components() k = mu.get_components() lambda2_u = k**2 * u grad_u = torch.cat(tuple(self.grad(u, x)), dim=-1) div_grad_u = tuple(self.grad(grad_u[:, 0], x))[0] for i in range(1, self.spatial_dim): div_grad_u += tuple(self.grad(grad_u[:, i], x))[i] return div_grad_u - lambda2_u def functional_operator( self, func: VarArgCallable, x: torch.Tensor, mu: torch.Tensor, theta: torch.Tensor, ) -> torch.Tensor: lambda2_u = mu[0] ** 2 * func(x, mu, theta) grad_u = torch.func.jacrev(func, 0) grad_grad_u = torch.func.jacrev(grad_u, 0)(x, mu, theta).squeeze() div_grad_u = torch.einsum("ii", grad_grad_u) return div_grad_u - lambda2_u # Dirichlet conditions def bc_operator( self, w: MultiLabelTensor, x: LabelTensor, n: LabelTensor, mu: LabelTensor ) -> torch.Tensor: u = w.get_components() return u def functional_operator_bc( self, func: VarArgCallable, x: torch.Tensor, n: torch.Tensor, mu: torch.Tensor, theta: torch.Tensor, ) -> torch.Tensor: return func(x, mu, theta) def f_rhs(x: LabelTensor, mu: LabelTensor): x1, x2 = x.get_components() k = mu.get_components() return (1 - k**2) * (torch.sinh(x1) + torch.cosh(x2)) def f_bc(x: LabelTensor, mu: LabelTensor): x1, x2 = x.get_components() return torch.sinh(x1) + torch.cosh(x2) def exact_sol(x: LabelTensor, mu: LabelTensor): x1, x2 = x.get_components() # mu1 = 
domain_mu = [(2.0, 2.0 + 1e-4)]
domain_x = Square2D([(0.0, 1.0), (0.0, 1.0)], is_main_domain=True)
sampler = TensorizedSampler(
    [DomainSampler(domain_x), UniformParametricSampler(domain_mu)]
)

space = NNxSpace(
    1,
    1,
    FourierMLP,
    domain_x,
    sampler,
    layer_sizes=[32],
    nb_features=10,
    activation_type="sine",
)
pde = HelmholtzDirichlet(space, 2, f=f_rhs, g=f_bc)

opt_1 = {
    "name": "adam",
    "optimizer_args": {"lr": 9.9e-3, "betas": (0.9, 0.999)},
}
opt_2 = {
    "name": "lbfgs",
    "switch_at_epoch_ratio": 0.7,
}

pinns = PinnsElliptic(
    pde,
    bc_type="weak",
    optimizers=OptimizerData(opt_1, opt_2),
)

new_solve = True
if new_solve or not pinns.load(__file__, "pinns"):
    pinns.solve(epochs=1000, n_collocation=2000, n_bc_collocation=1000, verbose=True)
    pinns.save(__file__, "pinns")

space2 = NNxSpace(
    1,
    1,
    FourierMLP,
    domain_x,
    sampler,
    layer_sizes=[32],
    nb_features=10,
    activation_type="sine",
)
pde2 = HelmholtzDirichlet(space2, 2, f=f_rhs, g=f_bc)

pinns2 = NaturalGradientPinnsElliptic(pde2, bc_type="weak")

# new_solve = True
if new_solve or not pinns2.load(__file__, "ENG"):
    pinns2.solve(epochs=50, n_collocation=2000, n_bc_collocation=1000, verbose=True)
    pinns2.save(__file__, "ENG")

space3 = NNxSpace(
    1,
    1,
    FourierMLP,
    domain_x,
    sampler,
    layer_sizes=[32],
    nb_features=10,
    activation_type="sine",
)
pde3 = HelmholtzDirichlet(space3, 2, f=f_rhs, g=f_bc)

pinns3 = AnagramPinnsElliptic(
    pde3,
    bc_type="weak",
    svd_threshold=5e-4,
)

new_solve = True
if new_solve or not pinns3.load(__file__, "Anagram"):
    pinns3.solve(epochs=50, n_collocation=2000, n_bc_collocation=1000, verbose=True)
    pinns3.save(__file__, "Anagram")

plot_abstract_approx_spaces(
    (
        pinns.space,
        pinns2.space,
        pinns3.space,
    ),
    domain_x,
    domain_mu,
    loss=(
        pinns.losses,
        pinns2.losses,
        pinns3.losses,
    ),
    residual=(
        pinns.pde,
        pinns2.pde,
        pinns3.pde,
    ),
    error=exact_sol,
    draw_contours=True,
    n_drawn_contours=20,
    parameters_values="mean",
    title=r"Helmholtz equation $\Delta u - \lambda^2 u = (1-\lambda^2)(\sinh(x_1) + \cosh(x_2))$",
    titles=("no preconditioner", "ENG preconditioner", "Anagram preconditioner"),
)

plt.show()
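# --- Illustrative check of `functional_operator` (an addition to the original
# example, under the assumption that it can be called directly on plain tensors).
# For a point-wise callable returning the exact solution (the `theta` argument is
# an unused placeholder), the operator should return Delta u - k^2 u = (1 - k^2) u
# at the given point.
def _u_exact_pointwise(
    x: torch.Tensor, mu: torch.Tensor, theta: torch.Tensor
) -> torch.Tensor:
    return torch.sinh(x[0]) + torch.cosh(x[1])


_x = torch.tensor([0.3, 0.7])
_mu = torch.tensor([2.0])
_theta = torch.zeros(1)  # unused placeholder argument
print(
    "functional_operator on exact solution:",
    pde.functional_operator(_u_exact_pointwise, _x, _mu, _theta).item(),
    "expected:",
    ((1 - _mu[0] ** 2) * (torch.sinh(_x[0]) + torch.cosh(_x[1]))).item(),
)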