r"""Solves a 2D Poisson PDE with Dirichlet BCs using PINNs. .. math:: -\mu \delta u & = f in \Omega \times M \\ u & = g on \partial \Omega \times M where :math:`x = (x_1, x_2) \in \Omega = \mathcal{D}` (with \mathcal{D} being the disk with center :math:`(x_1^0, x_2^0) = (-0.5, 0.5)`), :math:`f` such that :math:`u(x_1, x_2, \mu) = \mu \exp{- \frac 1 4 (x_1^2 + x_2^2)}`, :math:`g = 0` and :math:`\mu \in M = [1, 2]`. Boundary conditions are enforced strongly. The neural network used is a simple MLP (Multilayer Perceptron), and the optimization is done using either Adam or Natural Gradient Descent. """ import matplotlib.pyplot as plt import torch from scimba_torch.approximation_space.nn_space import NNxSpace from scimba_torch.domain.meshless_domain.domain_2d import Disk2D from scimba_torch.integration.monte_carlo import DomainSampler, TensorizedSampler from scimba_torch.integration.monte_carlo_parameters import UniformParametricSampler from scimba_torch.neural_nets.coordinates_based_nets.mlp import GenericMLP from scimba_torch.numerical_solvers.elliptic_pde.pinns import ( NaturalGradientPinnsElliptic, PinnsElliptic, ) from scimba_torch.optimizers.losses import GenericLosses from scimba_torch.optimizers.optimizers_data import OptimizerData from scimba_torch.physical_models.elliptic_pde.laplacians import ( Laplacian2DDirichletStrongForm, ) from scimba_torch.plots.plots_nd import plot_abstract_approx_spaces from scimba_torch.utils.scimba_tensors import LabelTensor from scimba_torch.utils.verbosity import set_verbosity set_verbosity(True) sigma = 1.0 / 4.0 def exact_sol(x: LabelTensor, mu: LabelTensor): x1, x2 = x.get_components() mu1 = mu.get_components() return mu1 * torch.exp(-sigma * (x1**2 + x2**2)) boundary_value = exact_sol( LabelTensor(torch.tensor([[0.0, 1.0]], dtype=torch.double)), LabelTensor(torch.tensor([[1.0]], dtype=torch.double)), ) def f_rhs(x: LabelTensor, mu: LabelTensor): x1, x2 = x.get_components() mu1 = mu.get_components() # return torch.ones_like(x1) a = x1**2 + x2**2 return -4 * mu1 * sigma * (-1.0 + sigma * a) * torch.exp(-sigma * a) def f_bc(x: LabelTensor, mu: LabelTensor): x1, _ = x.get_components() mu1 = mu.get_components() return x1 * 0.0 + boundary_value.item() * mu1 domain_x = Disk2D((0.0, 0.0), 1, is_main_domain=True) sampler = TensorizedSampler( [DomainSampler(domain_x), UniformParametricSampler([[1.0, 2.0]])] ) #### first space: Adam #### def post_processing(inputs: torch.Tensor, x: LabelTensor, mu: LabelTensor): x1, x2 = x.get_components() mu1 = mu.get_components() phi = x1**2.0 + x2**2.0 - 1.0 return inputs * phi + boundary_value.item() * mu1 def functional_post_processing( func, x: torch.Tensor, mu: torch.Tensor, theta: torch.Tensor ) -> torch.Tensor: phi = x[0] ** 2.0 + x[1] ** 2.0 - 1.0 return func(x, mu, theta) * phi + boundary_value.item() * mu[0] space = NNxSpace( 1, 1, GenericMLP, domain_x, sampler, layer_sizes=[64], post_processing=post_processing, ) pde = Laplacian2DDirichletStrongForm(space, f=f_rhs, g=f_bc) losses = GenericLosses([("residual", torch.nn.MSELoss(), 1.0)]) opt_1 = { "name": "adam", "optimizer_args": {"lr": 1.8e-2, "betas": (0.9, 0.999)}, } opt = OptimizerData(opt_1) pinns = PinnsElliptic( pde, bc_type="strong", optimizers=OptimizerData(opt_1), losses=losses ) new_solve = False if new_solve or not pinns.load(__file__, "no_precond"): pinns.solve(epochs=1000, n_collocation=3000) pinns.save(__file__, "no_precond", verbose=False) #### second space: natural gradient descent #### space2 = NNxSpace( 1, 1, GenericMLP, domain_x, sampler, layer_sizes=[64], post_processing=post_processing, ) pde2 = Laplacian2DDirichletStrongForm(space2, f=f_rhs, g=f_bc) pinns2 = NaturalGradientPinnsElliptic( pde2, bc_type="strong", functional_post_processing=functional_post_processing ) new_solve = False if new_solve or not pinns2.load(__file__, "ENG"): pinns2.solve(epochs=200, n_collocation=900, n_bc_collocation=200, verbose=True) pinns2.save(__file__, "ENG") title = ( "Solving " + r"$-\mu\nabla^2 u = \mu(1-(x^2 + y^2)/4)e^{-(x^2 + y^2)/4}$" + " on the unit disk" ) plot_abstract_approx_spaces( ( pinns.space, pinns2.space, ), # an Iterable of AbstractSpace ( domain_x, ), # either a VolumetricDomain, or an Iterable of VolumetricDomain of length 1 or len(first argument) ([[1.0, 2.0]],), loss=( pinns.losses, pinns2.losses, ), # same as previously; if only one is given, it will be used for all spaces residual=( pinns.pde, pinns2.pde, ), # same as previously; if only one is given, it will be used for all spaces error=exact_sol, draw_contours=True, n_drawn_contours=20, parameters_values="mean", title=title, titles=("not preconditioned PINN", "ENG preconditioned PINN"), ) plt.show() # plot_AbstractApproxSpaces( # pinns2.space, # domain_x, # a VolumetricDomain, # [(1.0, 2.0)], # List[Sequence[float]] # parameters_values=([1.0], [1.5], [2.0]), # draw_contours=True, # n_drawn_contours=20, # ) # plt.show()