r"""Solves a 2D Poisson PDE with Dirichlet BCs and two parameters.

.. math::

    -\mu_1 \Delta u &= f \quad \text{in } \Omega \times M \\
    u &= g \quad \text{on } \partial \Omega \times M

where :math:`x = (x_1, x_2) \in \Omega = \mathcal{D}`, with :math:`\mathcal{D}`
the unit disk centered at the origin, :math:`f` is chosen such that the exact
solution is :math:`u(x_1, x_2, \mu) = \mu_1 \exp(-\mu_2 (x_1^2 + x_2^2))`,
:math:`g` is the trace of the exact solution on the boundary, and
:math:`\mu = (\mu_1, \mu_2) \in M = [1, 2] \times [0.1, 0.5]`.

Boundary conditions are enforced strongly. The neural network used is a
simple MLP (Multilayer Perceptron), and the optimization is done using
Natural Gradient Descent. This example also tests uniform sampling versus
Sobol sampling for the space domain and the parameters, and compares the
results of the two integration methods.
"""

import matplotlib.pyplot as plt
import torch

from scimba_torch.approximation_space.nn_space import NNxSpace
from scimba_torch.domain.meshless_domain.domain_2d import Disk2D
from scimba_torch.integration.monte_carlo import DomainSampler, TensorizedSampler
from scimba_torch.integration.monte_carlo_parameters import (
    SobolParametricSampler,
    UniformParametricSampler,
)
from scimba_torch.neural_nets.coordinates_based_nets.mlp import GenericMLP
from scimba_torch.numerical_solvers.elliptic_pde.pinns import (
    NaturalGradientPinnsElliptic,
)
from scimba_torch.physical_models.elliptic_pde.laplacians import (
    Laplacian2DDirichletStrongForm,
)
from scimba_torch.plots.plots_nd import plot_abstract_approx_spaces
from scimba_torch.utils.scimba_tensors import LabelTensor
from scimba_torch.utils.verbosity import set_verbosity

set_verbosity(True)


class Laplacian2DDirichletStrongForm2Params(Laplacian2DDirichletStrongForm):
    """Strong-form Laplacian whose diffusion coefficient is the first parameter."""

    def operator(self, w, x: LabelTensor, mu: LabelTensor) -> torch.Tensor:
        """Compute the differential operator of the PDE, -mu_1 * (u_xx + u_yy).

        Args:
            w: State tensor.
            x: Spatial coordinates tensor.
            mu: Parameter tensor holding (mu_1, sigma).

        Returns:
            The result of applying the operator to the state.
        """
        u = w.get_components()
        # The second parameter (sigma) only enters the source term f and the
        # boundary data g, not the differential operator itself.
        alpha, _ = mu.get_components()
        u_x, u_y = self.grad(u, x)
        u_xx, _ = self.grad(u_x, x)
        _, u_yy = self.grad(u_y, x)
        return -alpha * (u_xx + u_yy)


def exact_sol(x: LabelTensor, mu: LabelTensor):
    """Exact solution u(x, mu) = mu_1 * exp(-sigma * (x_1^2 + x_2^2))."""
    x1, x2 = x.get_components()
    mu1, sigma = mu.get_components()
    return mu1 * torch.exp(-sigma * (x1**2 + x2**2))


domain_x = Disk2D((0.0, 0.0), 1, is_main_domain=True)
domain_mu = [[1.0, 2.0], [0.1, 0.5]]


def f_rhs(x: LabelTensor, mu: LabelTensor):
    """Source term f = -mu_1 * Laplacian(exact_sol).

    With u = mu_1 exp(-sigma r^2) one gets
    f = 4 mu_1^2 sigma (1 - sigma r^2) exp(-sigma r^2).
    """
    x1, x2 = x.get_components()
    mu1, sigma = mu.get_components()
    a = x1**2 + x2**2
    return -4 * mu1**2 * sigma * (-1.0 + sigma * a) * torch.exp(-sigma * a)


def f_bc(x: LabelTensor, mu: LabelTensor):
    """Dirichlet boundary data: the exact solution evaluated on |x| = 1."""
    x1, _ = x.get_components()
    mu1, sigma = mu.get_components()
    boundary_value = mu1 * torch.exp(-sigma)
    # x1 * 0.0 broadcasts the scalar boundary value to the sample shape.
    return x1 * 0.0 + boundary_value


def post_processing(inputs: torch.Tensor, x: LabelTensor, mu: LabelTensor):
    """Enforce the Dirichlet BC strongly: u = NN * phi + g.

    phi is a level-set of the unit disk that vanishes on the boundary, so the
    output equals the exact boundary value g = mu_1 exp(-sigma) on |x| = 1.
    """
    x1, x2 = x.get_components()
    mu1, sigma = mu.get_components()
    phi = x1**2.0 + x2**2.0 - 1.0
    boundary_value = mu1 * torch.exp(-sigma)
    return inputs * phi + boundary_value


def functional_post_processing(
    func, x: torch.Tensor, mu: torch.Tensor, theta: torch.Tensor
) -> torch.Tensor:
    """Functional (theta-explicit) form of :func:`post_processing`.

    Used by the natural-gradient solver, which needs the network output as an
    explicit function of the parameters theta.
    """
    phi = x[0] ** 2.0 + x[1] ** 2.0 - 1.0
    boundary_value = mu[0] * torch.exp(-mu[1])
    return func(x, mu, theta) * phi + boundary_value


def create_sampler(sobol_space: bool, sobol_param: bool):
    """Build a tensorized sampler with the requested sampling strategies."""
    sampler_x = DomainSampler(domain_x, sobol=sobol_space)
    sampler_mu = (
        SobolParametricSampler(domain_mu)
        if sobol_param
        else UniformParametricSampler(domain_mu)
    )
    return TensorizedSampler([sampler_x, sampler_mu])


def create_space(sampler):
    """Create the neural approximation space (MLP, 3 hidden layers of 20)."""
    return NNxSpace(
        1,
        2,
        GenericMLP,
        domain_x,
        sampler,
        layer_sizes=[20] * 3,
        post_processing=post_processing,
    )


def create_pinn(space):
    """Create the natural-gradient PINN solver for the parametric Poisson PDE."""
    pde = Laplacian2DDirichletStrongForm2Params(space, f=f_rhs, g=f_bc)
    return NaturalGradientPinnsElliptic(
        pde, bc_type="strong", functional_post_processing=functional_post_processing
    )


# Train one PINN per combination of (space sampling, parameter sampling).
list_of_pinns = []
for sobol_space in [False, True]:
    for sobol_param in [False, True]:
        sampler = create_sampler(sobol_space, sobol_param)
        space = create_space(sampler)
        pinn = create_pinn(space)
        pinn.solve(epochs=100, n_collocation=900, n_bc_collocation=200, verbose=False)
        list_of_pinns.append(pinn)

title = (
    "Solving "
    + r"$-\mu\nabla^2 u = 4\mu^2\sigma(1-\sigma(x^2 + y^2))e^{-\sigma(x^2 + y^2)}$"
    + " on the unit disk"
)


def norm_of_grad(space, xy, mu):
    """Euclidean norm of the spatial gradient of the approximate solution."""
    xy.x.requires_grad_()
    w = space.evaluate(xy, mu)
    u = w.get_components()
    ux, uy = space.grad(u, xy)
    return torch.sqrt(ux**2 + uy**2)


additional_scalar_functions = {"$\\|\\nabla u\\|_2$": norm_of_grad}

plot_abstract_approx_spaces(
    [pinn.space for pinn in list_of_pinns],
    domain_x,
    domain_mu,
    loss=[pinn.losses for pinn in list_of_pinns],
    residual=[pinn.pde for pinn in list_of_pinns],
    error=exact_sol,
    additional_scalar_functions=additional_scalar_functions,
    draw_contours=True,
    n_drawn_contours=20,
    parameters_values="mean",
    title=title,
    titles=(
        "Uniform space, Uniform parameters",
        "Uniform space, Sobol parameters",
        "Sobol space, Uniform parameters",
        "Sobol space, Sobol parameters",
    ),
)

plt.show()