r"""Solves a 2D linear second-order PDE using PINNs. .. math:: -\nabla \cdot (A \nabla u) + b \cdot \nabla u + c u & = f in \Omega \\ u & = g on \partial \Omega where :math:`x = (x_1, x_2) \in \Omega = (0, 1) \times (0, 1)`, :math:`f` such that :math:`u(x_1, x_2) = \sin(\pi x_1) \sin(\pi x_2)` and :math:`g = 0`, with :math:`A = [(1, 1), (1, 1)]`, :math:`b = (1, 1)^T` and :math:`c = 1`. Boundary conditions are enforced weakly. The neural network used is a simple MLP (Multilayer Perceptron), and the optimization is done using either Adam, Natural Gradient Descent or Anagram. This example illustrates the use of the `LinearOrder2PDE` framework. """ import matplotlib.pyplot as plt import torch from scimba_torch.approximation_space.nn_space import NNxSpace from scimba_torch.domain.meshless_domain.domain_2d import Disk2D from scimba_torch.integration.monte_carlo import DomainSampler, TensorizedSampler from scimba_torch.integration.monte_carlo_parameters import UniformParametricSampler from scimba_torch.neural_nets.coordinates_based_nets.mlp import GenericMLP from scimba_torch.numerical_solvers.elliptic_pde.pinns import ( AnagramPinnsElliptic, NaturalGradientPinnsElliptic, PinnsElliptic, ) from scimba_torch.optimizers.optimizers_data import OptimizerData from scimba_torch.physical_models.elliptic_pde.linear_order_2 import LinearOrder2PDE from scimba_torch.plots.plots_nd import plot_abstract_approx_spaces from scimba_torch.utils.scimba_tensors import LabelTensor torch.manual_seed(0) bc_weight = 40.0 alpha = 5 # sol exacte : u(x) = mu*sin(2*pi*x1)*sin(2*pi*x2) def exact_sol(x: LabelTensor, mu: LabelTensor): x1, x2 = x.get_components() # mu1 = mu.get_components() return torch.exp(-alpha * (x1**2 + x2**2)) bord = exact_sol(LabelTensor(torch.tensor([[0.0, 1.0]], dtype=torch.double)), None) # print(bord) def f_rhs(x: LabelTensor, mu: LabelTensor): x1, x2 = x.get_components() # mu1 = mu.get_components() return torch.exp(-alpha * (x1**2 + x2**2)) * ( 2 * alpha * (2 - 2 * alpha * (x1 + x2) ** 2) - 2 * alpha * (x1 + x2) + 1 ) def f_bc(x: LabelTensor, mu: LabelTensor): x1, _ = x.get_components() return x1 * 0.0 + bord.item() def A(x: torch.Tensor) -> torch.Tensor: # noqa: N802 return torch.ones( x.shape[0], 2, 2, dtype=torch.get_default_dtype(), device=torch.get_default_device(), ) def b(x: torch.Tensor) -> torch.Tensor: return torch.ones( x.shape[0], 2, dtype=torch.get_default_dtype(), device=torch.get_default_device(), ) def c(x: torch.Tensor) -> torch.Tensor: return torch.ones( x.shape[0], 1, dtype=torch.get_default_dtype(), device=torch.get_default_device(), ) domain_x = Disk2D((0.0, 0.0), 1, is_main_domain=True) # domain_x = Square2D([(-0.5, 0.5), (-0.5, 0.5)], is_main_domain=True) sampler = TensorizedSampler( [DomainSampler(domain_x), UniformParametricSampler([(1.0, 1.0 + 1e-5)])] ) space = NNxSpace( 1, 1, GenericMLP, domain_x, sampler, layer_sizes=[64], # post_processing=post_processing, ) pde = LinearOrder2PDE(space, 2, f=f_rhs, g=f_bc, A=A, b=b, c=c) opt_1 = { "name": "adam", "optimizer_args": {"lr": 1.8e-2, "betas": (0.9, 0.999)}, } pinns = PinnsElliptic( pde, bc_type="weak", optimizers=OptimizerData(opt_1), bc_weight=bc_weight ) resume_solve = True if resume_solve or not pinns.load(__file__, "pinns"): pinns.solve(epochs=1000, n_collocation=3000, verbose=True) pinns.save(__file__, "pinns") pinns.space.load_from_best_approx() space2 = NNxSpace( 1, 1, GenericMLP, domain_x, sampler, layer_sizes=[64], # post_processing=post_processing, ) pde2 = LinearOrder2PDE(space2, 2, f=f_rhs, g=f_bc, A=A, b=b, c=c) 
space2 = NNxSpace(
    1,
    1,
    GenericMLP,
    domain_x,
    sampler,
    layer_sizes=[64],
    # post_processing=post_processing,
)
pde2 = LinearOrder2PDE(space2, 2, f=f_rhs, g=f_bc, A=A, b=b, c=c)

# second solver: Natural Gradient Descent, warm-started from the Adam solution
pinns2 = NaturalGradientPinnsElliptic(pde2, bc_type="weak", bc_weight=bc_weight)

resume_solve = True
if resume_solve or not pinns2.load(__file__, "ENG"):
    # load from pinns.space
    pinns2.space.load_from_dict(pinns.space.dict_for_save())
    pinns2.space.load_from_best_approx()
    pinns2.solve(epochs=200, n_collocation=2000, verbose=True, n_bc_collocation=500)
    pinns2.save(__file__, "ENG")
pinns2.space.load_from_best_approx()

space3 = NNxSpace(
    1,
    1,
    GenericMLP,
    domain_x,
    sampler,
    layer_sizes=[64],
    # post_processing=post_processing,
)
pde3 = LinearOrder2PDE(space3, 2, f=f_rhs, g=f_bc, A=A, b=b, c=c)

# third solver: Anagram, also warm-started from the Adam solution
pinns3 = AnagramPinnsElliptic(
    pde3, bc_type="weak", bc_weight=bc_weight, svd_threshold=1e-4
)

resume_solve = True
if resume_solve or not pinns3.load(__file__, "anagram"):
    # load from pinns.space
    pinns3.space.load_from_dict(pinns.space.dict_for_save())
    pinns3.space.load_from_best_approx()
    pinns3.solve(epochs=300, n_collocation=2000, verbose=True, n_bc_collocation=500)
    pinns3.save(__file__, "anagram")
pinns3.space.load_from_best_approx()

# plot pinns, pinns2 and pinns3
plot_abstract_approx_spaces(
    (pinns.space, pinns2.space, pinns3.space),  # an Iterable of AbstractSpace
    # either a VolumetricDomain, or an Iterable of VolumetricDomain
    # of length 1 or len(first argument)
    domain_x,
    ((1.0, 1.0 + 1e-5),),
    loss=(
        pinns.losses,
        pinns2.losses,
        pinns3.losses,
    ),  # same as previously; if only one is given, it will be used for all spaces
    # residual=(
    #     pinns.pde,
    #     pinns2.pde,
    #     pinns3.pde,
    # ),  # same as previously; if only one is given, it will be used for all spaces
    solution=exact_sol,
    error=exact_sol,  # same as previously; if only one is given, it will be used for all spaces
    draw_contours=True,
    n_drawn_contours=20,
    parameters_values="random",
)

plt.show()
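
# ---------------------------------------------------------------------------
# Optional reference plot (a minimal standalone sketch, not part of the
# original example): draw the exact solution on the unit disk with plain
# matplotlib, as a visual reference for the figures produced by
# plot_abstract_approx_spaces.  Only torch, matplotlib and the exact_sol /
# LabelTensor objects defined above are used.
# ---------------------------------------------------------------------------
n_grid = 200
xs = torch.linspace(-1.0, 1.0, n_grid)
x1g, x2g = torch.meshgrid(xs, xs, indexing="ij")
pts = torch.stack([x1g.flatten(), x2g.flatten()], dim=1)
u_exact = exact_sol(LabelTensor(pts), None).reshape(n_grid, n_grid)
# hide the grid points that fall outside the unit disk
u_exact = u_exact.masked_fill(x1g**2 + x2g**2 > 1.0, float("nan"))
plt.figure()
plt.contourf(x1g.cpu().numpy(), x2g.cpu().numpy(), u_exact.cpu().numpy(), levels=20)
plt.colorbar()
plt.title("Exact solution on the unit disk")
plt.show()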