r"""Solves a 2D Poisson PDE with Dirichlet boundary conditions using PINNs. .. math:: -\mu \delta u & = f in \Omega \times M \\ u & = g on \partial \Omega \times M where :math:`x = (x_1, x_2) \in \Omega = (0, 1) \times (0, 1)`, :math:`f` such that :math:`u(x_1, x_2, \mu) = \mu \sin(2\pi x_1) \sin(2\pi x_2)`, :math:`g = 0` and :math:`\mu \in M = [1, 2]`. Boundary conditions are enforced either weakly or strongly. The neural network used is a simple MLP (Multilayer Perceptron), and the optimization is done using Adam. """ # %% import matplotlib.pyplot as plt import torch from scimba_torch.approximation_space.nn_space import NNxSpace from scimba_torch.domain.meshless_domain.domain_2d import Square2D from scimba_torch.integration.monte_carlo import DomainSampler, TensorizedSampler from scimba_torch.integration.monte_carlo_parameters import UniformParametricSampler from scimba_torch.neural_nets.coordinates_based_nets.mlp import GenericMLP from scimba_torch.numerical_solvers.elliptic_pde.pinns import PinnsElliptic from scimba_torch.optimizers.losses import GenericLosses from scimba_torch.optimizers.optimizers_data import OptimizerData from scimba_torch.physical_models.elliptic_pde.laplacians import ( Laplacian2DDirichletStrongForm, ) from scimba_torch.plots.plots_nd import plot_abstract_approx_spaces from scimba_torch.utils.scimba_tensors import LabelTensor torch.manual_seed(0) def f_rhs(x: LabelTensor, mu: LabelTensor): x1, x2 = x.get_components() mu1 = mu.get_components() return ( mu1 * mu1 * 8.0 * torch.pi * torch.pi * torch.sin(2.0 * torch.pi * x1) * torch.sin(2.0 * torch.pi * x2) ) def f_bc(x: LabelTensor, mu: LabelTensor): x1, _ = x.get_components() return x1 * 0.0 def exact_sol(x: LabelTensor, mu: LabelTensor): x1, x2 = x.get_components() mu1 = mu.get_components() return mu1 * torch.sin(2.0 * torch.pi * x1) * torch.sin(2.0 * torch.pi * x2) domain_x = Square2D([(0.0, 1), (0.0, 1)], is_main_domain=True) sampler = TensorizedSampler( [DomainSampler(domain_x), UniformParametricSampler([(1.0, 2.0)])] ) # %% #### first space: weak boundary conditions ##### space = NNxSpace(1, 1, GenericMLP, domain_x, sampler, layer_sizes=[40]) pde = Laplacian2DDirichletStrongForm(space, f=f_rhs, g=f_bc) losses = GenericLosses( [ ("residual", torch.nn.MSELoss(), 1.0), ("bc", torch.nn.MSELoss(), 40.0), ], ) opt_1 = { "name": "adam", "optimizer_args": {"lr": 1.8e-2, "betas": (0.9, 0.999)}, } opt = OptimizerData(opt_1) pinns = PinnsElliptic(pde, bc_type="weak", optimizers=opt, losses=losses) # train for 3000 epochs pinns.solve(epochs=3000, n_collocation=3000, n_bc_collocation=1600, verbose=True) ### plot sampler_plot = TensorizedSampler( [DomainSampler(domain_x), UniformParametricSampler([(1.2, 1.20001)])] ) # for plotting pinns plot_abstract_approx_spaces( pinns.space, # the approximation space domain_x, # the spatial domain [[1.0, 2.0]], # the parameter's domain loss=pinns.losses, # for plot of the loss: the losses residual=pde, # for plot of the residual: the pde solution=exact_sol, # for plot of the exact sol: sol error=exact_sol, # for plot of the error with respect to a func: the func cuts=[ ([0.0, 0.0], [-0.5, 0.5]), ([0.0, 0.2], [0.0, 1.0]), ], # for plots on linear cuts of dim d-1, a tuple (point, normal vector) representing the affine space of dim d-1 draw_contours=True, n_drawn_contours=20, parameters_values="random", ) plt.show() # %% #### second space: hard-constrained boundary conditions ##### def post_processing(inputs: torch.Tensor, x: LabelTensor, mu: LabelTensor): x1, x2 = x.get_components() return 
def post_processing(inputs: torch.Tensor, x: LabelTensor, mu: LabelTensor):
    x1, x2 = x.get_components()
    return inputs * x1 * (1.0 - x1) * x2 * (1.0 - x2)


space2 = NNxSpace(
    1,
    1,
    GenericMLP,
    domain_x,
    sampler,
    layer_sizes=[40],
    post_processing=post_processing,
)
pde2 = Laplacian2DDirichletStrongForm(space2, f=f_rhs, g=f_bc)

opt_1 = {
    "name": "adam",
    "optimizer_args": {"lr": 1.8e-2, "betas": (0.9, 0.999)},
}
opt2 = OptimizerData(opt_1)

pinns2 = PinnsElliptic(pde2, bc_type="strong", optimizers=opt2)

# train until loss <= 10, for a maximum of 3000 epochs
pinns2.solve(max_epochs=3000, loss_target=10, n_collocation=3000, verbose=True)

### plot
sampler_plot = TensorizedSampler(
    [DomainSampler(domain_x), UniformParametricSampler([(1.2, 1.20001)])]
)  # for plotting pinns and pinns2

plot_abstract_approx_spaces(
    (
        pinns.space,
        pinns2.space,
    ),  # an Iterable of AbstractSpace
    domain_x,  # either a VolumetricDomain, or an Iterable of VolumetricDomain
    # of length 1 or len(first argument)
    [
        [1.0, 2.0]
    ],  # either a List[List[float]], or an Iterable of List[List[float]]
    # of length 1 or len(first argument)
    loss=(
        pinns.losses,
        pinns2.losses,
    ),  # same as previously; if only one is given, it will be used for all spaces
    residual=(
        pinns.pde,
        pinns2.pde,
    ),  # same as previously; if only one is given, it will be used for all spaces
    error=exact_sol,  # same as previously; if only one is given, it will be used for all spaces
    draw_contours=True,
    n_drawn_contours=20,
    parameters_values="random",
)
plt.show()

# %%
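# Consistency check of the data terms (pure algebra, no scimba_torch calls): with
# u(x1, x2, mu) = mu * sin(2*pi*x1) * sin(2*pi*x2), one has
# -Delta u = 8 * pi**2 * mu * sin(2*pi*x1) * sin(2*pi*x2), hence
# f = -mu * Delta u = 8 * pi**2 * mu**2 * sin(2*pi*x1) * sin(2*pi*x2),
# which is exactly what f_rhs implements above.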