r"""Solves a 2D Poisson PDE with Dirichlet boundary conditions using PINNs. .. math:: -\delta u & = f in \Omega where :math:`x = (x_1, x_2) \in \Omega = (0, 1) \times (0, 1)` and :math:`f` such that :math:`u(x_1, x_2) = \sin(2\pi x_1) \sin(2\pi x_2)`, and :math:`g = 0`. The boundary conditions are either homogeneous Dirichlet conditions enforced strongly or weakly, or periodic boundary conditions enforced strongly by using a periodic embedding within the neural network. The neural network is a simple MLP (Multilayer Perceptron). The optimization is done using either Adam or Natural Gradient Descent. """ # %% import matplotlib.pyplot as plt import torch from scimba_torch.approximation_space.nn_space import NNxSpace from scimba_torch.domain.meshless_domain.domain_2d import Square2D from scimba_torch.integration.monte_carlo import DomainSampler, TensorizedSampler from scimba_torch.integration.monte_carlo_parameters import UniformParametricSampler from scimba_torch.neural_nets.coordinates_based_nets.features import PeriodicMLP from scimba_torch.neural_nets.coordinates_based_nets.mlp import GenericMLP from scimba_torch.numerical_solvers.elliptic_pde.pinns import ( NaturalGradientPinnsElliptic, PinnsElliptic, ) from scimba_torch.optimizers.losses import GenericLosses from scimba_torch.optimizers.optimizers_data import OptimizerData from scimba_torch.physical_models.elliptic_pde.laplacians import ( Laplacian2DDirichletStrongForm, ) from scimba_torch.plots.plots_nd import plot_abstract_approx_spaces from scimba_torch.utils.scimba_tensors import LabelTensor torch.manual_seed(0) def f_rhs(x: LabelTensor, mu: LabelTensor): x1, x2 = x.get_components() return 2 * torch.pi**2 * torch.sin(torch.pi * x1) * torch.sin(torch.pi * x2) def f_bc(x: LabelTensor, mu: LabelTensor): x1, _ = x.get_components() return x1 * 0.0 def exact_sol(x: LabelTensor, mu: LabelTensor): x1, x2 = x.get_components() return torch.sin(torch.pi * x1) * torch.sin(torch.pi * x2) class Laplacian2DDirichletStrongFormNoParam(Laplacian2DDirichletStrongForm): def operator(self, w, x, mu): u = w.get_components() u_x, u_y = self.grad(u, x) u_xx, _ = self.grad(u_x, x) _, u_yy = self.grad(u_y, x) return -(u_xx + u_yy) def functional_operator(self, func, x, mu, theta): grad_u = torch.func.jacrev(func, 0) hessian_u = torch.func.jacrev(grad_u, 0, chunk_size=None)(x, mu, theta) return -(hessian_u[..., 0, 0] + hessian_u[..., 1, 1]) domain_x = Square2D([(-1, 1), (-1, 1)], is_main_domain=True) sampler = TensorizedSampler([DomainSampler(domain_x), UniformParametricSampler([])]) # %% #### first space: weak boundary conditions ##### space = NNxSpace(1, 0, GenericMLP, domain_x, sampler, layer_sizes=[40]) pde = Laplacian2DDirichletStrongFormNoParam(space, f=f_rhs, g=f_bc) losses = GenericLosses( [ ("residual", torch.nn.MSELoss(), 1.0), ("bc", torch.nn.MSELoss(), 40.0), ], ) opt_1 = { "name": "adam", "optimizer_args": {"lr": 1.8e-2, "betas": (0.9, 0.999)}, } opt = OptimizerData(opt_1) pinns = PinnsElliptic(pde, bc_type="weak", optimizers=opt, losses=losses) # train for 3000 epochs pinns.solve(epochs=3000, n_collocation=3000, n_bc_collocation=1600, verbose=True) ### plot sampler_plot = TensorizedSampler( [DomainSampler(domain_x), UniformParametricSampler([])] ) # for plotting pinns plot_abstract_approx_spaces( pinns.space, # the approximation space domain_x, # the spatial domain [], # the parameter's domain loss=pinns.losses, # for plot of the loss: the losses residual=pde, # for plot of the residual: the pde solution=exact_sol, # for plot of the exact sol: 
# %%
#### second space: hard-constrained boundary conditions ####


def post_processing(inputs: torch.Tensor, x: LabelTensor, mu: LabelTensor):
    """Multiply the network output by a function vanishing on the boundary."""
    x1, x2 = x.get_components()
    return inputs * (x1 + 1) * (1 - x1) * (x2 + 1) * (1 - x2)


space2 = NNxSpace(
    1,
    0,
    GenericMLP,
    domain_x,
    sampler,
    layer_sizes=[40],
    post_processing=post_processing,
)
pde2 = Laplacian2DDirichletStrongFormNoParam(space2, f=f_rhs, g=f_bc)

opt_1 = {
    "name": "adam",
    "optimizer_args": {"lr": 1.8e-2, "betas": (0.9, 0.999)},
}
opt2 = OptimizerData(opt_1)

pinns2 = PinnsElliptic(pde2, bc_type="strong", optimizers=opt2)

# train until loss <= 1e-2, for a maximum of 3000 epochs
pinns2.solve(max_epochs=3000, loss_target=0.01, n_collocation=3000, verbose=True)

# %%
#### third space: periodic boundary conditions (no post-processing needed) ####

space3 = NNxSpace(
    1,
    0,
    PeriodicMLP,
    domain_x,
    sampler,
    layer_sizes=[20, 20],
)
pde3 = Laplacian2DDirichletStrongFormNoParam(space3, f=f_rhs, g=f_bc)

opt_1 = {
    "name": "adam",
    "optimizer_args": {"lr": 2e-2, "betas": (0.9, 0.999)},
}
opt3 = OptimizerData(opt_1)

pinns3 = PinnsElliptic(pde3, bc_type="strong", optimizers=opt3)

# train until loss <= 2e-3, for a maximum of 3000 epochs
pinns3.solve(max_epochs=3000, loss_target=0.002, n_collocation=3000, verbose=True)

# with periodic boundary conditions, the solution is only defined up to a
# constant; we hack the evaluate method to subtract its mean value
old_evaluate = space3.evaluate


def new_evaluate(x, mu):
    u = old_evaluate(x, mu)
    mean = torch.mean(u.w, dim=0, keepdim=True).detach()
    u.w = u.w - mean
    return u


space3.evaluate = new_evaluate

# %%
# for plotting pinns, pinns2 and pinns3
plot_abstract_approx_spaces(
    (
        pinns.space,
        pinns2.space,
        pinns3.space,
    ),  # an Iterable of AbstractSpace
    domain_x,  # either a VolumetricDomain, or an Iterable of VolumetricDomain
    # of length 1 or len(first argument)
    loss=(
        pinns.losses,
        pinns2.losses,
        pinns3.losses,
    ),  # same as previously; if only one is given, it will be used for all spaces
    residual=(
        pinns.pde,
        pinns2.pde,
        pinns3.pde,
    ),  # same as previously; if only one is given, it will be used for all spaces
    error=exact_sol,  # same as previously; if only one is given, it will be used for all spaces
    draw_contours=True,
    n_drawn_contours=20,
)
plt.show()
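# %%
# Minimal sketch of the idea behind PeriodicMLP; this is an illustration, not
# the actual scimba_torch implementation, whose feature map may differ.
# Embedding x through the 2-periodic features (cos(pi x), sin(pi x)) before an
# ordinary MLP makes the network output periodic on (-1, 1)^2 by construction,
# so no boundary loss is needed.


def _periodic_features(x: torch.Tensor) -> torch.Tensor:
    # features are invariant under x -> x + 2 (one period in each coordinate)
    return torch.cat([torch.cos(torch.pi * x), torch.sin(torch.pi * x)], dim=-1)


_net = torch.nn.Sequential(
    torch.nn.Linear(4, 20), torch.nn.Tanh(), torch.nn.Linear(20, 1)
)
_p = torch.rand(5, 2) * 2 - 1
# shifting the input by one full period leaves the output unchanged
assert torch.allclose(
    _net(_periodic_features(_p)), _net(_periodic_features(_p + 2)), atol=1e-4
)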
# %%
#### now, we do the same but with the natural gradient ####

#### first space: weak boundary conditions ####

space = NNxSpace(1, 0, GenericMLP, domain_x, sampler, layer_sizes=[40])
pde = Laplacian2DDirichletStrongFormNoParam(space, f=f_rhs, g=f_bc)
pde.linear = True

losses = GenericLosses(
    [
        ("residual", torch.nn.MSELoss(), 1.0),
        ("bc", torch.nn.MSELoss(), 40.0),
    ],
)

pinns = NaturalGradientPinnsElliptic(pde, bc_type="weak", losses=losses)

# train for 100 epochs
pinns.solve(epochs=100, n_collocation=3000, n_bc_collocation=1600, verbose=True)

### plot
sampler_plot = TensorizedSampler(
    [DomainSampler(domain_x), UniformParametricSampler([])]
)

# for plotting pinns
plot_abstract_approx_spaces(
    pinns.space,  # the approximation space
    domain_x,  # the spatial domain
    [],  # the parameters' domain
    loss=pinns.losses,  # for the plot of the loss: the losses
    residual=pde,  # for the plot of the residual: the pde
    solution=exact_sol,  # for the plot of the exact solution
    error=exact_sol,  # for the plot of the error with respect to a function
    cuts=[
        ([0.0, 0.0], [-0.5, 0.5]),
        ([0.0, 0.2], [0.0, 1.0]),
    ],  # for plots on linear cuts of dimension d-1: tuples (point, normal
    # vector) representing the affine subspace of dimension d-1
    draw_contours=True,
    n_drawn_contours=20,
)
plt.show()

# %%
#### second space: hard-constrained boundary conditions ####


def post_processing(inputs: torch.Tensor, x: LabelTensor, mu: LabelTensor):
    """Multiply the network output by a function vanishing on the boundary."""
    x1, x2 = x.get_components()
    return inputs * (x1 + 1) * (1 - x1) * (x2 + 1) * (1 - x2)


def functional_post_processing(u, xy, mu, theta):
    """Same hard constraint, written for the functional (torch.func) API."""
    return u(xy, mu, theta) * (xy[0] + 1) * (1 - xy[0]) * (xy[1] + 1) * (1 - xy[1])


space2 = NNxSpace(
    1,
    0,
    GenericMLP,
    domain_x,
    sampler,
    layer_sizes=[40],
    post_processing=post_processing,
)
pde2 = Laplacian2DDirichletStrongFormNoParam(space2, f=f_rhs, g=f_bc)

pinns2 = NaturalGradientPinnsElliptic(
    pde2, bc_type="strong", functional_post_processing=functional_post_processing
)

# train until loss <= 1e-6, for a maximum of 200 epochs
pinns2.solve(max_epochs=200, loss_target=1e-6, n_collocation=3000, verbose=True)

# %%
#### third space: periodic boundary conditions (no post-processing needed) ####

# we need to remove the bias of the last layer: the only loss term is the
# residual of $\Delta u + f = 0$, which never sees a constant bias, so its
# gradient with respect to that bias vanishes identically; keeping the bias
# would mean trying to optimize a parameter with no usable gradient
space3 = NNxSpace(
    1,
    0,
    PeriodicMLP,
    domain_x,
    sampler,
    layer_sizes=[20, 20],
    last_layer_has_bias=False,
)
pde3 = Laplacian2DDirichletStrongFormNoParam(space3, f=f_rhs, g=f_bc)

pinns3 = NaturalGradientPinnsElliptic(pde3, bc_type="strong")

# train until loss <= 1e-6, for a maximum of 200 epochs
pinns3.solve(max_epochs=200, loss_target=1e-6, n_collocation=3000, verbose=True)

# with periodic boundary conditions, the solution is only defined up to a
# constant; we hack the evaluate method to subtract its mean value
old_evaluate = space3.evaluate


def new_evaluate(x, mu):
    u = old_evaluate(x, mu)
    mean = torch.mean(u.w, dim=0, keepdim=True).detach()
    u.w = u.w - mean
    return u


space3.evaluate = new_evaluate

# %%
# for plotting pinns, pinns2 and pinns3
plot_abstract_approx_spaces(
    (
        pinns.space,
        pinns2.space,
        pinns3.space,
    ),  # an Iterable of AbstractSpace
    domain_x,  # either a VolumetricDomain, or an Iterable of VolumetricDomain
    # of length 1 or len(first argument)
    loss=(
        pinns.losses,
        pinns2.losses,
        pinns3.losses,
    ),  # same as previously; if only one is given, it will be used for all spaces
    residual=(
        pinns.pde,
        pinns2.pde,
        pinns3.pde,
    ),  # same as previously; if only one is given, it will be used for all spaces
    error=exact_sol,  # same as previously; if only one is given, it will be used for all spaces
    draw_contours=True,
    n_drawn_contours=20,
)
plt.show()

# %%
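# Minimal sketch, in plain torch.func, of the generic Gauss-Newton/natural-
# gradient idea used by solvers of this kind; the exact update performed by
# NaturalGradientPinnsElliptic is not shown here, and the tiny model and toy
# residual below are illustrative assumptions. Instead of a plain gradient
# step, one assembles the Jacobian J of the residual with respect to the
# parameters, forms the Gramian G = J^T J, and solves G * delta = J^T r for
# the update direction delta.
_model = torch.nn.Linear(2, 1)
_params = {k: v.detach() for k, v in _model.named_parameters()}


def _residual(params: dict, x: torch.Tensor) -> torch.Tensor:
    # toy residual: the raw network output stands in for the PDE residual
    return torch.func.functional_call(_model, params, (x,)).squeeze(-1)


_x = torch.rand(50, 2) * 2 - 1
_r = _residual(_params, _x)  # residual at the collocation points, shape (50,)
_jac = torch.func.jacrev(_residual)(_params, _x)  # per-parameter Jacobians
# flatten the per-parameter Jacobians into one (n_points, n_params) matrix
_J = torch.cat([j.reshape(_x.shape[0], -1) for j in _jac.values()], dim=1)
_G = _J.T @ _J + 1e-6 * torch.eye(_J.shape[1])  # regularized Gramian
_delta = torch.linalg.solve(_G, _J.T @ _r)  # natural-gradient direction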