r"""Solves a 1D heat equation with pre-processing.

The neural network is a simple MLP (Multilayer Perceptron). The initial and
boundary conditions are handled strongly (enforced exactly through a
post-processing of the network output, not through penalty terms). The
optimization is done using Natural Gradient Descent.
"""

# %%
import matplotlib.pyplot as plt
import torch

from scimba_torch.approximation_space.nn_space import NNxtSpace
from scimba_torch.domain.meshless_domain.domain_1d import Segment1D
from scimba_torch.integration.monte_carlo import DomainSampler, TensorizedSampler
from scimba_torch.integration.monte_carlo_parameters import UniformParametricSampler
from scimba_torch.integration.monte_carlo_time import UniformTimeSampler
from scimba_torch.neural_nets.coordinates_based_nets.mlp import GenericMLP
from scimba_torch.numerical_solvers.temporal_pde.pinns import (
    NaturalGradientTemporalPinns,
)
from scimba_torch.physical_models.elliptic_pde.abstract_elliptic_pde import (
    StrongFormEllipticPDE,
)
from scimba_torch.physical_models.temporal_pde.abstract_temporal_pde import (
    FirstOrderTemporalPDE,
)
from scimba_torch.plots.plots_nd import plot_abstract_approx_spaces
from scimba_torch.utils.scimba_tensors import LabelTensor

torch.manual_seed(0)

d = 1  # spatial dimension
sigma = 1.0  # width of the initial Gaussian profile
x_min, x_max = -1.0, 1.0  # spatial domain endpoints
t_min, t_max = 0.0, 1.0 / float(d)  # time window

domain_x = Segment1D((x_min, x_max), is_main_domain=True)


def func_exact(t, x, mu):
    """Exact solution of the heat equation: a spreading Gaussian.

    u(t, x) = (sigma^2 / (sigma^2 + 2t))^(d/2) * exp(-x^2 / (2 sigma^2 + 4 t)),
    i.e. a Gaussian of variance sigma^2 + 2t. Operates on raw tensors (or
    scalars broadcastable against ``x``); ``mu`` is unused (no parameters).
    """
    fac = (sigma**2 / (sigma**2 + 2 * t)) ** (float(d) / 2.0)
    return fac * torch.exp(-(x**2) / (2 * sigma**2 + 4 * t))


def exact(t, x, mu):
    """Exact solution taking LabelTensor arguments (unwraps ``.x``)."""
    return func_exact(t.x, x.x, mu.x)


def f_ini(x, mu):
    """Initial condition u(0, x): the exact solution evaluated at t = 0."""
    t = LabelTensor(torch.zeros_like(x.x))
    return exact(t, x, mu)


def f_bc_rhs(t, x, n, mu):
    """Dirichlet boundary data: the exact solution on the boundary."""
    return exact(t, x, mu)


class Laplacian1DDirichletStrongFormNoParam(StrongFormEllipticPDE):
    """Strong-form -u_xx operator with Dirichlet BC, no PDE parameters.

    Used as the spatial part of the heat equation below.

    Args:
        space: the approximation space (passed through to the base class).
        f: right-hand side callable ``f(x, mu)``.
        g: boundary data callable ``g(x, mu)``.
    """

    def __init__(self, space, f, g, **kwargs):
        super().__init__(
            space, linear=True, residual_size=1, bc_residual_size=1, **kwargs
        )
        self.f = f
        self.g = g

    def rhs(self, w, x, mu):
        """Interior right-hand side (ignores the state ``w``)."""
        return self.f(x, mu)

    def operator(self, w, x, mu):
        """Spatial operator: minus the second derivative, -u_xx."""
        u = w.get_components()
        u_xx = self.grad(self.grad(u, x), x)
        return -u_xx

    def bc_rhs(self, w, x, n, mu):
        """Boundary right-hand side (Dirichlet data)."""
        return self.g(x, mu)

    def bc_operator(self, w, x, n, mu):
        """Dirichlet boundary operator: the trace of u itself."""
        return w.get_components()


def zeros_rhs(w, t, x, mu, nb_func: int = 1):
    """Zero interior source term, one column per residual function."""
    return torch.zeros(x.shape[0], nb_func)


def zeros_bc_rhs(w, t, x, n, mu, nb_func: int = 1):
    """Zero boundary source term, one column per residual function."""
    return torch.zeros(x.shape[0], nb_func)


class HeatEquation1DDirichletStrongFormNoParam(FirstOrderTemporalPDE):
    """Heat equation u_t - u_xx = f with Dirichlet BC, no PDE parameters.

    Delegates the spatial part to :class:`Laplacian1DDirichletStrongFormNoParam`.

    Args:
        space: the approximation space.
        init: initial-condition callable ``init(x, mu)``.
        f: interior source ``f(w, t, x, mu)`` (defaults to zero).
        g: boundary data ``g(w, t, x, n, mu)`` (defaults to zero).
    """

    def __init__(self, space, init, f=zeros_rhs, g=zeros_bc_rhs, **kwargs):
        super().__init__(space, linear=True, **kwargs)
        self.space_component = Laplacian1DDirichletStrongFormNoParam(space, f, g)
        self.f = f
        self.g = g
        self.init = init
        self.ic_residual_size = 1

    def space_operator(self, w, t, x, mu):
        """Spatial operator -u_xx, delegated to the elliptic component."""
        return self.space_component.operator(w, x, mu)

    def bc_operator(self, w, t, x, n, mu):
        """Dirichlet boundary operator, delegated to the elliptic component."""
        return self.space_component.bc_operator(w, x, n, mu)

    def rhs(self, w, t, x, mu: LabelTensor):
        """Interior right-hand side."""
        return self.f(w, t, x, mu)

    def bc_rhs(self, w, t, x, n, mu):
        """Boundary right-hand side."""
        return self.g(w, t, x, n, mu)

    def initial_condition(self, x, mu):
        """Initial condition u(0, x)."""
        return self.init(x, mu)

    def functional_operator(self, func, t, x, mu, theta):
        """Functional (per-sample) residual u_t - u_xx via torch.func.

        ``func(t, x, mu, theta)`` is the network as a pure function; argument
        index 0 is time and index 1 is space, so jacrev(func, 1) twice gives
        u_xx and jacrev(func, 0) gives u_t. The ``[0, 0]`` indexing extracts
        the scalar from the 1x1 Jacobian.
        """
        grad_func = torch.func.jacrev(func, 1)
        space_op = -torch.func.jacrev(grad_func, 1)(t, x, mu, theta)
        time_op = torch.func.jacrev(func, 0)(t, x, mu, theta)
        return (time_op + space_op)[0, 0]

    def functional_operator_bc(self, func, t, x, n, mu, theta):
        """Functional Dirichlet boundary operator: the value of u itself."""
        return func(t, x, mu, theta)

    def functional_operator_ic(self, func, x, mu, theta):
        """Functional initial-condition operator: u evaluated at t = 0."""
        t = torch.zeros_like(x)
        return func(t, x, mu, theta)


def post_processing(
    inputs: torch.Tensor, t: LabelTensor, x: LabelTensor, mu: LabelTensor
):
    """Strongly enforce the initial and boundary conditions.

    Builds u = lift + N(t, x) * t * (x - x_min) * (x_max - x) where the
    correction term vanishes at t = 0 and on both boundaries, and the lift
    ``u_ini + u_bc - u_bc_ini`` matches the exact initial and boundary data.
    (The exact solution is even in x, so evaluating the boundary data at
    x_max covers both endpoints.)
    """
    x1 = x.get_components()
    t1 = t.get_components()
    x_max_tensor = LabelTensor(x_max * torch.ones_like(x.x))
    u_ini = f_ini(x, mu)
    u_bc = exact(t, x_max_tensor, mu)
    u_bc_ini = f_ini(x_max_tensor, mu)
    # (x1 - x_min) * (x_max - x1) generalizes the former hard-coded
    # (x1 + 1) * (1 - x1); identical for the current domain [-1, 1].
    return u_ini + u_bc - u_bc_ini + inputs * t1 * (x1 - x_min) * (x_max - x1)


def func_f_ini(x, mu):
    """Initial condition on raw tensors (functional counterpart of f_ini)."""
    t = torch.zeros_like(x)
    return func_exact(t, x, mu)


def functional_post_processing(func, t, x, mu, theta) -> torch.Tensor:
    """Functional (per-sample) version of :func:`post_processing`.

    Same lift + cutoff construction, but on raw tensors so it can be composed
    with torch.func transforms; ``x[0]`` extracts the scalar coordinate from
    the single-sample input.
    """
    u_ini = func_exact(0, x, mu)
    x_max_tensor = torch.full_like(x, x_max)
    u_bc = func_exact(t, x_max_tensor, mu)
    u_bc_ini = func_exact(0, x_max_tensor, mu)
    inputs = func(t, x, mu, theta)
    # Cutoff written with the domain constants; equals (x+1)(1-x) on [-1, 1].
    return u_ini + u_bc - u_bc_ini + inputs * t * (x[0] - x_min) * (x_max - x[0])


def pre_processing(t, x: LabelTensor, mu: LabelTensor):
    """Network input features: [t, x^2] (exploits the solution's symmetry)."""
    return torch.cat([t.x, x.x**2], dim=-1)


def functional_pre_processing(*args):
    """Functional version of :func:`pre_processing` on raw tensors (t, x, ...)."""
    return torch.cat([args[0], args[1] ** 2], dim=-1)


# Monte-Carlo sampler over time x space x (empty) parameter domain.
sampler = TensorizedSampler(
    [
        UniformTimeSampler((t_min, t_max)),
        DomainSampler(domain_x),
        UniformParametricSampler([]),
    ]
)

# Approximation space: 1 output, 0 parameters, MLP with two hidden layers,
# wrapped with the strong-enforcement pre/post-processing defined above.
space = NNxtSpace(
    1,
    0,
    GenericMLP,
    domain_x,
    sampler,
    layer_sizes=[16, 16],
    pre_processing=pre_processing,
    post_processing=post_processing,
)

pde = HeatEquation1DDirichletStrongFormNoParam(space, init=f_ini, g=f_bc_rhs)

pinn = NaturalGradientTemporalPinns(
    pde,
    bc_type="strong",
    ic_type="strong",
    matrix_regularization=1e-6,
    functional_pre_processing=functional_pre_processing,
    functional_post_processing=functional_post_processing,
)

pinn.solve(epochs=100, n_collocation=1000)

plot_abstract_approx_spaces(
    pinn.space,
    domain_x,
    [],
    (t_min, t_max),
    loss=pinn.losses,
    residual=pde,
    solution=exact,
    error=exact,
)

plt.show()