r"""Learns the initial condition of a linearized Euler equation in 1D. The goal is to learn a function from :math:`\mathbb{R} \times \mathbb{R} \to \mathbb{R}^2`. This example compares three projection methods: Adam, natural gradient preconditioning, and Anagram. """ from typing import Tuple import matplotlib.pyplot as plt import torch from scimba_torch.approximation_space.nn_space import NNxSpace from scimba_torch.domain.meshless_domain.domain_1d import Segment1D from scimba_torch.integration.monte_carlo import DomainSampler, TensorizedSampler from scimba_torch.integration.monte_carlo_parameters import UniformParametricSampler from scimba_torch.neural_nets.coordinates_based_nets.mlp import GenericMLP from scimba_torch.numerical_solvers.collocation_projector import ( AnagramProjector, CollocationProjector, NaturalGradientProjector, ) from scimba_torch.optimizers.optimizers_data import OptimizerData from scimba_torch.plots.plots_nd import plot_abstract_approx_spaces from scimba_torch.utils.scimba_tensors import LabelTensor def exact_solution(t: LabelTensor, x: LabelTensor, mu: LabelTensor) -> torch.Tensor: x = x.get_components() D = 0.02 coeff = 1 / (4 * torch.pi * D) ** 0.5 p_plus_u = coeff * torch.exp(-((x - t.x - 1) ** 2) / (4 * D)) p_minus_u = coeff * torch.exp(-((x + t.x - 1) ** 2) / (4 * D)) p = (p_plus_u + p_minus_u) / 2 u = (p_plus_u - 2 * p_minus_u) / 2 return torch.cat((p, u), dim=-1) def initial_solution(x: LabelTensor, mu: LabelTensor) -> Tuple[torch.Tensor]: sol = exact_solution(LabelTensor(torch.zeros_like(x.x)), x, mu) return sol[..., 0:1], sol[..., 1:2] def solution(x: LabelTensor, mu: LabelTensor) -> Tuple[torch.Tensor]: sol = exact_solution(LabelTensor(torch.zeros_like(x.x)), x, mu) return sol domain_mu = [] domain_x = Segment1D((-1.0, 3.0), is_main_domain=True) t_min, t_max = 0.0, 0.5 domain_t = (t_min, t_max) sampler = TensorizedSampler( [DomainSampler(domain_x), UniformParametricSampler(domain_mu)] ) space = NNxSpace(2, 0, GenericMLP, domain_x, sampler, layer_sizes=[32]) opt_1 = { "name": "adam", "optimizer_args": {"lr": 1.8e-2, "betas": (0.9, 0.999)}, } opt_2 = { "name": "lbfgs", "switch_at_epoch_ratio": 0.7, } proj = CollocationProjector( space, initial_solution, optimizers=OptimizerData(opt_1, opt_2), bool_preconditioner=False, nb_components=2, ) proj.solve(epochs=1000, n_collocation=1000, verbose=True) proj.space.load_from_best_approx() # plot_AbstractApproxSpaces( # proj.space, # domain_x, # domain_mu, # loss=proj.losses, # solution=solution, # error=solution, # ) # # plt.show() space2 = NNxSpace(2, 0, GenericMLP, domain_x, sampler, layer_sizes=[32]) proj2 = NaturalGradientProjector( space2, initial_solution, nb_components=2, matrix_regularization=1e-6, ) proj2.solve( epochs=200, n_collocation=3000, n_bc_collocation=1000, n_ic_collocation=1000, verbose=True, ) plot_abstract_approx_spaces( ( proj.space, proj2.space, ), domain_x, domain_mu, loss=(proj.losses, proj2.losses), solution=solution, error=solution, ) plt.show() space3 = NNxSpace(2, 0, GenericMLP, domain_x, sampler, layer_sizes=[32]) proj3 = AnagramProjector( space3, initial_solution, nb_components=2, svd_threshold=5e-4, ) proj3.solve( epochs=200, n_collocation=3000, n_bc_collocation=1000, n_ic_collocation=1000, verbose=True, ) plot_abstract_approx_spaces( ( proj2.space, proj3.space, ), domain_x, domain_mu, loss=(proj2.losses, proj3.losses), solution=solution, error=solution, ) plt.show()