r"""Approximates a filamenting 2D function.

Solves a 2D advection problem whose exact flow is a composition of shear maps
(producing filamentation) with a neural semi-Lagrangian scheme.
"""
# %%
import copy

import matplotlib.pyplot as plt
import torch

from scimba_torch import PI
from scimba_torch.approximation_space.nn_space import NNxSpace
from scimba_torch.domain.mesh_based_domain.cuboid import Cuboid
from scimba_torch.domain.meshless_domain.domain_2d import Square2D
from scimba_torch.integration.mesh_based_quadrature import RectangleMethod
from scimba_torch.integration.monte_carlo import DomainSampler, TensorizedSampler
from scimba_torch.integration.monte_carlo_parameters import UniformParametricSampler
from scimba_torch.neural_nets.coordinates_based_nets.mlp import GenericMLP
from scimba_torch.numerical_solvers.collocation_projector import (
    CollocationProjector,
)
from scimba_torch.numerical_solvers.temporal_pde.neural_semilagrangian import (
    Characteristic,
    NeuralSemiLagrangian,
)
from scimba_torch.numerical_solvers.temporal_pde.time_discrete import (
    TimeDiscreteCollocationProjector,
    TimeDiscreteNaturalGradientProjector,
)
from scimba_torch.optimizers.optimizers_data import OptimizerData
from scimba_torch.physical_models.temporal_pde.advection_diffusion_equation import (
    AdvectionReactionDiffusionDirichletStrongForm,
)
from scimba_torch.plots.plot_time_discrete_scheme import plot_time_discrete_scheme
from scimba_torch.utils.scimba_tensors import LabelTensor

DEFAULT_C_F = 0.5
DEFAULT_BOUNDS = [(0, 1), (0, 1)]


# exact flow and its inverse
def G(x, d, c, L=1 / 2):  # noqa: N802
    """Shear the ``d``-th coordinate by a sine of the other coordinate.

    Returns a new tensor; ``x`` is not modified (we work on a clone).
    """
    y = x.clone()
    cs = c / 4 * torch.sin(2 * PI * x[:, 1 - d] / L)
    y[:, d] = x[:, d] + cs
    return y


def inv_G(y, d, c, L=1 / 2):  # noqa: N802
    """Exact inverse of :func:`G` (the shear only reads coordinate ``1 - d``)."""
    x = y.clone()
    cs = c / 4 * torch.sin(2 * PI * x[:, 1 - d] / L)
    x[:, d] = y[:, d] - cs
    return x


def F(x, c):  # noqa: N802
    """Composed filamenting flow: four alternating shears with signs (+,-,-,+)."""
    # G clones its input, so x itself is never mutated.
    y = x
    y = G(y, 0, c)
    y = G(y, 1, -c)
    y = G(y, 0, -c)
    y = G(y, 1, c)
    return y


def inv_F(x, c):  # noqa: N802
    """Inverse of :func:`F`: the inverse shears applied in reverse order."""
    y = x
    y = inv_G(y, 1, c)
    y = inv_G(y, 0, -c)
    y = inv_G(y, 1, -c)
    y = inv_G(y, 0, c)
    return y


def get_flows(c_F):
    """Return ``(forward_flow, inverse_flow)`` closures for amplitude ``c_F``.

    ``c_F`` is bound as a default argument so the closures do not late-bind.
    """
    assert -1 < c_F < 1, f"c_F must be in (-1,1), got {c_F}"
    return lambda x, c=c_F: F(x, c), lambda x, c=c_F: inv_F(x, c)


# initial condition and exact solution
def f_ini(x, mu):
    """Initial condition: 1 plus a Gaussian bump centered at (0.5, 0.5)."""
    x1, x2 = x.get_components()
    x1_t = 0.5
    x2_t = 0.5
    sig0 = 0.18 / 2**0.5
    f = torch.exp(-((x1 - x1_t) ** 2 + (x2 - x2_t) ** 2) / (2 * sig0**2))
    return 1 + f


def f_exact(t, x, mu, c_F=DEFAULT_C_F):
    """Exact solution: pull ``x`` back through the inverse flow, then apply f_ini.

    NOTE(review): ``t`` is unused — the reference solution is the state after
    one full application of the flow, independent of the time argument.
    """
    _, inv_Fc = get_flows(c_F)
    bwd_x = LabelTensor(inv_Fc(x.x), x.labels)
    return f_ini(bwd_x, mu)


def exact_foot(
    t: torch.Tensor,
    x: LabelTensor,
    mu: LabelTensor,
    dt: float,
    c_F: float = DEFAULT_C_F,
) -> torch.Tensor:
    """Exact characteristic foot: the inverse flow applied to ``x``.

    ``t``, ``mu`` and ``dt`` are accepted to match the callback signature
    expected by :class:`Characteristic` but are not used here.
    """
    _, inv_Fc = get_flows(c_F)
    return inv_Fc(x.x)


def opt():
    """Optimizer schedule: Adam, then switch to L-BFGS at 90% of the epochs."""
    opt_1 = {
        "name": "adam",
        "optimizer_args": {"lr": 2.5e-2, "betas": (0.9, 0.999)},
    }
    opt_2 = {
        "name": "lbfgs",
        "switch_at_epoch_ratio": 0.9,
        "optimizer_args": {
            "history_size": 50,
            "max_iter": 50,
            "tolerance_grad": 1e-11,
            "tolerance_change": 1e-9,
        },
    }
    return OptimizerData(opt_1, opt_2)


# %%
def solve_with_neural_sl(
    T: float,
    dt: float,
    with_natural_gradient: bool = True,
    with_classical_projector: bool = False,
    use_rectangle_method: bool = False,
    N_c: int = 10_000,
    retrain_initial_condition: bool = True,
):
    """Solve the filamenting transport problem with a neural semi-Lagrangian scheme.

    Args:
        T: final time.
        dt: time step.
        with_natural_gradient: run the natural-gradient projector variant.
        with_classical_projector: run the classical collocation-projector variant.
        use_rectangle_method: quadrature on a mesh (Cuboid) instead of Monte Carlo.
        N_c: number of collocation points.
        retrain_initial_condition: retrain and save the initial projection
            instead of loading a previously saved one.

    Returns:
        The last :class:`NeuralSemiLagrangian` scheme that was solved.

    Raises:
        ValueError: if both solver variants are disabled (there would be no
            scheme to return).
    """
    if not (with_natural_gradient or with_classical_projector):
        # Without this guard the final `return scheme` would raise an
        # UnboundLocalError, which is much harder to diagnose.
        raise ValueError(
            "At least one of with_natural_gradient / with_classical_projector "
            "must be True"
        )

    torch.random.manual_seed(0)

    if use_rectangle_method:
        domain_x = Cuboid(DEFAULT_BOUNDS, is_main_domain=True)
        x_sampler = RectangleMethod
    else:
        domain_x = Square2D(DEFAULT_BOUNDS, is_main_domain=True)
        x_sampler = DomainSampler
    domain_mu = []  # no physical parameters
    sampler = TensorizedSampler(
        [
            x_sampler(domain_x),
            UniformParametricSampler(domain_mu),
        ]
    )

    def a(t, x, mu):
        # The advection field is never evaluated: the characteristic feet are
        # supplied exactly via `exact_foot`.
        raise ValueError("This should not be called")

    if with_classical_projector:
        space = NNxSpace(
            1,
            0,
            GenericMLP,
            domain_x,
            sampler,
            layer_sizes=[60, 60, 60],
            activation_type="tanh",
        )
        pde = AdvectionReactionDiffusionDirichletStrongForm(
            space, a=a, u0=f_ini, constant_advection=False, zero_diffusion=True
        )
        characteristic = Characteristic(pde, exact_foot=exact_foot)
        projector = TimeDiscreteCollocationProjector(pde, rhs=f_ini, optimizers=opt())
        scheme = NeuralSemiLagrangian(characteristic, projector)
        scheme.initialization(epochs=750, n_collocation=N_c, verbose=True)
        scheme.projector.save("ini_transport_2D_SL")
        # Re-project with a plain collocation projector warm-started from the
        # saved initial state before time stepping.
        scheme.projector = CollocationProjector(space, rhs=f_ini, optimizers=opt())
        scheme.projector.load("ini_transport_2D_SL")
        scheme.projector.space.load_from_best_approx()
        scheme.solve(dt=dt, final_time=T, epochs=750, n_collocation=N_c, verbose=True)

    if with_natural_gradient:
        space = NNxSpace(
            1,
            0,
            GenericMLP,
            domain_x,
            sampler,
            layer_sizes=[20, 40, 20],
            activation_type="tanh",
        )
        pde = AdvectionReactionDiffusionDirichletStrongForm(
            space, a=a, u0=f_ini, constant_advection=False, zero_diffusion=True
        )
        characteristic = Characteristic(
            pde, exact_foot=exact_foot, flipped_periodic=True
        )
        if retrain_initial_condition:
            projector = TimeDiscreteNaturalGradientProjector(pde, rhs=f_ini)
            scheme = NeuralSemiLagrangian(characteristic, projector)
            scheme.initialization(epochs=250, n_collocation=N_c, verbose=True)
            scheme.projector.save("ini_transport_2D_filamenting_SL")
            # plot_results(scheme, 0, init=True)
        projector = TimeDiscreteNaturalGradientProjector(pde, rhs=f_ini)
        projector.load("ini_transport_2D_filamenting_SL")
        projector.space.load_from_best_approx()
        scheme = NeuralSemiLagrangian(characteristic, projector)
        scheme.initial_space = copy.deepcopy(scheme.projector.space)
        scheme.solve(dt=dt, final_time=T, epochs=150, n_collocation=N_c, verbose=True)

    return scheme


# %%
if __name__ == "__main__":
    scheme = solve_with_neural_sl(
        1.0,
        1.0,
        with_natural_gradient=True,
        with_classical_projector=False,
        use_rectangle_method=False,
        # N_c=500**2,
        N_c=1000,
        retrain_initial_condition=False,
    )

    def f_exact_for_plot(t, x, mu, c_F=DEFAULT_C_F):
        """Reference solution for plotting: initial condition at t=0, else f_exact."""
        if torch.all(t.x == 0.0):
            return f_ini(x, mu)
        _, inv_Fc = get_flows(c_F)
        bwd_x = LabelTensor(inv_Fc(x.x), x.labels)
        return f_ini(bwd_x, mu)

    plot_time_discrete_scheme(
        scheme,
        solution=f_exact_for_plot,
        error=f_exact_for_plot,
    )
    plt.show()

# %%