"""Approximation of a function using several optimization strategies.""" import matplotlib.pyplot as plt import torch from scimba_torch.approximation_space.nn_space import ( NNxSpace, ) from scimba_torch.domain.meshless_domain.domain_2d import Square2D from scimba_torch.integration.monte_carlo import DomainSampler, TensorizedSampler from scimba_torch.integration.monte_carlo_parameters import UniformParametricSampler from scimba_torch.neural_nets.coordinates_based_nets.mlp import GenericMLP from scimba_torch.numerical_solvers.collocation_projector import ( AnagramProjector, CollocationProjector, LinearProjector, NaturalGradientProjector, ) from scimba_torch.optimizers.optimizers_data import OptimizerData from scimba_torch.plots.plots_nd import plot_abstract_approx_spaces from scimba_torch.utils.scimba_tensors import LabelTensor def func_test(x: LabelTensor, mu: LabelTensor): x1, x2 = x.get_components() mu1 = mu.get_components() return torch.sin(x1) * mu1 * torch.cos(x2) def bc_test(x: LabelTensor, xn: LabelTensor, mu: LabelTensor): return func_test(x, mu) domain_x = Square2D([(-1, 1), (-1, 1)], is_main_domain=True) sampler = TensorizedSampler( [DomainSampler(domain_x), UniformParametricSampler([(1.0, 2.0)])] ) opt = { "name": "sgd", "optimizer_args": {"lr": 1.5e-2}, } print("\n\n") print(" ################################################# ") print(" # CollocationProjector, no preconditioning # ") print(" ################################################# ") space = NNxSpace(1, 1, GenericMLP, domain_x, sampler, layers_sizes=[20]) print("ndof ", space.ndof) p1 = CollocationProjector( space, func_test, optimizers=OptimizerData(opt), bool_preconditioner=False ) new_solve = False if new_solve or not p1.load(__file__, "projector_no_precond"): p1.solve(epochs=800, n_collocation=1000, verbose=True) p1.save(__file__, "projector_no_precond") print("\n\n") print(" ################################################# ") print(" # NaturalGradientProjector # ") print(" ################################################# ") space2 = NNxSpace(1, 1, GenericMLP, domain_x, sampler, layers_sizes=[20]) p2 = NaturalGradientProjector( space2, func_test, ) new_solve = False if new_solve or not p2.load(__file__, "natural"): # load from p1.space # p2.space.load_from_dict(p1.space.dict_for_save()) # p2.space.load_from_best_approx() p2.solve(epochs=50, n_collocation=1000, verbose=True) p2.save(__file__, "natural") print("\n\n") print(" ################################################# ") print(" # AnagramProjector # ") print(" ################################################# ") space3 = NNxSpace(1, 1, GenericMLP, domain_x, sampler, layers_sizes=[20]) p3 = AnagramProjector( space3, func_test, svd_threshold=1e-6, ) new_solve = False if new_solve or not p3.load(__file__, "anagram"): p3.solve(epochs=60, n_collocation=1000, verbose=True) p3.save(__file__, "anagram") print("\n\n") print(" ################################################# ") print(" # LinearProjector # ") print(" ################################################# ") space4 = NNxSpace(1, 1, GenericMLP, domain_x, sampler, layers_sizes=[20]) print("ndof ", space4.ndof) p4 = LinearProjector( space4, func_test, ) p4.solve(n_collocation=100000, verbose=True) plot_abstract_approx_spaces( ( p1.space, p2.space, p3.space, ), # the approximation spaces (domain_x), # the spatial domain ([[1.0, 2.0]]), # the parameter's domain loss=( p1.losses, p2.losses, p3.losses, ), # for plot of the loss: the losses solution=(func_test), # for plot of the exact sol: sol error=(func_test), 
    draw_contours=True,
    n_drawn_contours=20,
    parameters_values="mean",
)

plt.show()
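
# For intuition, the sketch below reproduces the idea behind LinearProjector in
# plain torch -- an assumption about its internals, not scimba_torch code: with
# the hidden layers frozen, projecting onto the span of the last-layer features
# reduces to a single least-squares solve on collocation points.
xs = torch.rand(1000, 2) * 2 - 1  # collocation points in [-1, 1]^2
feats = torch.tanh(xs @ torch.randn(2, 20))  # frozen random hidden features
target = torch.sin(xs[:, :1]) * torch.cos(xs[:, 1:2])  # mu = 1 slice of func_test
coeffs = torch.linalg.lstsq(feats, target).solution  # last-layer weights
residual = torch.mean((feats @ coeffs - target) ** 2)
print("toy linear projection, mean squared residual:", residual.item())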