"""Projection of a 1D function using kernel-based approximation spaces.""" import matplotlib.pyplot as plt import torch from scimba_torch.approximation_space.kernelx_space import ( ExponentialKernel, GaussianKernel, KernelxSpace, ) from scimba_torch.domain.meshless_domain.domain_1d import Segment1D from scimba_torch.integration.monte_carlo import DomainSampler, TensorizedSampler from scimba_torch.integration.monte_carlo_parameters import UniformParametricSampler from scimba_torch.numerical_solvers.collocation_projector import ( LinearProjector, ) from scimba_torch.plots.plots_nd import plot_abstract_approx_spaces from scimba_torch.utils.scimba_tensors import LabelTensor def func_test(x: LabelTensor, mu: LabelTensor): return torch.cos(x.get_components() * 2 * torch.pi) # def func_test(x: LabelTensor, mu: LabelTensor): # return ( # 1-torch.abs(x.get_components()) # ) torch.manual_seed(0) domain_x = Segment1D((-1.0, 1.0), is_main_domain=True) sampler = TensorizedSampler([DomainSampler(domain_x), UniformParametricSampler([])]) n = 200 n_centers = 40 space1 = KernelxSpace( 1, 0, kernel_type=ExponentialKernel, nb_centers=n_centers, spatial_domain=domain_x, beta=-1, integrator=sampler, ) p = LinearProjector(space1, func_test) p.solve(n_collocation=n, verbose=True) # p = CollocationProjector(space1, func_test, optimizers=default_opt) # p.solve(epochs=500, n_collocation=1000, verbose=True) # space2 = KernelxSpace( # 1, # 0, # kernel_type=MaternKernel, # nb_centers=n_centers, # spatial_domain=domain_x, # beta=0.5, # integrator=sampler, # ) # # p2 = CollocationProjector(space2, func_test) # # p2.solve(epochs=500, n_collocation=1000, verbose=True) # p2 = LinearProjector(space2, func_test) # p2.solve(n_collocation=n, verbose=True) space3 = KernelxSpace( 1, 0, kernel_type=GaussianKernel, nb_centers=n_centers, spatial_domain=domain_x, beta=1, eps=2.0, integrator=sampler, ) # p3 = CollocationProjector(space3, func_test) # p3.solve(epochs=500, n_collocation=1000, verbose=True) p3 = LinearProjector(space3, func_test) p3.solve(n_collocation=n, verbose=True) plot_abstract_approx_spaces( (p.space, p3.space), # the approximation spaces (domain_x), # the spatial domain ((),), # the parametric domain loss=( None, None, # p.losses, # p2.losses, # p3.losses, ), # for plot of the loss: the losses solution=(func_test), # for plot of the exact sol: sol error=(func_test), # for plot of the error with respect to a func: the func draw_contours=True, n_drawn_contours=20, ) plt.show() # x, mu = sampler.sample(40000) # x1, x2 = x.get_components() # w_exact = func_test(x, mu) # w1 = space1.evaluate(x, mu).w.detach().cpu() # w2 = space2.evaluate(x, mu).w.detach().cpu() # e1 = (space1.evaluate(x, mu).w - w_exact).detach().cpu() # e2 = (space2.evaluate(x, mu).w - w_exact).detach().cpu() # fig, ax = plt.subplots(2, 2, figsize=(10, 8)) # x1 = x1.detach().cpu() # x2 = x2.detach().cpu() ## Premier scatter avec colorbar # sc = ax[0, 0].scatter(x1, x2, c=w1, s=1, cmap="turbo") # sc = ax[0, 1].scatter( # x1, # x2, # c=w1 - w_exact.detach().cpu(), # s=1, # cmap="turbo", # label=f"{torch.sqrt(torch.mean(e1**2)):3.2e}", # ) # fig.colorbar(sc, ax=ax[0, 1]) # ax[0, 1].legend() ## Premier scatter avec colorbar # sc = ax[1, 0].scatter(x1, x2, c=w2, s=1, cmap="turbo") # fig.colorbar(sc, ax=ax[1, 0]) # sc = ax[1, 1].scatter( # x1, # x2, # c=w2 - w_exact.detach().cpu(), # s=1, # cmap="turbo", # label=f"{torch.sqrt(torch.mean(e2**2)):3.2e}", # ) # fig.colorbar(sc, ax=ax[1, 1]) # ax[1, 1].legend() # plt.show()