"""Example: projecting a target function onto spectral approximation spaces.

Compares three projectors (linear least-squares, Adam collocation, natural
gradient) on a 2D modulated sine/Gaussian target, then plots the results.
Also defines :class:`CustomSpace`, a kernel-based nonlinear approximation
space built on a neural-network module.
"""

import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.func import functional_call, jacrev, vmap

from scimba_torch.approximation_space.abstract_space import AbstractApproxSpace
from scimba_torch.approximation_space.spectral_space import SpectralxSpace
from scimba_torch.domain.meshless_domain.base import VolumetricDomain
from scimba_torch.domain.meshless_domain.domain_2d import Square2D
from scimba_torch.integration.monte_carlo import DomainSampler, TensorizedSampler
from scimba_torch.integration.monte_carlo_parameters import UniformParametricSampler
from scimba_torch.neural_nets.coordinates_based_nets.scimba_module import ScimbaModule
from scimba_torch.numerical_solvers.collocation_projector import (
    CollocationProjector,
    LinearProjector,
    NaturalGradientProjector,
)
from scimba_torch.plots.plots_nd import plot_abstract_approx_spaces
from scimba_torch.utils.scimba_tensors import LabelTensor, MultiLabelTensor

torch.set_default_dtype(torch.double)


class CustomSpace(AbstractApproxSpace, ScimbaModule):
    """A nonlinear approximation space using a neural network model.

    This class represents a parametric approximation space, where the solution
    is modeled by a neural network. It integrates functionality for evaluating
    the network, setting/retrieving degrees of freedom, and computing the
    Jacobian.

    Args:
        nb_unknowns: Number of unknowns in the approximation problem.
        nb_parameters: Number of parameters in the approximation problem.
        nb_centers: Number of centers for the kernel functions.
        spatial_domain: The spatial domain of the problem.
        integrator: Sampler used for integration over the spatial and
            parameter domains.
        **kwargs: Additional arguments passed to the neural network model.
    """

    def __init__(
        self,
        nb_unknowns: int,
        nb_parameters: int,
        nb_centers: int,
        spatial_domain: VolumetricDomain,
        integrator: TensorizedSampler,
        **kwargs,
    ):
        # The two bases take different constructor arguments, so call their
        # initializers explicitly rather than relying on cooperative super().
        ScimbaModule.__init__(
            self,
            in_size=spatial_domain.dim + nb_parameters,
            out_size=nb_unknowns,
            **kwargs,
        )
        AbstractApproxSpace.__init__(self, nb_unknowns)

        # NOTE(review): `integrator` is accepted but never stored or used in
        # this class — confirm whether it is kept only for interface parity.
        self.spatial_domain: VolumetricDomain = spatial_domain
        self.freq = nn.Parameter(torch.tensor([1.0]))
        self.a = nn.Parameter(torch.tensor([1.0]))
        self.output_layer = torch.nn.Linear(nb_centers, self.out_size)

        #: Total number of degrees of freedom in the network.
        self.ndof: int = self.get_dof(flag_format="tensor").shape[0]
        # Identity matrix used for isotropic kernel scaling in forward().
        self.Id = torch.eye(spatial_domain.dim + nb_parameters)

    def forward(
        self, features: torch.Tensor, with_last_layer: bool = True
    ) -> torch.Tensor:
        """Forward pass through the kernel model.

        Args:
            features: Input tensor with concatenated spatial and parameter
                data.
            with_last_layer: Whether to apply the final linear layer.

        Returns:
            Output tensor from the kernel model.
        """
        # Pairwise differences between samples and kernel centers:
        # (batch_size, nb_centers, in_size).
        # NOTE(review): self.centers, self.anisotropic, self.eps,
        # self.M_aniso and self.kernel are not set in __init__ here —
        # presumably provided by ScimbaModule via **kwargs; confirm.
        vect_diff = features.unsqueeze(1) - self.centers.unsqueeze(0)
        if self.anisotropic:
            # Full anisotropic transformation matrix.
            aniso = self.M_aniso
        else:
            # Isotropic scaling of the identity by per-center widths.
            aniso = self.eps[:, None, None] * self.Id
        basis = self.kernel(vect_diff, aniso)  # (batch_size, nb_centers)
        return self.output_layer(basis) if with_last_layer else basis

    def evaluate(
        self, x: LabelTensor, mu: LabelTensor, with_last_layer: bool = True
    ) -> MultiLabelTensor:
        """Evaluate the parametric model for given inputs and parameters.

        Args:
            x: Input tensor from the spatial domain.
            mu: Input tensor from the parameter domain.
            with_last_layer: Whether to apply the final linear layer.

        Returns:
            Output tensor from the neural network, wrapped with multi-label
            metadata.
        """
        features = self.pre_processing(x, mu)
        res = self.forward(features, with_last_layer)
        if with_last_layer:
            res = self.post_processing(res, x, mu)
        return MultiLabelTensor(res, [x.labels, mu.labels])

    def set_dof(self, theta: torch.Tensor, flag_scope: str = "all") -> None:
        """Sets the degrees of freedom (DoF) for the neural network.

        Args:
            theta: A vector containing the network parameters.
            flag_scope: The scope of parameters to return.
        """
        self.set_parameters(theta, flag_scope)

    def get_dof(
        self, flag_scope: str = "all", flag_format: str = "list"
    ) -> torch.Tensor:
        """Retrieves the degrees of freedom (DoF) of the neural network.

        Args:
            flag_scope: The scope of parameters to return.
            flag_format: The format of the returned parameters.

        Returns:
            Tensor containing the DoF of the network.
        """
        return self.parameters(flag_scope=flag_scope, flag_format=flag_format)

    def jacobian(self, x: LabelTensor, mu: LabelTensor) -> torch.Tensor:
        """Compute the Jacobian of the network with respect to its parameters.

        Args:
            x: Input tensor from the spatial domain.
            mu: Input tensor from the parameter domain.

        Returns:
            Jacobian matrix of shape `(num_samples, out_size, num_params)`.
        """
        params = {k: v.detach() for k, v in self.named_parameters()}
        features = self.pre_processing(x, mu)

        def fnet(theta, feat):
            # Evaluate a single sample; unsqueeze/squeeze add and remove the
            # batch dimension expected by forward().
            return functional_call(self, theta, (feat.unsqueeze(0),)).squeeze(0)

        # Per-sample Jacobians w.r.t. every parameter tensor, flattened and
        # concatenated along the parameter axis.
        jac = vmap(jacrev(fnet), (None, 0))(params, features).values()
        jac_m = torch.cat(
            [j.reshape((features.shape[0], self.out_size, -1)) for j in jac], dim=-1
        )
        return self.post_processing(jac_m.transpose(1, 2), x, mu)


def func_test(x: LabelTensor, mu: LabelTensor):
    """Target function to project: a Gaussian-modulated 2D sine product."""
    x1, x2 = x.get_components()
    sigma = 0.4
    return (
        torch.sin(2.5 * torch.pi * x1)
        * torch.sin(4.5 * torch.pi * x2)
        * torch.exp(-((x1) ** 2 + (x2) ** 2) / (2 * sigma**2))
    )


torch.manual_seed(0)

domain_x = Square2D([(-1.0, 1.0), (-1.0, 1.0)], is_main_domain=True)
sampler = TensorizedSampler([DomainSampler(domain_x), UniformParametricSampler([])])

# Projection 1: direct linear least-squares projection.
space1 = SpectralxSpace(
    1, "sine", 6, bounds=[(-1.0, 1.0), (-1.0, 1.0)], integrator=sampler
)
p = LinearProjector(space1, func_test)
p.solve(n_collocation=10000)

# Projection 2: gradient-based collocation projection with Adam.
space2 = SpectralxSpace(
    1, "sine", 6, bounds=[(-1.0, 1.0), (-1.0, 1.0)], integrator=sampler
)
opt = {
    "name": "adam",
    "optimizer_args": {"lr": 4.0e-2},
}
p2 = CollocationProjector(space2, func_test, optimizers=opt)
p2.solve(epochs=500, n_collocation=10000)

# Projection 3: natural-gradient collocation projection.
space3 = SpectralxSpace(
    1, "sine", 6, bounds=[(-1.0, 1.0), (-1.0, 1.0)], integrator=sampler
)
p3 = NaturalGradientProjector(space3, func_test)
p3.solve(epochs=20, n_collocation=10000)

plot_abstract_approx_spaces(
    (p.space, p2.space, p3.space),  # the approximation spaces
    (domain_x),  # the spatial domain
    loss=(
        None,
        p2.losses,
        p3.losses,
    ),  # for plot of the loss: the losses
    solution=(func_test),  # for plot of the exact sol: sol
    error=(func_test),  # for plot of the error with respect to a func: the func
    draw_contours=True,
    n_drawn_contours=20,
)
plt.show()

# Alternative manual plotting kept for reference:
# x, mu = sampler.sample(40000)
# x1, x2 = x.get_components()
# w_exact = func_test(x, mu)
# w1 = space1.evaluate(x, mu).w.detach().cpu()
# w2 = space2.evaluate(x, mu).w.detach().cpu()
# e1 = (space1.evaluate(x, mu).w - w_exact).detach().cpu()
# e2 = (space2.evaluate(x, mu).w - w_exact).detach().cpu()
# fig, ax = plt.subplots(2, 2, figsize=(10, 8))
# x1 = x1.detach().cpu()
# x2 = x2.detach().cpu()
## First scatter plot with colorbar
# sc = ax[0, 0].scatter(x1, x2, c=w1, s=1, cmap="turbo")
# sc = ax[0, 1].scatter(
#     x1,
#     x2,
#     c=w1 - w_exact.detach().cpu(),
#     s=1,
#     cmap="turbo",
#     label=f"{torch.sqrt(torch.mean(e1**2)):3.2e}",
# )
# fig.colorbar(sc, ax=ax[0, 1])
# ax[0, 1].legend()
## Second scatter plot with colorbar
# sc = ax[1, 0].scatter(x1, x2, c=w2, s=1, cmap="turbo")
# fig.colorbar(sc, ax=ax[1, 0])
# sc = ax[1, 1].scatter(
#     x1,
#     x2,
#     c=w2 - w_exact.detach().cpu(),
#     s=1,
#     cmap="turbo",
#     label=f"{torch.sqrt(torch.mean(e2**2)):3.2e}",
# )
# fig.colorbar(sc, ax=ax[1, 1])
# ax[1, 1].legend()
# plt.show()