r"""Solves the electrostatics equation in 2D on a bi-material domain using PINNs. .. math:: -\sigma (\partial_{xx} u_1 - \partial_{yy} u_1) = 0 in \Omega The domain is defined as a bi-material region (a rectangle within a square) with different material properties (:math:`\sigma = 1` in the outer domain and :math:`\sigma = 5` in the inner domain); the sixteen boundary and interface conditions are all enforced weakly. The neural network used is a simple MLP (Multilayer Perceptron); the optimization is performed using either Adam, Natural Gradient Descent, or Anagram. """ from collections import OrderedDict import matplotlib.pyplot as plt import torch from scimba_torch.approximation_space.nn_space import NNxSpace from scimba_torch.domain.meshless_domain.domain_2d import Square2D from scimba_torch.integration.monte_carlo import DomainSampler, TensorizedSampler from scimba_torch.integration.monte_carlo_parameters import UniformParametricSampler from scimba_torch.neural_nets.coordinates_based_nets.mlp import GenericMLP from scimba_torch.numerical_solvers.elliptic_pde.pinns import ( AnagramPinnsElliptic, NaturalGradientPinnsElliptic, PinnsElliptic, ) from scimba_torch.optimizers.optimizers_data import OptimizerData from scimba_torch.physical_models.elliptic_pde.abstract_elliptic_pde import ( StrongFormEllipticPDE, ) from scimba_torch.plots.plots_nd import plot_abstract_approx_spaces from scimba_torch.utils.scimba_tensors import LabelTensor, MultiLabelTensor class BimaterialElectrostatic(StrongFormEllipticPDE): def __init__(self, space, f, g, **kwargs): super().__init__( space, linear=True, residual_size=2, bc_residual_size=16, **kwargs ) self.f = f self.g = g self.e_r = 3.3 def rhs(self, w, x, mu): return self.f(x, mu) def operator(self, w, x, mu): u_0, u_1 = w.get_components() u_x_0, u_y_0 = self.grad(u_0, x) u_xx_0, _ = self.grad(u_x_0, x) _, u_yy_0 = self.grad(u_y_0, x) u_x_1, u_y_1 = self.grad(u_1, x) u_xx_1, _ = self.grad(u_x_1, x) _, u_yy_1 = self.grad(u_y_1, x) u_xx_0 = w.restrict_to_labels(u_xx_0, labels=[0]) u_yy_0 = w.restrict_to_labels(u_yy_0, labels=[0]) u_xx_1 = w.restrict_to_labels(u_xx_1, labels=[1]) u_yy_1 = w.restrict_to_labels(u_yy_1, labels=[1]) return -(u_xx_0 + u_yy_0), -5 * (u_xx_1 + u_yy_1) def restrict_to_component(self, i: int, func): return lambda *args: func(*args)[i : i + 1, ...] 
    def functional_operator_0(self, func, x, mu, theta):
        grad_u = self.restrict_to_component(0, torch.func.jacrev(func, 0))
        hessian_u = torch.func.jacrev(grad_u, 0)(x, mu, theta)
        return -(hessian_u[..., 0, 0] + hessian_u[..., 1, 1])

    def functional_operator_1(self, func, x, mu, theta):
        grad_u = self.restrict_to_component(1, torch.func.jacrev(func, 0))
        hessian_u = torch.func.jacrev(grad_u, 0)(x, mu, theta)
        return -5.0 * (hessian_u[..., 0, 0] + hessian_u[..., 1, 1])

    def functional_operator(self) -> OrderedDict:
        return OrderedDict(
            [(0, self.functional_operator_0), (1, self.functional_operator_1)]
        )

    def bc_rhs(self, w, x, n, mu):
        return self.g(x, mu)

    def bc_operator(
        self, w: MultiLabelTensor, x: LabelTensor, n: LabelTensor, mu: LabelTensor
    ) -> tuple[torch.Tensor, ...]:
        u_o, u_i = w.get_components()
        u_o_x, u_o_y = self.grad(u_o, x)
        u_i_x, u_i_y = self.grad(u_i, x)
        # Outer boundary traces: labels 0-3 are the S, E, N, W edges of the square.
        u_o_y_S = w.restrict_to_labels(u_o_y, labels=[0])
        u_o_E = w.restrict_to_labels(u_o, labels=[1])
        u_o_y_N = w.restrict_to_labels(u_o_y, labels=[2])
        u_o_W = w.restrict_to_labels(u_o, labels=[3])
        # Interface traces: labels 100-103 are the S, E, N, W edges of the rectangle.
        u_o_i_S = w.restrict_to_labels(u_o, labels=[100])
        u_i_S = w.restrict_to_labels(u_i, labels=[100])
        u_o_i_E = w.restrict_to_labels(u_o, labels=[101])
        u_i_E = w.restrict_to_labels(u_i, labels=[101])
        u_o_i_N = w.restrict_to_labels(u_o, labels=[102])
        u_i_N = w.restrict_to_labels(u_i, labels=[102])
        u_o_i_W = w.restrict_to_labels(u_o, labels=[103])
        u_i_W = w.restrict_to_labels(u_i, labels=[103])
        u_o_i_y_S = w.restrict_to_labels(u_o_y, labels=[100])
        u_i_y_S = w.restrict_to_labels(u_i_y, labels=[100])
        u_o_i_x_E = w.restrict_to_labels(u_o_x, labels=[101])
        u_i_x_E = w.restrict_to_labels(u_i_x, labels=[101])
        u_o_i_y_N = w.restrict_to_labels(u_o_y, labels=[102])
        u_i_y_N = w.restrict_to_labels(u_i_y, labels=[102])
        u_o_i_x_W = w.restrict_to_labels(u_o_x, labels=[103])
        u_i_x_W = w.restrict_to_labels(u_i_x, labels=[103])
        return (
            # Outer boundary: weighted homogeneous Neumann on S/N, weighted
            # Dirichlet u = -1 on E and u = +1 on W; the zero entries leave the
            # inner component unconstrained on the outer boundary.
            2 * u_o_y_S,
            0 * u_o_y_S,
            3 * (u_o_E + 1),
            0 * u_o_E,
            2 * u_o_y_N,
            0 * u_o_y_N,
            3 * (u_o_W - 1),
            0 * u_o_W,
            # Interface: weighted continuity of u and of the normal flux.
            5 * (u_o_i_S - u_i_S),
            1 * (u_o_i_y_S - self.e_r * u_i_y_S),
            4 * (u_o_i_E - u_i_E),
            1 * (u_o_i_x_E - self.e_r * u_i_x_E),
            5 * (u_o_i_N - u_i_N),
            1 * (u_o_i_y_N - self.e_r * u_i_y_N),
            4 * (u_o_i_W - u_i_W),
            1 * (u_o_i_x_W - self.e_r * u_i_x_W),
        )

    def functional_operator_bc_0(self, func, x, n, mu, theta):
        res = 2 * torch.func.jacrev(func, 0)(x, mu, theta)[..., 1]
        res[1, ...] *= 0.0
        return res

    def functional_operator_bc_1(self, func, x, n, mu, theta):
        res = 3 * (func(x, mu, theta) + 1.0)
        res[1, ...] *= 0.0
        return res

    def functional_operator_bc_2(self, func, x, n, mu, theta):
        res = 2 * torch.func.jacrev(func, 0)(x, mu, theta)[..., 1]
        res[1, ...] *= 0.0
        return res

    def functional_operator_bc_3(self, func, x, n, mu, theta):
        res = 3 * (func(x, mu, theta) - 1.0)
        res[1, ...] *= 0.0
        return res
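
    # Labels 100-103 below handle the material interface: each imposes value
    # continuity u_o = u_i together with the flux condition
    # d_n u_o = e_r * d_n u_i, where the normal derivative is d_y on the S/N
    # edges (100/102) and d_x on the E/W edges (101/103).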
    def functional_operator_bc_100(self, func, x, n, mu, theta):
        u = func(x, mu, theta)
        u0_m_u1 = u[0:1, ...] - u[1:2, ...]
        grad_u = torch.func.jacrev(func, 0)(x, mu, theta)
        grad_u0_m_grad_u1 = grad_u[0:1, 1] - self.e_r * grad_u[1:2, 1]
        res = torch.cat((5 * u0_m_u1, 1 * grad_u0_m_grad_u1), dim=0)
        return res

    def functional_operator_bc_101(self, func, x, n, mu, theta):
        u = func(x, mu, theta)
        u0_m_u1 = u[0:1, ...] - u[1:2, ...]
        grad_u = torch.func.jacrev(func, 0)(x, mu, theta)
        grad_u0_m_grad_u1 = grad_u[0:1, 0] - self.e_r * grad_u[1:2, 0]
        res = torch.cat((4 * u0_m_u1, 1 * grad_u0_m_grad_u1), dim=0)
        return res

    def functional_operator_bc_102(self, func, x, n, mu, theta):
        # The N interface condition is identical to the S one.
        return self.functional_operator_bc_100(func, x, n, mu, theta)

    def functional_operator_bc_103(self, func, x, n, mu, theta):
        # Same form as the E condition (label 101): x is the normal direction.
        u = func(x, mu, theta)
        u0_m_u1 = u[0:1, ...] - u[1:2, ...]
        grad_u = torch.func.jacrev(func, 0)(x, mu, theta)
        grad_u0_m_grad_u1 = grad_u[0:1, 0] - self.e_r * grad_u[1:2, 0]
        res = torch.cat((4 * u0_m_u1, 1 * grad_u0_m_grad_u1), dim=0)
        return res

    def functional_operator_bc(self) -> OrderedDict:
        return OrderedDict(
            [
                (0, self.functional_operator_bc_0),
                (1, self.functional_operator_bc_1),
                (2, self.functional_operator_bc_2),
                (3, self.functional_operator_bc_3),
                (100, self.functional_operator_bc_100),
                (101, self.functional_operator_bc_101),
                (102, self.functional_operator_bc_102),
                (103, self.functional_operator_bc_103),
            ]
        )


def f_rhs(x: LabelTensor, mu: LabelTensor):
    x_0, _ = x.get_components(label=0)
    x_1, _ = x.get_components(label=1)
    return torch.zeros_like(x_0), torch.zeros_like(x_1)


def f_bc(x: LabelTensor, mu: LabelTensor):
    x_0, _ = x.get_components(label=0)
    x_1, _ = x.get_components(label=1)
    x_2, _ = x.get_components(label=2)
    x_3, _ = x.get_components(label=3)
    x_4, _ = x.get_components(label=100)
    x_5, _ = x.get_components(label=101)
    x_6, _ = x.get_components(label=102)
    x_7, _ = x.get_components(label=103)
    return (
        torch.zeros_like(x_0),
        torch.zeros_like(x_0),
        torch.zeros_like(x_1),
        torch.zeros_like(x_1),
        torch.zeros_like(x_2),
        torch.zeros_like(x_2),
        torch.zeros_like(x_3),
        torch.zeros_like(x_3),
        torch.zeros_like(x_4),
        torch.zeros_like(x_4),
        torch.zeros_like(x_5),
        torch.zeros_like(x_5),
        torch.zeros_like(x_6),
        torch.zeros_like(x_6),
        torch.zeros_like(x_7),
        torch.zeros_like(x_7),
    )


domain_mu = []
domain_x = Square2D([(-1, 1), (-1, 1)], is_main_domain=True)
domain_x_in = Square2D([(-0.5, 0.5), (-0.25, 0.25)], is_main_domain=False)
domain_x.add_subdomain(domain_x_in)

# bc_domains, bc_holes, bc_subdomains = domain_x.get_all_bc_domains()
# print("len(bc_domains): ", len(bc_domains))
# print("len(bc_subdomains): ", len(bc_subdomains))

sampler = TensorizedSampler(
    [DomainSampler(domain_x), UniformParametricSampler(domain_mu)]
)

# x, mu = sampler.sample(100)
# print("\n\n")
# print("x: ", x)
# print("mu: ", mu)
# xnbc, mubc = sampler.bc_sample(100, index_bc=0)
# xbc, nbc = xnbc[0], xnbc[1]
# print("len(xnbc): ", len(xnbc))
# print("xbc: ", xbc)
# print("nbc: ", nbc)
# print("mubc: ", mubc)

print("############################# No preconditioning ###########################")

space = NNxSpace(
    2,
    0,
    GenericMLP,
    domain_x,
    sampler,
    layer_sizes=[40, 40, 40, 40],
)
pde = BimaterialElectrostatic(space, f=f_rhs, g=f_bc)
opt = {
    "name": "adam",
    "optimizer_args": {"lr": 1.8e-2, "betas": (0.9, 0.999)},
}
pinns = PinnsElliptic(
    pde,
    bc_type="weak",
    bc_weight=30.0,
    optimizers=OptimizerData(opt),
    one_loss_by_equation=True,
)

new_solving = False
if new_solving or not pinns.load(__file__, "no_precond"):
    pinns.solve(epochs=500, n_collocation=6000, n_bc_collocation=6000, verbose=True)
    pinns.save(__file__, "no_precond")
pinns.space.load_from_best_approx()

# plot_abstract_approx_spaces(
#     (pinns.space,),
#     domain_x,
#     domain_mu,
#     loss=(pinns.losses,),
#     residual=(pinns.pde,),
#     derivatives=["ux"],
#     draw_contours=True,
#     n_drawn_contours=20,
#     # parameters_values="mean",
#     title="Solving Bimaterial Electrostatic",
#     # titles=("no preconditioning", "ENG", "Anagram"),
#     loss_groups=["eq bc"],
# )
# plt.show()

print("########################################################\n")
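
# The two runs below use preconditioned optimizers instead of Adam. Natural
# Gradient (ENG) replaces the plain gradient step by a damped Gauss-Newton-like
# update theta <- theta - eta * (J^T J + lam * I)^{-1} J^T r, where J is the
# per-sample Jacobian of the residual w.r.t. the parameters, r the residual
# vector, and lam the `matrix_regularization` passed below. A hedged,
# standalone sketch of such an update (`J`, `r`, `lam`, `eta` are hypothetical
# tensors, not scimba_torch API):
#
#     G = J.T @ J  # Gram matrix in parameter space
#     step = torch.linalg.solve(G + lam * torch.eye(G.shape[0]), J.T @ r)
#     theta = theta - eta * step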
"Anagram"), # loss_groups=["eq bc"], # ) # plt.show() print("########################################################\n") print("############################# ENG preconditioning ###########################") space2 = NNxSpace( 2, 0, GenericMLP, domain_x, sampler, layer_sizes=[16, 16, 16], ) pde2 = BimaterialElectrostatic(space2, f=f_rhs, g=f_bc) pinns2 = NaturalGradientPinnsElliptic( pde2, bc_type="weak", bc_weight=30.0, one_loss_by_equation=True, matrix_regularization=5e-4, ) new_solving = False if new_solving or not pinns2.load(__file__, "ENG"): pinns2.solve(epochs=150, n_collocation=2000, n_bc_collocation=6000, verbose=True) pinns2.save(__file__, "ENG") print( "############################# Anagram preconditioning ###########################" ) space3 = NNxSpace( 2, 0, GenericMLP, domain_x, sampler, layer_sizes=[16, 16, 16], ) pde3 = BimaterialElectrostatic(space3, f=f_rhs, g=f_bc) pinns3 = AnagramPinnsElliptic( pde3, bc_type="weak", bc_weight=30.0, one_loss_by_equation=True, svd_threshold=1e-1, ) new_solving = False if new_solving or not pinns3.load(__file__, "Anagram"): pinns3.solve(epochs=150, n_collocation=2000, n_bc_collocation=6000, verbose=True) pinns3.save(__file__, "Anagram") print("########################################################\n") plot_abstract_approx_spaces( (pinns.space, pinns2.space, pinns3.space), domain_x, domain_mu, loss=(pinns.losses, pinns2.losses, pinns3.losses), residual=(pinns.pde, pinns2.pde, pinns3.pde), derivatives=["ux"], assemble_labels=([0, 1], [0, 1], [0, 1]), draw_contours=True, n_drawn_contours=20, # parameters_values="mean", title="Solving Bimaterial Electrostatic", titles=("no preconditioning", "ENG", "Anagram"), loss_groups=(["eq bc"], ["eq bc"], ["eq bc"]), ) plt.show()