PINNs: grad-div system 2D¶
Solves a parametric grad-div system in 2D with Dirichlet boundary conditions using PINNs.
\[\begin{split}\left\{\begin{array}{rl} \nabla (\nabla \cdot u) + u & = f \text{ in } \Omega \times M \\
u & = g \text{ on } \partial \Omega \times M\end{array}\right.\end{split}\]
where \(u: \Omega \times M \to \mathbb{R}^2\) is the unknown function, \(\Omega \subset \mathbb{R}^2\) is the spatial domain, \(M \subset \mathbb{R}\) is the parametric domain, and \(f: \Omega \times M \to \mathbb{R}^2\) is the source term.
The equation is solved on a square domain with two preconditioned PINN variants: energy natural gradient (ENG) and Anagram. Both weak and strong boundary conditions are demonstrated.
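The test case uses the manufactured solution

\[u(x, y; \alpha) = \begin{pmatrix} \sin(2\pi x)\sin(2\pi y) \\ \alpha\,\sin(2\pi x)\sin(2\pi y) \end{pmatrix},\]

with a single parameter \(\alpha\); applying the operator to it yields the right-hand side implemented in f_rhs below,

\[f(x, y; \alpha) = \begin{pmatrix} (1 - 4\pi^2)\sin(2\pi x)\sin(2\pi y) + 4\pi^2\alpha\cos(2\pi x)\cos(2\pi y) \\ (1 - 4\pi^2)\,\alpha\sin(2\pi x)\sin(2\pi y) + 4\pi^2\cos(2\pi x)\cos(2\pi y) \end{pmatrix}.\]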
[1]:
from typing import Callable, Tuple
import matplotlib.pyplot as plt
import torch
from scimba_torch.approximation_space.abstract_space import AbstractApproxSpace
from scimba_torch.approximation_space.nn_space import NNxSpace
from scimba_torch.domain.meshless_domain.domain_2d import Square2D
from scimba_torch.integration.monte_carlo import DomainSampler, TensorizedSampler
from scimba_torch.integration.monte_carlo_parameters import UniformParametricSampler
from scimba_torch.neural_nets.coordinates_based_nets.mlp import GenericMLP
from scimba_torch.numerical_solvers.elliptic_pde.pinns import (
AnagramPinnsElliptic,
NaturalGradientPinnsElliptic,
)
from scimba_torch.plots.plots_nd import plot_abstract_approx_spaces
from scimba_torch.utils.scimba_tensors import LabelTensor, MultiLabelTensor
from scimba_torch.utils.typing_protocols import VarArgCallable
torch.manual_seed(0)
class GradDiv2D:
    """Grad-div system: tensor-based PDE and boundary operators, plus the
    torch.func counterparts used by the preconditioned solvers."""

    def __init__(
        self,
        space: AbstractApproxSpace,
        f: Callable,
        g: Callable,
        **kwargs,
    ):
        self.space = space
        self.f = f  # right-hand side f(x, mu)
        self.g = g  # Dirichlet data g(x, mu)
        self.linear = True  # the operator is linear in u
        self.residual_size = 2  # two PDE residual components
        self.bc_residual_size = 2  # two boundary residual components
    def grad(
        self,
        w: torch.Tensor | MultiLabelTensor,
        y: torch.Tensor | LabelTensor,
    ) -> torch.Tensor | Tuple[torch.Tensor, ...]:
        # differentiation is delegated to the approximation space
        return self.space.grad(w, y)
def rhs(
self, w: MultiLabelTensor, x: LabelTensor, mu: LabelTensor
) -> Tuple[torch.Tensor]:
return self.f(x, mu)
def bc_rhs(
self, w: MultiLabelTensor, x: LabelTensor, n: LabelTensor, mu: LabelTensor
) -> Tuple[torch.Tensor]:
return self.g(x, mu)
    def operator(
        self, w: MultiLabelTensor, xs: LabelTensor, mu: LabelTensor
    ) -> Tuple[torch.Tensor]:
        u, v = w.get_components()
        # first derivatives of both components
        u_x, u_y = self.grad(u, xs)
        v_x, v_y = self.grad(v, xs)
        # second derivatives entering grad(div(u, v))
        u_xx, u_xy = self.grad(u_x, xs)
        v_yx, v_yy = self.grad(v_y, xs)
        # componentwise residual of grad(div(u, v)) + (u, v)
        return u_xx + v_yx + u, u_xy + v_yy + v
    def restrict_to_component(self, i: int, func):
        # keep only the i-th output component, preserving its dimension
        return lambda *args: func(*args)[i : i + 1, ...]
    def functional_operator(
        self,
        func: VarArgCallable,
        x: torch.Tensor,
        mu: torch.Tensor,
        theta: torch.Tensor,
    ) -> torch.Tensor:
        # pointwise torch.func version of the operator, differentiable with
        # respect to theta, as required by the preconditioners
        uv_x = func(x, mu, theta)
        grad_u = self.restrict_to_component(0, torch.func.jacrev(func, 0))
        grad_v = self.restrict_to_component(1, torch.func.jacrev(func, 0))
        hessian_u = torch.func.jacrev(grad_u, 0)(x, mu, theta).squeeze()
        hessian_v = torch.func.jacrev(grad_v, 0)(x, mu, theta).squeeze()
        # (u_xx + v_xy + u, u_xy + v_yy + v), matching `operator` above
        res = hessian_u[..., 0] + hessian_v[..., 1] + uv_x
        return res
# Dirichlet conditions
def bc_operator(
self, w: MultiLabelTensor, x: LabelTensor, n: LabelTensor, mu: LabelTensor
) -> Tuple[torch.Tensor]:
u, v = w.get_components()
return u, v
def functional_operator_bc(
self,
func: VarArgCallable,
x: torch.Tensor,
n: torch.Tensor,
mu: torch.Tensor,
theta: torch.Tensor,
) -> torch.Tensor:
return func(x, mu, theta)
def exact_solution(xs: LabelTensor, mu: LabelTensor) -> torch.Tensor:
    # manufactured solution; the parameter alpha scales the second component
    x, y = xs.get_components()
    alpha = mu.get_components()
return torch.cat(
(
torch.sin(2.0 * torch.pi * x) * torch.sin(2.0 * torch.pi * y),
alpha * torch.sin(2.0 * torch.pi * x) * torch.sin(2.0 * torch.pi * y),
),
dim=-1,
)
def f_rhs(xs: LabelTensor, mu: LabelTensor) -> Tuple[torch.Tensor]:
x, y = xs.get_components()
alpha = mu.get_components()
PI = torch.pi
cos_x = torch.cos(2.0 * PI * x)
cos_y = torch.cos(2.0 * PI * y)
sin_x = torch.sin(2.0 * PI * x)
sin_y = torch.sin(2.0 * PI * y)
    f1 = (1 - 4 * PI**2) * sin_x * sin_y + 4 * PI**2 * alpha * cos_x * cos_y
    # alpha multiplies the whole (1 - 4*pi^2) factor so that f is consistent
    # with exact_solution above
    f2 = (1 - 4 * PI**2) * alpha * sin_x * sin_y + 4 * PI**2 * cos_x * cos_y
return f1, f2
def f_bc(xs: LabelTensor, mu: LabelTensor) -> Tuple[torch.Tensor]:
    # homogeneous Dirichlet data: g = 0 on the whole boundary
    x, _ = xs.get_components()
    return torch.zeros_like(x), torch.zeros_like(x)
bc_weight = 10.0  # penalty weight for the weak boundary conditions
domain_mu = [(0.75, 0.75)]  # degenerate interval: alpha is fixed to 0.75
domain_x = Square2D([(0.0, 1.0), (0.0, 1.0)], is_main_domain=True)
sampler = TensorizedSampler(
[DomainSampler(domain_x), UniformParametricSampler(domain_mu)]
)
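As a quick consistency check before training (a minimal sketch using plain torch autograd, independent of the scimba_torch classes), one can verify that applying \(\nabla(\nabla\cdot u) + u\) to the exact solution reproduces f_rhs; alpha is fixed to 0.75, matching domain_mu:

xy = torch.rand(8, 2, requires_grad=True)
alpha0 = 0.75  # the fixed parameter value from domain_mu
s = torch.sin(2.0 * torch.pi * xy[:, 0:1]) * torch.sin(2.0 * torch.pi * xy[:, 1:2])
w = torch.cat((s, alpha0 * s), dim=-1)  # exact (u, v)
grad_u = torch.autograd.grad(w[:, 0].sum(), xy, create_graph=True)[0]
grad_v = torch.autograd.grad(w[:, 1].sum(), xy, create_graph=True)[0]
div = grad_u[:, 0] + grad_v[:, 1]  # u_x + v_y
grad_div = torch.autograd.grad(div.sum(), xy, create_graph=True)[0]
res = grad_div + w  # grad(div(u, v)) + (u, v)
sin2 = torch.sin(2.0 * torch.pi * xy[:, 0]) * torch.sin(2.0 * torch.pi * xy[:, 1])
cos2 = torch.cos(2.0 * torch.pi * xy[:, 0]) * torch.cos(2.0 * torch.pi * xy[:, 1])
f1 = (1 - 4 * torch.pi**2) * sin2 + 4 * torch.pi**2 * alpha0 * cos2
f2 = (1 - 4 * torch.pi**2) * alpha0 * sin2 + 4 * torch.pi**2 * cos2
print(torch.allclose(res[:, 0], f1, atol=1e-3), torch.allclose(res[:, 1], f2, atol=1e-3))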
ENG preconditioned PINN, weak boundary conditions¶
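With bc_type="weak", the Dirichlet condition is enforced through a penalty term. Schematically (up to the library's exact sampling and weighting conventions, and with one loss per equation since one_loss_by_equation=True), the training loss is

\[\mathcal{L}(\theta) = \frac{1}{N}\sum_{i=1}^{N}\big|\mathcal{A}u_\theta(x_i,\mu_i) - f(x_i,\mu_i)\big|^2 + w_{bc}\,\frac{1}{N_{bc}}\sum_{j=1}^{N_{bc}}\big|u_\theta(x_j,\mu_j) - g(x_j,\mu_j)\big|^2,\]

where \(\mathcal{A}u = \nabla(\nabla\cdot u) + u\) and \(w_{bc}\) is bc_weight. Energy natural gradient (ENG) preconditioning then replaces the plain gradient step with a Gauss-Newton-type update \(\delta\theta = (G(\theta) + \lambda I)^{-1}\nabla_\theta\mathcal{L}\), where \(G\) is the Gram matrix of the residual Jacobian and \(\lambda\) is the matrix_regularization parameter (here \(10^{-6}\)).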
[2]:
space = NNxSpace(2, 1, GenericMLP, domain_x, sampler, layer_sizes=[64])
pde = GradDiv2D(space, f_rhs, f_bc)
pinns = NaturalGradientPinnsElliptic(
pde,
bc_type="weak",
bc_weight=bc_weight,
one_loss_by_equation=True,
matrix_regularization=1e-6,
)
pinns.solve(epochs=100, n_collocation=3000, n_bc_collocation=1000, verbose=True)
activating optimizer ScimbaSGD
epoch: 0, best loss: 1.187e+03
epoch: 0, loss: 1.187e+03
epoch: 6, best loss: 8.863e+02
epoch: 7, best loss: 4.695e+02
epoch: 8, best loss: 2.227e+02
epoch: 9, best loss: 1.975e+02
epoch: 10, best loss: 1.643e+02
epoch: 11, best loss: 1.305e+02
epoch: 12, best loss: 1.185e+02
epoch: 13, best loss: 5.498e+01
epoch: 14, best loss: 2.025e+01
epoch: 15, best loss: 5.500e+00
epoch: 16, best loss: 1.770e+00
epoch: 17, best loss: 8.329e-01
epoch: 18, best loss: 4.628e-01
epoch: 19, best loss: 3.801e-01
epoch: 20, best loss: 2.729e-01
epoch: 21, best loss: 1.524e-01
epoch: 22, best loss: 7.866e-02
epoch: 23, best loss: 1.793e-02
epoch: 24, best loss: 7.101e-03
epoch: 25, best loss: 4.004e-03
epoch: 26, best loss: 2.936e-03
epoch: 27, best loss: 2.475e-03
epoch: 28, best loss: 2.127e-03
epoch: 29, best loss: 2.055e-03
epoch: 30, best loss: 1.831e-03
epoch: 31, best loss: 1.632e-03
epoch: 32, best loss: 1.443e-03
epoch: 33, best loss: 1.277e-03
epoch: 34, best loss: 1.181e-03
epoch: 35, best loss: 1.104e-03
epoch: 36, best loss: 1.095e-03
epoch: 37, best loss: 1.081e-03
epoch: 38, best loss: 9.631e-04
epoch: 48, best loss: 8.895e-04
epoch: 51, best loss: 8.725e-04
epoch: 53, best loss: 8.534e-04
epoch: 67, best loss: 8.243e-04
epoch: 71, best loss: 8.083e-04
epoch: 77, best loss: 8.054e-04
epoch: 87, best loss: 7.843e-04
epoch: 90, best loss: 7.681e-04
Training done!
Final loss value: 8.208e-04
Best loss value: 7.681e-04
Anagram preconditioned PINN, weak boundary conditions¶
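Anagram preconditioning builds its update from a truncated singular value decomposition of the residual Jacobian, as the svd_threshold argument suggests. Schematically, with \(J = U\Sigma V^\top\), the parameter update is

\[\delta\theta = V\,\Sigma_\tau^{+}\,U^\top r,\]

where \(r\) is the residual vector and \(\Sigma_\tau^{+}\) inverts only the singular values above the threshold \(\tau\) (here \(5\cdot 10^{-2}\)); the truncation plays the role of ENG's explicit matrix regularization.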
[3]:
space2 = NNxSpace(2, 1, GenericMLP, domain_x, sampler, layer_sizes=[64])
pde2 = GradDiv2D(space2, f_rhs, f_bc)
pinns2 = AnagramPinnsElliptic(
pde2,
bc_type="weak",
bc_weight=bc_weight,
one_loss_by_equation=True,
svd_threshold=5e-2,
)
pinns2.solve(epochs=100, n_collocation=3000, n_bc_collocation=1000, verbose=True)
activating optimizer ScimbaSGD
epoch: 0, best loss: 1.164e+03
epoch: 0, loss: 1.164e+03
epoch: 18, best loss: 8.480e+02
epoch: 19, best loss: 5.002e+02
epoch: 20, best loss: 4.685e+02
epoch: 21, best loss: 3.914e+02
epoch: 22, best loss: 2.957e+02
epoch: 23, best loss: 2.436e+02
epoch: 24, best loss: 1.911e+02
epoch: 25, best loss: 1.452e+02
epoch: 26, best loss: 3.642e+01
epoch: 27, best loss: 1.812e+01
epoch: 28, best loss: 7.986e+00
epoch: 29, best loss: 5.778e+00
epoch: 30, best loss: 5.474e+00
epoch: 31, best loss: 4.938e+00
epoch: 32, best loss: 3.282e+00
epoch: 33, best loss: 2.551e+00
epoch: 34, best loss: 1.865e+00
epoch: 35, best loss: 1.016e+00
epoch: 36, best loss: 5.133e-01
epoch: 37, best loss: 3.869e-01
epoch: 38, best loss: 1.882e-01
epoch: 39, best loss: 9.455e-02
epoch: 40, best loss: 6.518e-02
epoch: 41, best loss: 5.510e-02
epoch: 42, best loss: 4.507e-02
epoch: 43, best loss: 3.249e-02
epoch: 44, best loss: 3.083e-02
epoch: 45, best loss: 2.796e-02
epoch: 46, best loss: 2.543e-02
epoch: 47, best loss: 2.086e-02
epoch: 48, best loss: 1.754e-02
epoch: 49, best loss: 1.462e-02
epoch: 50, best loss: 1.296e-02
epoch: 51, best loss: 1.105e-02
epoch: 52, best loss: 9.611e-03
epoch: 53, best loss: 8.886e-03
epoch: 54, best loss: 8.526e-03
epoch: 55, best loss: 8.218e-03
epoch: 56, best loss: 7.773e-03
epoch: 57, best loss: 6.944e-03
epoch: 58, best loss: 6.787e-03
epoch: 60, best loss: 6.568e-03
epoch: 61, best loss: 6.526e-03
epoch: 62, best loss: 6.138e-03
epoch: 64, best loss: 6.100e-03
epoch: 66, best loss: 5.991e-03
epoch: 69, best loss: 5.854e-03
epoch: 72, best loss: 5.827e-03
epoch: 74, best loss: 5.713e-03
epoch: 76, best loss: 5.616e-03
epoch: 87, best loss: 5.584e-03
epoch: 96, best loss: 5.575e-03
epoch: 99, best loss: 5.538e-03
Training done!
Final loss value: 5.548e-03
Best loss value: 5.538e-03
[4]:
plot_abstract_approx_spaces(
pinns.space,
domain_x,
domain_mu,
loss=pinns.losses,
error=exact_solution,
draw_contours=True,
n_drawn_contours=20,
title="solving GradDiv2D with ENG preconditioned PINN",
)
plt.show()
plot_abstract_approx_spaces(
pinns2.space,
domain_x,
domain_mu,
loss=pinns2.losses,
error=exact_solution,
draw_contours=True,
n_drawn_contours=20,
title="solving GradDiv2D with Anagram preconditioned PINN",
)
plt.show()
Strong boundary conditions¶
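In the strong formulation, the boundary condition is built into the approximation space rather than penalized. The Dirichlet data is homogeneous here (\(g \equiv 0\)), so multiplying the network output by a function that vanishes on \(\partial\Omega\),

\[\varphi(x, y) = x\,(x - 1)\,y\,(y - 1),\]

enforces the boundary condition exactly; no boundary collocation points or bc_weight are needed, which is why the solve calls below omit n_bc_collocation. functional_post_processing is the pointwise torch.func counterpart used by the preconditioners. A small check of this construction follows the next cell.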
[5]:
def post_processing(inputs: torch.Tensor, xs: LabelTensor, mu: LabelTensor):
    x, y = xs.get_components()
    # phi vanishes on the boundary of the unit square, so the post-processed
    # output satisfies the homogeneous Dirichlet condition exactly
    phi = x * (x - 1.0) * y * (y - 1.0)
    return inputs * phi
def functional_post_processing(
    func, x: torch.Tensor, mu: torch.Tensor, theta: torch.Tensor
) -> torch.Tensor:
    # pointwise torch.func counterpart: x is a single point (x[0], x[1])
    phi = x[0] * (x[0] - 1.0) * x[1] * (x[1] - 1.0)
    return func(x, mu, theta) * phi
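As a quick illustration (plain torch, points chosen by hand), \(\varphi\) vanishes at any point with a coordinate on the boundary of the unit square, so the post-processed output is exactly zero there:

pts = torch.tensor([[0.0, 0.3], [1.0, 0.7], [0.5, 0.0], [0.2, 1.0]])  # boundary points
phi = pts[:, 0] * (pts[:, 0] - 1.0) * pts[:, 1] * (pts[:, 1] - 1.0)
print(phi)  # zero (up to signed zeros) at all four points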
ENG preconditioned PINN, strong boundary conditions¶
[6]:
space3 = NNxSpace(
    2, 1, GenericMLP, domain_x, sampler,
    layer_sizes=[64], post_processing=post_processing,
)
pde3 = GradDiv2D(space3, f_rhs, f_bc)
pinns3 = NaturalGradientPinnsElliptic(
pde3,
bc_type="strong",
one_loss_by_equation=True,
matrix_regularization=1e-6,
functional_post_processing=functional_post_processing,
)
pinns3.solve(epochs=50, n_collocation=3000, verbose=True)
activating optimizer ScimbaSGD
epoch: 0, best loss: 1.186e+03
epoch: 0, loss: 1.186e+03
epoch: 4, best loss: 9.399e+02
epoch: 5, best loss: 9.278e+02
epoch: 6, best loss: 8.645e+02
epoch: 7, best loss: 8.380e+02
epoch: 8, best loss: 6.795e+02
epoch: 9, best loss: 5.968e+02
epoch: 10, best loss: 4.763e+02
epoch: 11, best loss: 3.300e+02
epoch: 12, best loss: 2.229e+02
epoch: 13, best loss: 9.942e+01
epoch: 14, best loss: 9.582e+01
epoch: 15, best loss: 7.084e+01
epoch: 16, best loss: 5.207e+01
epoch: 17, best loss: 2.797e+01
epoch: 18, best loss: 1.751e+01
epoch: 19, best loss: 1.431e+01
epoch: 20, best loss: 5.647e+00
epoch: 21, best loss: 3.861e+00
epoch: 22, best loss: 2.183e+00
epoch: 23, best loss: 1.075e+00
epoch: 24, best loss: 4.906e-01
epoch: 25, best loss: 2.642e-01
epoch: 26, best loss: 1.743e-01
epoch: 27, best loss: 1.200e-01
epoch: 28, best loss: 8.177e-02
epoch: 29, best loss: 2.851e-02
epoch: 30, best loss: 2.349e-02
epoch: 31, best loss: 1.335e-02
epoch: 32, best loss: 6.307e-03
epoch: 33, best loss: 2.736e-03
epoch: 34, best loss: 2.411e-03
epoch: 35, best loss: 2.402e-03
epoch: 36, best loss: 2.231e-03
epoch: 37, best loss: 2.115e-03
epoch: 39, best loss: 2.065e-03
epoch: 41, best loss: 1.985e-03
epoch: 42, best loss: 1.910e-03
epoch: 45, best loss: 1.892e-03
epoch: 46, best loss: 1.823e-03
epoch: 47, best loss: 1.729e-03
Training done!
Final loss value: 1.725e-03
Best loss value: 1.729e-03
Anagram preconditioned PINN, strong boundary conditions¶
[7]:
space4 = NNxSpace(
    2, 1, GenericMLP, domain_x, sampler,
    layer_sizes=[64], post_processing=post_processing,
)
pde4 = GradDiv2D(space4, f_rhs, f_bc)
pinns4 = AnagramPinnsElliptic(
pde4,
bc_type="strong",
one_loss_by_equation=True,
svd_threshold=5e-3,
functional_post_processing=functional_post_processing,
)
pinns4.solve(epochs=200, n_collocation=3000, verbose=True)
activating optimizer ScimbaSGD
epoch: 0, best loss: 1.185e+03
epoch: 0, loss: 1.185e+03
epoch: 1, best loss: 1.181e+03
epoch: 2, best loss: 1.176e+03
epoch: 3, best loss: 1.158e+03
epoch: 4, best loss: 1.142e+03
epoch: 5, best loss: 1.129e+03
epoch: 8, best loss: 9.615e+02
epoch: 9, best loss: 7.664e+02
epoch: 10, best loss: 7.253e+02
epoch: 11, best loss: 6.925e+02
epoch: 12, best loss: 6.380e+02
epoch: 13, best loss: 5.574e+02
epoch: 14, best loss: 5.205e+02
epoch: 15, best loss: 4.750e+02
epoch: 16, best loss: 2.980e+02
epoch: 17, best loss: 2.883e+02
epoch: 18, best loss: 2.780e+02
epoch: 19, best loss: 2.135e+02
epoch: 20, best loss: 1.766e+02
epoch: 21, best loss: 1.584e+02
epoch: 22, best loss: 1.250e+02
epoch: 23, best loss: 1.148e+02
epoch: 24, best loss: 1.030e+02
epoch: 25, best loss: 6.275e+01
epoch: 26, best loss: 5.841e+01
epoch: 27, best loss: 5.044e+01
epoch: 28, best loss: 3.358e+01
epoch: 29, best loss: 3.195e+01
epoch: 30, best loss: 1.905e+01
epoch: 31, best loss: 1.816e+01
epoch: 32, best loss: 1.607e+01
epoch: 33, best loss: 1.584e+01
epoch: 34, best loss: 1.178e+01
epoch: 36, best loss: 1.028e+01
epoch: 37, best loss: 6.866e+00
epoch: 38, best loss: 5.576e+00
epoch: 39, best loss: 4.982e+00
epoch: 40, best loss: 4.402e+00
epoch: 41, best loss: 3.635e+00
epoch: 42, best loss: 3.549e+00
epoch: 43, best loss: 2.289e+00
epoch: 44, best loss: 1.437e+00
epoch: 45, best loss: 1.376e+00
epoch: 46, best loss: 1.355e+00
epoch: 47, best loss: 1.329e+00
epoch: 48, best loss: 1.159e+00
epoch: 49, best loss: 1.126e+00
epoch: 50, best loss: 1.022e+00
epoch: 51, best loss: 7.298e-01
epoch: 52, best loss: 6.850e-01
epoch: 53, best loss: 6.290e-01
epoch: 54, best loss: 5.523e-01
epoch: 55, best loss: 4.639e-01
epoch: 56, best loss: 4.345e-01
epoch: 57, best loss: 3.530e-01
epoch: 59, best loss: 3.016e-01
epoch: 60, best loss: 2.946e-01
epoch: 61, best loss: 2.807e-01
epoch: 62, best loss: 2.586e-01
epoch: 63, best loss: 2.558e-01
epoch: 64, best loss: 1.854e-01
epoch: 65, best loss: 1.511e-01
epoch: 66, best loss: 1.425e-01
epoch: 67, best loss: 1.259e-01
epoch: 69, best loss: 1.211e-01
epoch: 70, best loss: 1.192e-01
epoch: 71, best loss: 8.800e-02
epoch: 72, best loss: 8.199e-02
epoch: 73, best loss: 7.392e-02
epoch: 74, best loss: 7.158e-02
epoch: 75, best loss: 6.658e-02
epoch: 76, best loss: 6.580e-02
epoch: 77, best loss: 5.822e-02
epoch: 78, best loss: 5.423e-02
epoch: 80, best loss: 4.673e-02
epoch: 81, best loss: 4.062e-02
epoch: 83, best loss: 3.784e-02
epoch: 84, best loss: 3.435e-02
epoch: 85, best loss: 3.382e-02
epoch: 86, best loss: 3.200e-02
epoch: 87, best loss: 3.148e-02
epoch: 88, best loss: 2.772e-02
epoch: 89, best loss: 2.737e-02
epoch: 90, best loss: 2.422e-02
epoch: 91, best loss: 2.170e-02
epoch: 94, best loss: 2.165e-02
epoch: 95, best loss: 1.999e-02
epoch: 97, best loss: 1.938e-02
epoch: 98, best loss: 1.860e-02
epoch: 99, best loss: 1.769e-02
epoch: 100, best loss: 1.599e-02
epoch: 100, loss: 1.599e-02
epoch: 104, best loss: 1.480e-02
epoch: 105, best loss: 1.457e-02
epoch: 107, best loss: 1.432e-02
epoch: 109, best loss: 1.331e-02
epoch: 112, best loss: 1.321e-02
epoch: 113, best loss: 1.275e-02
epoch: 114, best loss: 1.255e-02
epoch: 115, best loss: 1.245e-02
epoch: 116, best loss: 1.142e-02
epoch: 117, best loss: 1.121e-02
epoch: 118, best loss: 1.025e-02
epoch: 121, best loss: 1.018e-02
epoch: 122, best loss: 1.004e-02
epoch: 124, best loss: 9.550e-03
epoch: 125, best loss: 9.125e-03
epoch: 127, best loss: 8.791e-03
epoch: 128, best loss: 8.469e-03
epoch: 132, best loss: 8.021e-03
epoch: 133, best loss: 7.959e-03
epoch: 136, best loss: 7.822e-03
epoch: 137, best loss: 7.396e-03
epoch: 138, best loss: 7.377e-03
epoch: 141, best loss: 7.252e-03
epoch: 142, best loss: 7.245e-03
epoch: 143, best loss: 6.758e-03
epoch: 145, best loss: 6.507e-03
epoch: 147, best loss: 6.289e-03
epoch: 150, best loss: 5.832e-03
epoch: 154, best loss: 5.368e-03
epoch: 156, best loss: 5.345e-03
epoch: 159, best loss: 5.289e-03
epoch: 162, best loss: 4.416e-03
epoch: 166, best loss: 4.062e-03
epoch: 171, best loss: 3.685e-03
epoch: 172, best loss: 3.560e-03
epoch: 177, best loss: 3.558e-03
epoch: 179, best loss: 3.070e-03
epoch: 183, best loss: 2.968e-03
epoch: 186, best loss: 2.894e-03
epoch: 189, best loss: 2.884e-03
epoch: 191, best loss: 2.715e-03
epoch: 196, best loss: 2.713e-03
epoch: 199, best loss: 2.700e-03
Training done!
Final loss value: 2.602e-03
Best loss value: 2.700e-03
[8]:
plot_abstract_approx_spaces(
pinns3.space,
domain_x,
domain_mu,
loss=pinns3.losses,
error=exact_solution,
draw_contours=True,
n_drawn_contours=20,
title="solving GradDiv2D with ENG preconditioned PINN",
)
plt.show()
plot_abstract_approx_spaces(
pinns4.space,
domain_x,
domain_mu,
loss=pinns4.losses,
error=exact_solution,
draw_contours=True,
n_drawn_contours=20,
title="solving GradDiv2D with Anagram preconditioned PINN",
)
plt.show()