PINNs: viscous Burgers 1D¶
Solves the viscous Burgers equation (nonlinear advection–diffusion) in 1D using a PINN.
\[\begin{split}\left\{\begin{array}{rl}\partial_t u + \partial_x \frac {u^2}{2} - \sigma \partial_{xx} u & = f \text{ in } \Omega \times (0, T) \\
u & = g \text{ on } \partial \Omega \times (0, T) \\
u & = u_0 \text{ on } \Omega \times \{0\} \end{array}\right.\end{split}\]
where \(u: \Omega \times (0, T) \to \mathbb{R}\) is the unknown function, \(\Omega \subset \mathbb{R}\) is the spatial domain and \((0, T) \subset \mathbb{R}\) is the time domain. Dirichlet boundary conditions are prescribed on \(\partial \Omega \times (0, T)\).
The equation is solved on a segment domain; strong boundary and initial conditions are used. Three training strategies are compared: standard PINNs, PINNs with energy natural gradient preconditioning and PINNs with Anagram preconditioning.
[1]:
import matplotlib.pyplot as plt
import torch
from scimba_torch.approximation_space.nn_space import NNxtSpace
from scimba_torch.domain.meshless_domain.domain_1d import Segment1D
from scimba_torch.integration.monte_carlo import DomainSampler, TensorizedSampler
from scimba_torch.integration.monte_carlo_parameters import UniformParametricSampler
from scimba_torch.integration.monte_carlo_time import UniformTimeSampler
from scimba_torch.neural_nets.coordinates_based_nets.mlp import GenericMLP
from scimba_torch.numerical_solvers.temporal_pde.pinns import (
AnagramTemporalPinns,
NaturalGradientTemporalPinns,
TemporalPinns,
)
from scimba_torch.optimizers.optimizers_data import OptimizerData
from scimba_torch.physical_models.temporal_pde.abstract_temporal_pde import (
FirstOrderTemporalPDE,
)
from scimba_torch.plots.plots_nd import plot_abstract_approx_spaces
from scimba_torch.utils.scimba_tensors import LabelTensor
# Fix the RNG seed so network initialization and collocation sampling are reproducible.
torch.manual_seed(0)
PI = torch.pi  # convenience alias used by the manufactured solution below
class ViscousBurgers1D(FirstOrderTemporalPDE):
    """Viscous Burgers equation in 1D:

        du/dt + d/dx(u^2 / 2) - sigma * d2u/dx2 = f   in Omega x (0, T),

    with Dirichlet boundary data ``g`` and initial condition ``init``.
    Exposes both the classical (tensor-autograd) operators and the
    functional-form operators used by the preconditioned solvers.
    """

    def __init__(self, space, init, f, g, **kwargs):
        # linear=False: the convective flux u^2/2 makes the PDE nonlinear.
        super().__init__(space, linear=False, **kwargs)
        self.init = init  # initial condition callable: (x, mu) -> u0
        self.f = f  # source term callable: (w, t, x, mu, sigma) -> f
        self.g = g  # Dirichlet boundary data callable: (w, t, x, n, mu) -> g
        # One scalar residual each for the PDE, the boundary and the initial condition.
        self.residual_size = 1
        self.bc_residual_size = 1
        self.ic_residual_size = 1
        # Viscosity coefficient; defaults to 1e-2 when "sigma" is not supplied.
        self.σ = kwargs.get("sigma", 1e-2)

    def space_operator(self, w, t, x, mu):
        """Spatial part of the residual: d/dx(u^2/2) - sigma * d2u/dx2."""
        u = w.get_components()
        u2_x = self.space.grad(u**2 / 2, x)
        # Second derivative obtained by applying the gradient twice.
        u_xx = self.space.grad(self.space.grad(u, x), x)
        return u2_x - self.σ * u_xx

    def time_operator(self, w, t, x, mu):
        """Temporal part of the residual: du/dt."""
        # NOTE(review): uses self.grad here while space_operator uses
        # self.space.grad — confirm both resolve to the same differential operator.
        return self.grad(w.get_components(), t)

    def bc_operator(self, w, t, x, n, mu):
        """Boundary operator for Dirichlet conditions: the trace of u itself."""
        return w.get_components()

    def rhs(self, w, t, x, mu):
        """Source term f, evaluated with this model's viscosity."""
        return self.f(w, t, x, mu, self.σ)

    def bc_rhs(self, w, t, x, n, mu):
        """Dirichlet boundary data g."""
        return self.g(w, t, x, n, mu)

    def initial_condition(self, x, mu):
        """Initial state u0(x, mu)."""
        return self.init(x, mu)

    def functional_operator(self, func, t, x, mu, theta):
        """Full PDE residual in functional form (for torch.func-based solvers).

        ``func(t, x, mu, theta)`` is the network as a pure function; derivatives
        are taken with torch.func.jacrev instead of tensor autograd.
        """
        # space operator
        def u2(t, x, mu, theta):
            return func(t, x, mu, theta) ** 2 / 2
        # d/dx of u^2/2 (argnums=1 differentiates w.r.t. x).
        dx_u2 = torch.func.jacrev(u2, 1)(t, x, mu, theta).squeeze()
        dx_u = torch.func.jacrev(func, 1)
        # Second spatial derivative: jacrev applied to the first derivative.
        d2x_u = torch.func.jacrev(dx_u, 1)(t, x, mu, theta).squeeze()
        space_op = dx_u2 - self.σ * d2x_u
        # time operator (argnums=0 differentiates w.r.t. t)
        time_op = torch.func.jacrev(func, 0)(t, x, mu, theta).squeeze()
        # return the sum of both operators
        return (time_op + space_op).unsqueeze(0)

    # Dirichlet conditions
    def functional_operator_bc(self, func, t, x, n, mu, theta):
        """Boundary residual in functional form: the value of u on the boundary."""
        return func(t, x, mu, theta)

    def functional_operator_ic(self, func, x, mu, theta):
        """Initial residual in functional form: u evaluated at t = 0."""
        t = torch.zeros_like(x)
        return func(t, x, mu, theta)
def f_rhs(w, t, x, mu, σ):
    """Manufactured source term for u(t, x) = sin(2*pi*x) * exp(-t).

    Plugging that u into du/dt + d/dx(u^2/2) - sigma * d2u/dx2 yields this
    expression, so the exact solution is known in closed form.
    """
    xs = x.get_components()
    ts = t.get_components()
    decay = torch.exp(-ts)
    angle = 2 * torch.pi * xs
    sin_x = torch.sin(angle)
    cos_x = torch.cos(angle)
    return decay * sin_x * (2 * torch.pi * (cos_x * decay + 2 * torch.pi * σ) - 1)
def g_bc(w, t, x, n, mu):
    """Homogeneous Dirichlet data: identically zero, shaped like x's components."""
    components = x.get_components()
    # Multiplying by zero (rather than zeros_like) keeps any autograd linkage.
    return components * 0
def functional_exact(t, x, mu):
    """Exact solution u(t, x) = sin(2*pi*x) * exp(-t) on raw tensors."""
    return torch.exp(-t) * torch.sin(2 * torch.pi * x)
def exact_solution(t, x, mu):
    """Evaluate the exact solution on labeled tensors (unwraps components first)."""
    t_vals = t.get_components()
    x_vals = x.get_components()
    mu_vals = mu.get_components()
    return functional_exact(t_vals, x_vals, mu_vals)
def initial_condition(x, mu):
    """Initial state u0(x) = u_exact(0, x): wraps t = 0 as a LabelTensor and evaluates."""
    zero_time = LabelTensor(torch.zeros_like(x.x))
    return exact_solution(zero_time, x, mu)
def post_processing(inputs, t, x, mu):
    """Strongly impose initial and boundary conditions on the network output.

    The ansatz u = u0(x) + NN * t * x * (1 - x) equals u0 at t = 0 and
    vanishes at x = 0 and x = 1, so both constraints hold by construction.
    """
    space_vals = x.get_components()
    time_vals = t.get_components()
    return initial_condition(x, mu) + inputs * time_vals * space_vals * (1 - space_vals)
def functional_post_processing(func, t, x, mu, theta):
    """Functional-form counterpart of ``post_processing`` for torch.func pipelines.

    Same strong-constraint ansatz, but expressed on raw tensors and a pure
    network function ``func(t, x, mu, theta)``.
    """
    zero_t = torch.zeros_like(x)
    baseline = functional_exact(zero_t, x, mu)
    return baseline + func(t, x, mu, theta) * t[0] * x[0] * (1 - x[0])
# Spatial domain: the unit segment [0, 1].
domain_x = Segment1D((0, 1), is_main_domain=True)
# No physical parameters mu in this problem.
domain_mu = []
# Time interval (0, T) with T = 0.4.
t_min, t_max = 0.0, 0.4
domain_t = (t_min, t_max)
# Collocation sampler: uniform tensorized sampling over time x space x parameters.
sampler = TensorizedSampler(
    [
        UniformTimeSampler(domain_t),
        DomainSampler(domain_x),
        UniformParametricSampler(domain_mu),
    ]
)
# Approximation space: scalar output, no parameters, 3-hidden-layer MLP;
# post_processing enforces initial and boundary conditions strongly.
space = NNxtSpace(
    1,
    0,
    GenericMLP,
    domain_x,
    sampler,
    layer_sizes=[20, 40, 20],
    post_processing=post_processing,
)
pde = ViscousBurgers1D(space, init=initial_condition, f=f_rhs, g=g_bc)
# Plain Adam optimizer for the baseline (non-preconditioned) PINN.
opt_1 = {
    "name": "adam",
    "optimizer_args": {"lr": 1.8e-2, "betas": (0.9, 0.999)},
}
pinn = TemporalPinns(
    pde,
    bc_type="strong",
    ic_type="strong",
    optimizers=OptimizerData(opt_1),
)
# Baseline training: many epochs are needed without preconditioning.
pinn.solve(epochs=1000, n_collocation=3000, verbose=True)
plot_abstract_approx_spaces(
    pinn.space,
    domain_x,
    domain_mu,
    domain_t,
    time_values=[0.0, 0.2, t_max],
    loss=pinn.losses,
    residual=pde,
    solution=exact_solution,
    error=exact_solution,
    title="learning sol of 1D viscous Burgers' equation with TemporalPinns, strong initial and boundaries conditions",
)
plt.show()
activating optimizer ScimbaAdam
epoch: 0, best loss: 9.388e-01
epoch: 0, loss: 9.388e-01
epoch: 2, best loss: 8.585e-01
epoch: 3, best loss: 7.603e-01
epoch: 4, best loss: 5.853e-01
epoch: 5, best loss: 4.516e-01
epoch: 6, best loss: 2.850e-01
epoch: 7, best loss: 1.456e-01
epoch: 8, best loss: 6.413e-02
epoch: 9, best loss: 3.995e-02
epoch: 31, best loss: 3.730e-02
epoch: 32, best loss: 3.096e-02
epoch: 33, best loss: 2.655e-02
epoch: 35, best loss: 2.475e-02
epoch: 36, best loss: 2.338e-02
epoch: 37, best loss: 1.985e-02
epoch: 38, best loss: 1.467e-02
epoch: 39, best loss: 9.146e-03
epoch: 40, best loss: 7.399e-03
epoch: 44, best loss: 6.265e-03
epoch: 45, best loss: 4.727e-03
epoch: 46, best loss: 4.547e-03
epoch: 51, best loss: 2.996e-03
epoch: 60, best loss: 2.940e-03
epoch: 61, best loss: 2.452e-03
epoch: 62, best loss: 2.206e-03
epoch: 66, best loss: 2.181e-03
epoch: 67, best loss: 1.772e-03
epoch: 71, best loss: 1.766e-03
epoch: 72, best loss: 1.503e-03
epoch: 77, best loss: 1.192e-03
epoch: 86, best loss: 1.158e-03
epoch: 87, best loss: 1.135e-03
epoch: 90, best loss: 1.128e-03
epoch: 91, best loss: 1.086e-03
epoch: 95, best loss: 1.057e-03
epoch: 97, best loss: 1.040e-03
epoch: 100, best loss: 1.030e-03
epoch: 100, loss: 1.030e-03
epoch: 105, best loss: 1.024e-03
epoch: 107, best loss: 1.009e-03
epoch: 111, best loss: 1.006e-03
epoch: 116, best loss: 9.931e-04
epoch: 117, best loss: 9.859e-04
epoch: 118, best loss: 9.790e-04
epoch: 121, best loss: 9.269e-04
epoch: 122, best loss: 8.573e-04
epoch: 154, best loss: 8.543e-04
epoch: 159, best loss: 8.340e-04
epoch: 165, best loss: 8.084e-04
epoch: 166, best loss: 7.997e-04
epoch: 178, best loss: 7.871e-04
epoch: 179, best loss: 7.547e-04
epoch: 190, best loss: 7.341e-04
epoch: 200, loss: 7.645e-04
epoch: 211, best loss: 7.274e-04
epoch: 212, best loss: 6.454e-04
epoch: 235, best loss: 6.283e-04
epoch: 249, best loss: 6.087e-04
epoch: 258, best loss: 5.869e-04
epoch: 265, best loss: 5.438e-04
epoch: 271, best loss: 5.197e-04
epoch: 300, loss: 5.997e-04
epoch: 310, best loss: 5.112e-04
epoch: 314, best loss: 5.007e-04
epoch: 350, best loss: 4.598e-04
epoch: 361, best loss: 4.590e-04
epoch: 366, best loss: 4.457e-04
epoch: 375, best loss: 4.436e-04
epoch: 381, best loss: 4.345e-04
epoch: 382, best loss: 4.150e-04
epoch: 400, loss: 4.794e-04
epoch: 406, best loss: 4.114e-04
epoch: 411, best loss: 3.979e-04
epoch: 421, best loss: 3.970e-04
epoch: 422, best loss: 3.877e-04
epoch: 424, best loss: 3.791e-04
epoch: 441, best loss: 3.772e-04
epoch: 446, best loss: 3.759e-04
epoch: 456, best loss: 3.753e-04
epoch: 486, best loss: 3.543e-04
epoch: 498, best loss: 3.508e-04
epoch: 500, loss: 4.322e-04
epoch: 525, best loss: 3.487e-04
epoch: 541, best loss: 3.333e-04
epoch: 555, best loss: 3.320e-04
epoch: 583, best loss: 3.277e-04
epoch: 584, best loss: 2.998e-04
epoch: 596, best loss: 2.932e-04
epoch: 600, loss: 3.292e-04
epoch: 632, best loss: 2.891e-04
epoch: 684, best loss: 2.659e-04
epoch: 700, loss: 2.789e-04
epoch: 717, best loss: 2.594e-04
epoch: 727, best loss: 2.572e-04
epoch: 765, best loss: 2.536e-04
epoch: 779, best loss: 2.520e-04
epoch: 786, best loss: 2.466e-04
epoch: 800, loss: 2.819e-04
epoch: 802, best loss: 2.432e-04
epoch: 834, best loss: 2.398e-04
epoch: 841, best loss: 2.398e-04
epoch: 852, best loss: 2.346e-04
epoch: 855, best loss: 2.064e-04
epoch: 900, loss: 2.189e-04
epoch: 982, best loss: 1.933e-04
Training done!
Final loss value: 2.074e-04
Best loss value: 1.933e-04
[2]:
# Fresh approximation space for the natural-gradient run; a single small
# hidden layer suffices because the preconditioner converges much faster.
space2 = NNxtSpace(
    1,
    0,
    GenericMLP,
    domain_x,
    sampler,
    layer_sizes=[32],
    post_processing=post_processing,
)
pde2 = ViscousBurgers1D(space2, init=initial_condition, f=f_rhs, g=g_bc)
# Energy natural gradient (ENG) preconditioned PINN; requires the functional
# form of the post-processing since it differentiates via torch.func.
pinn2 = NaturalGradientTemporalPinns(
    pde2,
    bc_type="strong",
    ic_type="strong",
    matrix_regularization=1e-4,
    functional_post_processing=functional_post_processing,
)
pinn2.solve(epochs=50, n_collocation=2000, verbose=True)
plot_abstract_approx_spaces(
    pinn2.space,
    domain_x,
    domain_mu,
    domain_t,
    time_values=[0.0, 0.2, t_max],
    loss=pinn2.losses,
    residual=pde2,
    solution=exact_solution,
    error=exact_solution,
    # Fixed title: this run uses NaturalGradientTemporalPinns, not TemporalPinns.
    title="learning sol of 1D viscous Burgers' equation with NaturalGradientTemporalPinns, strong initial and boundaries conditions",
)
plt.show()
activating optimizer ScimbaSGD
epoch: 0, best loss: 1.026e+00
epoch: 0, loss: 1.026e+00
epoch: 1, best loss: 1.521e-01
epoch: 2, best loss: 1.313e-01
epoch: 3, best loss: 1.146e-01
epoch: 5, best loss: 7.984e-02
epoch: 6, best loss: 3.626e-02
epoch: 7, best loss: 2.166e-02
epoch: 8, best loss: 9.185e-03
epoch: 9, best loss: 5.929e-03
epoch: 10, best loss: 7.804e-04
epoch: 11, best loss: 2.133e-04
epoch: 12, best loss: 1.566e-04
epoch: 13, best loss: 2.607e-05
epoch: 14, best loss: 2.073e-05
epoch: 15, best loss: 1.868e-05
epoch: 16, best loss: 1.590e-05
epoch: 17, best loss: 1.545e-05
epoch: 18, best loss: 1.318e-05
epoch: 19, best loss: 1.144e-05
epoch: 20, best loss: 9.190e-06
epoch: 21, best loss: 6.955e-06
epoch: 22, best loss: 6.318e-06
epoch: 23, best loss: 5.072e-06
epoch: 24, best loss: 4.661e-06
epoch: 25, best loss: 4.064e-06
epoch: 26, best loss: 3.342e-06
epoch: 27, best loss: 3.156e-06
epoch: 28, best loss: 2.850e-06
epoch: 29, best loss: 2.844e-06
epoch: 30, best loss: 2.709e-06
epoch: 33, best loss: 2.604e-06
epoch: 34, best loss: 2.540e-06
epoch: 35, best loss: 2.538e-06
epoch: 36, best loss: 2.379e-06
epoch: 38, best loss: 2.328e-06
epoch: 40, best loss: 2.273e-06
epoch: 42, best loss: 2.211e-06
epoch: 48, best loss: 2.040e-06
Training done!
Final loss value: 2.254e-06
Best loss value: 2.040e-06
[3]:
# Fresh approximation space for the Anagram-preconditioned run (same small MLP).
space3 = NNxtSpace(
    1,
    0,
    GenericMLP,
    domain_x,
    sampler,
    layer_sizes=[32],
    post_processing=post_processing,
)
pde3 = ViscousBurgers1D(space3, init=initial_condition, f=f_rhs, g=g_bc)
# Anagram preconditioning: svd_threshold truncates small singular values
# when inverting the Gram matrix of the tangent space.
pinn3 = AnagramTemporalPinns(
    pde3,
    bc_type="strong",
    ic_type="strong",
    svd_threshold=5e-3,
    functional_post_processing=functional_post_processing,
)
pinn3.solve(epochs=50, n_collocation=2000, verbose=True)
plot_abstract_approx_spaces(
    pinn3.space,
    domain_x,
    domain_mu,
    domain_t,
    time_values=[0.0, 0.2, t_max],
    loss=pinn3.losses,
    residual=pde3,
    solution=exact_solution,
    error=exact_solution,
    # Fixed title: this run uses AnagramTemporalPinns, not TemporalPinns.
    title="learning sol of 1D viscous Burgers' equation with AnagramTemporalPinns, strong initial and boundaries conditions",
)
plt.show()
activating optimizer ScimbaSGD
epoch: 0, best loss: 1.034e+00
epoch: 0, loss: 1.034e+00
epoch: 5, best loss: 9.914e-01
epoch: 6, best loss: 9.716e-01
epoch: 11, best loss: 9.019e-01
epoch: 12, best loss: 8.590e-01
epoch: 15, best loss: 7.456e-01
epoch: 16, best loss: 6.042e-01
epoch: 17, best loss: 5.910e-01
epoch: 18, best loss: 2.832e-01
epoch: 19, best loss: 9.400e-02
epoch: 20, best loss: 1.341e-02
epoch: 21, best loss: 5.326e-03
epoch: 22, best loss: 2.739e-03
epoch: 23, best loss: 2.022e-03
epoch: 24, best loss: 5.854e-04
epoch: 25, best loss: 3.184e-04
epoch: 26, best loss: 2.443e-04
epoch: 27, best loss: 2.161e-05
epoch: 28, best loss: 1.202e-05
epoch: 29, best loss: 5.080e-06
epoch: 30, best loss: 4.102e-06
epoch: 31, best loss: 2.451e-06
epoch: 32, best loss: 3.032e-07
epoch: 33, best loss: 2.376e-07
epoch: 34, best loss: 2.154e-07
epoch: 35, best loss: 2.016e-07
epoch: 41, best loss: 1.851e-07
Training done!
Final loss value: 1.934e-07
Best loss value: 1.851e-07
[4]:
# Side-by-side comparison of the three training strategies on the same figure.
plot_abstract_approx_spaces(
    (pinn.space, pinn2.space, pinn3.space),
    domain_x,
    domain_mu,
    domain_t,
    loss=(pinn.losses, pinn2.losses, pinn3.losses),
    residual=(pde, pde2, pde3),
    solution=exact_solution,
    error=exact_solution,
    title="learning sol of 1D viscous Burgers' equation with TemporalPinns, strong initial and boundaries conditions",
    titles=("no preconditioner", "ENG preconditioner", "Anagram preconditioner"),
)
plt.show()
[ ]: