PINNs: viscous Burgers 1D

Solves the viscous Burgers' equation (a nonlinear advection-diffusion equation) in 1D using a PINN.

\[\begin{split}\left\{\begin{array}{rl}\partial_t u + \partial_x \frac{u^2}{2} - \sigma \partial_{xx} u & = f \text{ in } \Omega \times (0, T) \\ u & = g \text{ on } \partial \Omega \times (0, T) \\ u & = u_0 \text{ on } \Omega \times \{0\} \end{array}\right.\end{split}\]

where \(u: \Omega \times (0, T) \to \mathbb{R}\) is the unknown function, \(\Omega \subset \mathbb{R}\) is the spatial domain and \((0, T) \subset \mathbb{R}\) is the time domain. Dirichlet boundary conditions are prescribed.

The equation is solved on a segment domain; the boundary and initial conditions are imposed strongly, via a post-processing of the network output. Three training strategies are compared: standard PINNs, PINNs with energy natural gradient (ENG) preconditioning, and PINNs with Anagram preconditioning.
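The test case uses the manufactured solution \(u(t, x) = \sin(2 \pi x) \, e^{-t}\) with homogeneous Dirichlet data \(g = 0\); substituting it into the equation gives the forcing term implemented in f_rhs below,

\[f(t, x) = e^{-t} \sin(2 \pi x) \left( 2 \pi \left( \cos(2 \pi x) \, e^{-t} + 2 \pi \sigma \right) - 1 \right).\]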

[1]:
import matplotlib.pyplot as plt
import torch

from scimba_torch.approximation_space.nn_space import NNxtSpace
from scimba_torch.domain.meshless_domain.domain_1d import Segment1D
from scimba_torch.integration.monte_carlo import DomainSampler, TensorizedSampler
from scimba_torch.integration.monte_carlo_parameters import UniformParametricSampler
from scimba_torch.integration.monte_carlo_time import UniformTimeSampler
from scimba_torch.neural_nets.coordinates_based_nets.mlp import GenericMLP
from scimba_torch.numerical_solvers.temporal_pde.pinns import (
    AnagramTemporalPinns,
    NaturalGradientTemporalPinns,
    TemporalPinns,
)
from scimba_torch.optimizers.optimizers_data import OptimizerData
from scimba_torch.physical_models.temporal_pde.abstract_temporal_pde import (
    FirstOrderTemporalPDE,
)
from scimba_torch.plots.plots_nd import plot_abstract_approx_spaces
from scimba_torch.utils.scimba_tensors import LabelTensor

torch.manual_seed(0)

PI = torch.pi

class ViscousBurgers1D(FirstOrderTemporalPDE):
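    """1D viscous Burgers' equation with Dirichlet boundary conditions."""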

    def __init__(self, space, init, f, g, **kwargs):
        super().__init__(space, linear=False, **kwargs)
        self.init = init
        self.f = f
        self.g = g

        self.residual_size = 1
        self.bc_residual_size = 1
        self.ic_residual_size = 1

        self.σ = kwargs.get("sigma", 1e-2)  # viscosity coefficient

    def space_operator(self, w, t, x, mu):
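        # Spatial part of the residual: ∂x(u²/2) - σ ∂xx u.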
        u = w.get_components()
        u2_x = self.space.grad(u**2 / 2, x)
        u_xx = self.space.grad(self.space.grad(u, x), x)
        return u2_x - self.σ * u_xx

    def time_operator(self, w, t, x, mu):
        return self.grad(w.get_components(), t)

    def bc_operator(self, w, t, x, n, mu):
        return w.get_components()

    def rhs(self, w, t, x, mu):
        return self.f(w, t, x, mu, self.σ)

    def bc_rhs(self, w, t, x, n, mu):
        return self.g(w, t, x, n, mu)

    def initial_condition(self, x, mu):
        return self.init(x, mu)

    def functional_operator(self, func, t, x, mu, theta):
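        # Same residual, but acting on the network as a pure function of
        # (t, x, mu, theta) via torch.func: this functional form is what the
        # natural-gradient and Anagram solvers differentiate w.r.t. theta.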
        # space operator
        def u2(t, x, mu, theta):
            return func(t, x, mu, theta) ** 2 / 2

        dx_u2 = torch.func.jacrev(u2, 1)(t, x, mu, theta).squeeze()

        dx_u = torch.func.jacrev(func, 1)
        d2x_u = torch.func.jacrev(dx_u, 1)(t, x, mu, theta).squeeze()

        space_op = dx_u2 - self.σ * d2x_u

        # time operator
        time_op = torch.func.jacrev(func, 0)(t, x, mu, theta).squeeze()

        # return the sum of both operators
        return (time_op + space_op).unsqueeze(0)

    # Dirichlet conditions
    def functional_operator_bc(self, func, t, x, n, mu, theta):
        return func(t, x, mu, theta)

    def functional_operator_ic(self, func, x, mu, theta):
        t = torch.zeros_like(x)
        return func(t, x, mu, theta)


def f_rhs(w, t, x, mu, σ):
    x_ = x.get_components()
    t_ = t.get_components()

    exp_neg_t = torch.exp(-t_)
    sin_term = torch.sin(2 * PI * x_)
    cos_term = torch.cos(2 * PI * x_)

    return exp_neg_t * sin_term * (2 * PI * (cos_term * exp_neg_t + 2 * PI * σ) - 1)


def g_bc(w, t, x, n, mu):
    x1 = x.get_components()
    return 0 * x1


def functional_exact(t, x, mu):
    return torch.sin(2 * PI * x) * torch.exp(-t)


def exact_solution(t, x, mu):
    return functional_exact(t.get_components(), x.get_components(), mu.get_components())


def initial_condition(x, mu):
    t = LabelTensor(torch.zeros_like(x.x))
    return exact_solution(t, x, mu)


def post_processing(inputs, t, x, mu):
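    # Strong enforcement of the initial and boundary conditions:
    # u(t, x) = u0(x) + t * x * (1 - x) * N(t, x),
    # so u(0, x) = u0(x) and u(t, 0) = u(t, 1) = 0 by construction.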
    x1 = x.get_components()
    t1 = t.get_components()
    return initial_condition(x, mu) + inputs * t1 * x1 * (1 - x1)


def functional_post_processing(func, t, x, mu, theta):
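    # torch.func-compatible analogue of post_processing, used by the
    # preconditioned solvers; here t and x arrive as plain tensors.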
    tz = torch.zeros_like(x)
    ini = functional_exact(tz, x, mu)
    return ini + func(t, x, mu, theta) * t[0] * x[0] * (1 - x[0])


domain_x = Segment1D((0, 1), is_main_domain=True)
domain_mu = []

t_min, t_max = 0.0, 0.4
domain_t = (t_min, t_max)

sampler = TensorizedSampler(
    [
        UniformTimeSampler(domain_t),
        DomainSampler(domain_x),
        UniformParametricSampler(domain_mu),
    ]
)

space = NNxtSpace(
    1,  # (assumed) dimension of the unknown u
    0,  # (assumed) number of physical parameters (domain_mu is empty)
    GenericMLP,
    domain_x,
    sampler,
    layer_sizes=[20, 40, 20],
    post_processing=post_processing,
)
pde = ViscousBurgers1D(space, init=initial_condition, f=f_rhs, g=g_bc)

opt_1 = {
    "name": "adam",
    "optimizer_args": {"lr": 1.8e-2, "betas": (0.9, 0.999)},
}
pinn = TemporalPinns(
    pde,
    bc_type="strong",
    ic_type="strong",
    optimizers=OptimizerData(opt_1),
)

pinn.solve(epochs=1000, n_collocation=3000, verbose=True)

plot_abstract_approx_spaces(
    pinn.space,
    domain_x,
    domain_mu,
    domain_t,
    time_values=[0.0, 0.2, t_max],
    loss=pinn.losses,
    residual=pde,
    solution=exact_solution,
    error=exact_solution,
    title="learning sol of 1D viscous Burgers' equation with TemporalPinns, strong initial and boundaries conditions",
)

plt.show()
activating optimizer ScimbaAdam
epoch: 0, best loss: 9.124e-01
epoch: 0,      loss: 9.124e-01
epoch: 1, best loss: 8.456e-01
epoch: 2, best loss: 7.034e-01
epoch: 3, best loss: 5.224e-01
epoch: 4, best loss: 3.160e-01
epoch: 5, best loss: 1.818e-01
epoch: 6, best loss: 9.085e-02
epoch: 7, best loss: 5.400e-02
epoch: 30, best loss: 5.226e-02
epoch: 31, best loss: 4.066e-02
epoch: 32, best loss: 2.901e-02
epoch: 33, best loss: 2.354e-02
epoch: 34, best loss: 2.282e-02
epoch: 36, best loss: 2.183e-02
epoch: 37, best loss: 1.851e-02
epoch: 38, best loss: 1.166e-02
epoch: 39, best loss: 6.419e-03
epoch: 40, best loss: 4.977e-03
epoch: 44, best loss: 2.813e-03
epoch: 54, best loss: 2.532e-03
epoch: 58, best loss: 2.308e-03
epoch: 59, best loss: 1.881e-03
epoch: 63, best loss: 1.793e-03
epoch: 64, best loss: 1.641e-03
epoch: 68, best loss: 1.508e-03
epoch: 69, best loss: 1.436e-03
epoch: 73, best loss: 1.343e-03
epoch: 74, best loss: 1.204e-03
epoch: 78, best loss: 1.177e-03
epoch: 79, best loss: 1.151e-03
epoch: 83, best loss: 1.126e-03
epoch: 88, best loss: 1.088e-03
epoch: 99, best loss: 1.074e-03
epoch: 100,      loss: 1.119e-03
epoch: 102, best loss: 1.049e-03
epoch: 108, best loss: 1.009e-03
epoch: 117, best loss: 9.982e-04
epoch: 118, best loss: 9.593e-04
epoch: 126, best loss: 9.534e-04
epoch: 137, best loss: 9.003e-04
epoch: 162, best loss: 8.778e-04
epoch: 164, best loss: 8.511e-04
epoch: 172, best loss: 8.246e-04
epoch: 195, best loss: 7.455e-04
epoch: 200,      loss: 9.757e-04
epoch: 209, best loss: 7.389e-04
epoch: 214, best loss: 7.039e-04
epoch: 227, best loss: 6.818e-04
epoch: 230, best loss: 6.782e-04
epoch: 235, best loss: 6.622e-04
epoch: 245, best loss: 6.250e-04
epoch: 251, best loss: 6.218e-04
epoch: 254, best loss: 6.126e-04
epoch: 262, best loss: 5.984e-04
epoch: 264, best loss: 5.972e-04
epoch: 269, best loss: 5.823e-04
epoch: 282, best loss: 5.726e-04
epoch: 293, best loss: 5.376e-04
epoch: 300,      loss: 6.139e-04
epoch: 304, best loss: 5.139e-04
epoch: 330, best loss: 4.857e-04
epoch: 349, best loss: 4.854e-04
epoch: 361, best loss: 4.847e-04
epoch: 373, best loss: 4.705e-04
epoch: 375, best loss: 4.543e-04
epoch: 394, best loss: 4.477e-04
epoch: 400,      loss: 5.126e-04
epoch: 408, best loss: 4.339e-04
epoch: 427, best loss: 4.134e-04
epoch: 440, best loss: 4.033e-04
epoch: 472, best loss: 3.755e-04
epoch: 483, best loss: 3.752e-04
epoch: 500,      loss: 3.895e-04
epoch: 501, best loss: 3.660e-04
epoch: 513, best loss: 3.447e-04
epoch: 551, best loss: 3.365e-04
epoch: 566, best loss: 3.298e-04
epoch: 600, best loss: 3.259e-04
epoch: 600,      loss: 3.259e-04
epoch: 643, best loss: 2.920e-04
epoch: 691, best loss: 2.804e-04
epoch: 700, best loss: 2.784e-04
epoch: 700,      loss: 2.784e-04
epoch: 708, best loss: 2.769e-04
epoch: 730, best loss: 2.685e-04
epoch: 738, best loss: 2.634e-04
epoch: 760, best loss: 2.549e-04
epoch: 800,      loss: 2.712e-04
epoch: 827, best loss: 2.538e-04
epoch: 836, best loss: 2.517e-04
epoch: 840, best loss: 2.511e-04
epoch: 844, best loss: 2.502e-04
epoch: 877, best loss: 2.471e-04
epoch: 896, best loss: 2.332e-04
epoch: 900,      loss: 2.794e-04
epoch: 935, best loss: 2.220e-04
epoch: 977, best loss: 2.190e-04
Training done!
    Final loss value: 2.558e-04
     Best loss value: 2.190e-04
../../_images/example_notebooks_pinns_viscous_burger_1_1.png
[2]:
space2 = NNxtSpace(
    1,
    0,
    GenericMLP,
    domain_x,
    sampler,
    layer_sizes=[32],
    post_processing=post_processing,
)
pde2 = ViscousBurgers1D(space2, init=initial_condition, f=f_rhs, g=g_bc)
pinn2 = NaturalGradientTemporalPinns(
    pde2,
    bc_type="strong",
    ic_type="strong",
    matrix_regularization=1e-4,
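    # assumed: Tikhonov-style shift keeping the ENG Gram matrix invertible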
    functional_post_processing=functional_post_processing,
)

pinn2.solve(epochs=50, n_collocation=2000, verbose=True)

plot_abstract_approx_spaces(
    pinn2.space,
    domain_x,
    domain_mu,
    domain_t,
    time_values=[0.0, 0.2, t_max],
    loss=pinn2.losses,
    residual=pde2,
    solution=exact_solution,
    error=exact_solution,
    title="learning sol of 1D viscous Burgers' equation with TemporalPinns, strong initial and boundaries conditions",
)

plt.show()
activating optimizer ScimbaSGD
epoch: 0, best loss: 1.006e+00
epoch: 0,      loss: 1.006e+00
epoch: 1, best loss: 2.154e-01
epoch: 3, best loss: 2.011e-01
epoch: 4, best loss: 1.983e-01
epoch: 5, best loss: 1.656e-01
epoch: 6, best loss: 1.628e-01
epoch: 7, best loss: 1.221e-01
epoch: 8, best loss: 9.699e-02
epoch: 9, best loss: 9.430e-02
epoch: 10, best loss: 5.154e-02
epoch: 11, best loss: 4.064e-02
epoch: 12, best loss: 3.385e-02
epoch: 13, best loss: 2.922e-02
epoch: 14, best loss: 2.741e-02
epoch: 15, best loss: 2.397e-02
epoch: 16, best loss: 1.800e-02
epoch: 17, best loss: 1.467e-02
epoch: 18, best loss: 6.617e-03
epoch: 19, best loss: 4.033e-03
epoch: 20, best loss: 3.153e-03
epoch: 22, best loss: 2.590e-03
epoch: 23, best loss: 2.109e-03
epoch: 24, best loss: 1.404e-03
epoch: 25, best loss: 1.182e-03
epoch: 26, best loss: 1.100e-03
epoch: 27, best loss: 9.745e-04
epoch: 28, best loss: 9.663e-04
epoch: 29, best loss: 7.831e-04
epoch: 30, best loss: 3.081e-04
epoch: 31, best loss: 2.493e-04
epoch: 32, best loss: 1.689e-04
epoch: 35, best loss: 1.464e-04
epoch: 36, best loss: 1.314e-04
epoch: 37, best loss: 1.244e-04
epoch: 38, best loss: 1.203e-04
epoch: 39, best loss: 1.046e-04
epoch: 40, best loss: 1.033e-04
epoch: 41, best loss: 1.032e-04
epoch: 42, best loss: 9.780e-05
epoch: 43, best loss: 8.909e-05
epoch: 44, best loss: 8.082e-05
epoch: 45, best loss: 6.787e-05
epoch: 48, best loss: 6.394e-05
epoch: 49, best loss: 5.790e-05
Training done!
    Final loss value: 5.228e-05
     Best loss value: 5.790e-05
../../_images/example_notebooks_pinns_viscous_burger_2_1.png
[3]:
space3 = NNxtSpace(
    1,
    0,
    GenericMLP,
    domain_x,
    sampler,
    layer_sizes=[32],
    post_processing=post_processing,
)
pde3 = ViscousBurgers1D(space3, init=initial_condition, f=f_rhs, g=g_bc)
pinn3 = AnagramTemporalPinns(
    pde3,
    bc_type="strong",
    ic_type="strong",
    svd_threshold=5e-3,
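    # assumed: singular values below this threshold are discarded in the
    # pseudo-inverse used by the Anagram update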
    functional_post_processing=functional_post_processing,
)

pinn3.solve(epochs=50, n_collocation=2000, verbose=True)

plot_abstract_approx_spaces(
    pinn3.space,
    domain_x,
    domain_mu,
    domain_t,
    time_values=[0.0, 0.2, t_max],
    loss=pinn3.losses,
    residual=pde3,
    solution=exact_solution,
    error=exact_solution,
    title="learning sol of 1D viscous Burgers' equation with TemporalPinns, strong initial and boundaries conditions",
)

plt.show()
activating optimizer ScimbaSGD
epoch: 0, best loss: 9.656e-01
epoch: 0,      loss: 9.656e-01
epoch: 3, best loss: 9.317e-01
epoch: 5, best loss: 8.498e-01
epoch: 6, best loss: 3.940e-01
epoch: 7, best loss: 3.680e-01
epoch: 8, best loss: 3.558e-01
epoch: 9, best loss: 3.248e-01
epoch: 10, best loss: 3.137e-01
epoch: 12, best loss: 1.708e-01
epoch: 13, best loss: 1.240e-01
epoch: 14, best loss: 1.118e-01
epoch: 15, best loss: 4.398e-02
epoch: 16, best loss: 7.700e-03
epoch: 17, best loss: 2.904e-03
epoch: 18, best loss: 7.091e-04
epoch: 19, best loss: 2.857e-04
epoch: 20, best loss: 8.477e-05
epoch: 21, best loss: 1.377e-05
epoch: 22, best loss: 4.462e-07
epoch: 23, best loss: 2.579e-07
epoch: 24, best loss: 1.451e-07
epoch: 25, best loss: 1.207e-07
epoch: 26, best loss: 9.018e-08
epoch: 27, best loss: 8.475e-08
epoch: 28, best loss: 7.668e-08
epoch: 31, best loss: 7.530e-08
epoch: 34, best loss: 7.374e-08
epoch: 35, best loss: 7.266e-08
epoch: 37, best loss: 7.183e-08
epoch: 38, best loss: 7.176e-08
epoch: 40, best loss: 7.047e-08
Training done!
    Final loss value: 7.374e-08
     Best loss value: 7.047e-08
../../_images/example_notebooks_pinns_viscous_burger_3_1.png
[4]:
plot_abstract_approx_spaces(
    (pinn.space, pinn2.space, pinn3.space),
    domain_x,
    domain_mu,
    domain_t,
    loss=(pinn.losses, pinn2.losses, pinn3.losses),
    residual=(pde, pde2, pde3),
    solution=exact_solution,
    error=exact_solution,
    title="learning sol of 1D viscous Burgers' equation with TemporalPinns, strong initial and boundaries conditions",
    titles=("no preconditioner", "ENG preconditioner", "Anagram preconditioner"),
)

plt.show()
../../_images/example_notebooks_pinns_viscous_burger_4_0.png