Time discrete: kinetic Vlasov in 1D
Solves the Vlasov equation on a phase-space rectangle that is periodic in the space variable.
\[\partial_t u + v \partial_x u + \sin(x) \partial_v u = 0\]
where \(u: \mathbb{R} \times \mathbb{R} \times (0, T) \to \mathbb{R}\) is the unknown function, depending on space, velocity, and time.
The equation is solved using the neural semi-Lagrangian scheme, with either a classical Adam optimizer, or a natural gradient preconditioning.
[1]:
import torch
from scimba_torch import PI
from scimba_torch.approximation_space.nn_space import NNxvSpace
from scimba_torch.domain.meshless_domain.domain_1d import Segment1D
from scimba_torch.integration.monte_carlo import DomainSampler, TensorizedSampler
from scimba_torch.integration.monte_carlo_parameters import (
UniformParametricSampler,
UniformVelocitySamplerOnCuboid,
)
from scimba_torch.neural_nets.coordinates_based_nets.features import (
PeriodicMLP,
PeriodicResNet,
)
from scimba_torch.neural_nets.coordinates_based_nets.mlp import GenericMLP
from scimba_torch.neural_nets.coordinates_based_nets.res_net import GenericResNet
from scimba_torch.numerical_solvers.temporal_pde.neural_semilagrangian import (
Characteristic,
NeuralSemiLagrangian,
)
from scimba_torch.numerical_solvers.temporal_pde.time_discrete import (
TimeDiscreteCollocationProjector,
TimeDiscreteNaturalGradientProjector,
)
from scimba_torch.optimizers.optimizers_data import OptimizerData
from scimba_torch.physical_models.kinetic_pde.vlasov import Vlasov
# Normalization constant of the Maxwellian initial datum.
SQRT_2_PI = (2 * PI) ** 0.5

# Phase-space and parameter domains: periodic space interval, truncated
# velocity interval, and an empty list of physical parameters.
DOMAIN_X = Segment1D((0, 2 * PI), is_main_domain=True)
DOMAIN_V = Segment1D((-6, 6))
DOMAIN_MU = []

# One Monte-Carlo sampler per coordinate group, tensorized over (x, v, mu).
_samplers = [
    DomainSampler(DOMAIN_X),
    UniformVelocitySamplerOnCuboid(DOMAIN_V),
    UniformParametricSampler(DOMAIN_MU),
]
SAMPLER = TensorizedSampler(_samplers)
def initial_condition(x, v, mu):
    """Maxwellian initial datum u(x, v, 0) = exp(-v**2 / 2) / sqrt(2 * pi)."""
    velocity = v.get_components()
    gaussian = torch.exp(-0.5 * velocity**2)
    return gaussian / SQRT_2_PI
def electric_field(t, x, mu):
    """Time-independent electric field E(x) = sin(x)."""
    position = x.get_components()
    return torch.sin(position)
def opt():
    """Build the Adam optimizer configuration used by the projectors."""
    adam_config = {
        "name": "adam",
        "optimizerArgs": {"lr": 2.5e-2, "betas": (0.9, 0.999)},
    }
    return OptimizerData(adam_config)
def solve_with_neural_sl(
    T: float,
    dt: float,
    with_natural_gradient: bool = True,
    with_classical_projector: bool = False,
    N_c: int = 10_000,
    res_net: bool = False,
    periodic: bool = True,
):
    """Solve the 1D Vlasov equation with the neural semi-Lagrangian scheme.

    Depending on the flags, runs a solve with a classical Adam collocation
    projector, a solve with a natural-gradient projector, or both (each on
    a freshly built approximation space).

    Args:
        T: Final simulation time.
        dt: Time step of the semi-Lagrangian scheme.
        with_natural_gradient: If True, run the natural-gradient solve.
        with_classical_projector: If True, run the Adam collocation solve.
        N_c: Number of collocation points used at each training step.
        res_net: If True, use a ResNet backbone instead of an MLP.
        periodic: If True, use the periodic-feature network variant.

    Returns:
        The last scheme that was run (the natural-gradient one when both
        flags are set).

    Raises:
        ValueError: If both solver flags are False.
    """
    if not (with_classical_projector or with_natural_gradient):
        # Without this guard the final `return scheme` would raise an
        # UnboundLocalError, since `scheme` is only assigned inside the
        # flag-controlled branches below.
        raise ValueError(
            "At least one of with_classical_projector or "
            "with_natural_gradient must be True."
        )

    # Fixed seed so runs are reproducible and both branches start from
    # identically initialized networks.
    torch.random.manual_seed(0)

    # Select the network architecture; the periodic variants encode
    # periodicity of the spatial variable in the features.
    if res_net:
        if periodic:
            net = PeriodicResNet
        else:
            net = GenericResNet
    else:
        if periodic:
            net = PeriodicMLP
        else:
            net = GenericMLP

    if with_classical_projector:
        # Larger network, trained with Adam.
        space = NNxvSpace(
            1,
            0,
            net,
            DOMAIN_X,
            DOMAIN_V,
            SAMPLER,
            layer_sizes=[60, 60, 60],
            layer_structure=[50, 4, [1, 3]],
            activation_type="tanh",
        )
        pde = Vlasov(
            space,
            initial_condition=initial_condition,
            electric_field=electric_field,
        )
        characteristic = Characteristic(pde, periodic=True)
        projector = TimeDiscreteCollocationProjector(
            pde, rhs=initial_condition, optimizers=opt()
        )
        scheme = NeuralSemiLagrangian(characteristic, projector)
        scheme.initialization(epochs=750, n_collocation=N_c, verbose=True)
        # Save the fitted initial condition, then reload it into a fresh
        # projector so the time loop starts from the best approximation.
        scheme.projector.save("ini_transport_2D_SL")
        # NOTE(review): this projector is built from `space` while the one
        # above is built from `pde` — confirm both call signatures are
        # intended by the TimeDiscreteCollocationProjector API.
        scheme.projector = TimeDiscreteCollocationProjector(
            space, rhs=initial_condition, optimizers=opt()
        )
        scheme.projector.load("ini_transport_2D_SL")
        scheme.projector.space.load_from_best_approx()
        scheme.solve(dt=dt, final_time=T, epochs=750, n_collocation=N_c, verbose=True)

    if with_natural_gradient:
        # Smaller network: natural-gradient preconditioning converges with
        # fewer parameters and fewer epochs.
        space = NNxvSpace(
            1,
            0,
            net,
            DOMAIN_X,
            DOMAIN_V,
            SAMPLER,
            layer_sizes=[20, 20, 20],
            layer_structure=[20, 3, [1, 3]],
            activation_type="tanh",
        )
        pde = Vlasov(
            space,
            initial_condition=initial_condition,
            electric_field=electric_field,
        )
        characteristic = Characteristic(pde, periodic=True)
        projector = TimeDiscreteNaturalGradientProjector(pde, rhs=initial_condition)
        scheme = NeuralSemiLagrangian(characteristic, projector)
        scheme.initialization(epochs=100, n_collocation=N_c, verbose=True)
        scheme.solve(dt=dt, final_time=T, epochs=100, n_collocation=N_c, verbose=True)

    return scheme
if __name__ == "__main__":
    # Solve up to T = 3 with two large time steps (dt = 1.5), using the
    # natural-gradient projector on a periodic ResNet backbone.
    scheme = solve_with_neural_sl(
        T=3.0,
        dt=1.5,
        with_natural_gradient=True,
        with_classical_projector=False,
        N_c=64**2,
        res_net=True,
        periodic=True,
    )
if __name__ == "__main__":
    from scimba_torch.plots.plot_time_discrete_scheme import plot_time_discrete_scheme
    import matplotlib.pyplot as plt

    # Plot the approximate solution at each discrete time of the scheme.
    plot_time_discrete_scheme(scheme, aspect="auto")
    plt.show()
activating optimizer ScimbaSGD
epoch: 0, best loss: 1.395e-01
epoch: 0, loss: 1.395e-01
epoch: 2, best loss: 1.291e-01
epoch: 3, best loss: 1.045e-01
epoch: 4, best loss: 4.707e-02
epoch: 5, best loss: 1.200e-02
epoch: 6, best loss: 9.319e-03
epoch: 7, best loss: 7.118e-03
epoch: 8, best loss: 5.505e-03
epoch: 9, best loss: 4.923e-03
epoch: 10, best loss: 1.621e-03
epoch: 11, best loss: 5.103e-04
epoch: 12, best loss: 4.398e-04
epoch: 13, best loss: 1.066e-06
epoch: 14, best loss: 9.065e-09
epoch: 15, best loss: 6.101e-09
epoch: 16, best loss: 3.910e-09
epoch: 17, best loss: 3.393e-09
epoch: 18, best loss: 2.313e-09
epoch: 19, best loss: 1.667e-09
epoch: 21, best loss: 1.276e-09
epoch: 22, best loss: 1.244e-09
epoch: 23, best loss: 1.054e-09
epoch: 25, best loss: 9.919e-10
epoch: 26, best loss: 9.019e-10
epoch: 27, best loss: 8.866e-10
epoch: 28, best loss: 8.509e-10
epoch: 29, best loss: 6.667e-10
epoch: 30, best loss: 6.573e-10
epoch: 31, best loss: 6.548e-10
epoch: 32, best loss: 5.975e-10
epoch: 35, best loss: 5.152e-10
epoch: 38, best loss: 5.072e-10
epoch: 39, best loss: 4.509e-10
epoch: 43, best loss: 3.915e-10
epoch: 47, best loss: 3.471e-10
epoch: 51, best loss: 2.931e-10
epoch: 52, best loss: 2.925e-10
epoch: 57, best loss: 2.567e-10
epoch: 58, best loss: 2.544e-10
epoch: 59, best loss: 2.522e-10
epoch: 61, best loss: 2.303e-10
epoch: 62, best loss: 2.240e-10
epoch: 68, best loss: 1.912e-10
epoch: 75, best loss: 1.638e-10
epoch: 84, best loss: 1.384e-10
epoch: 85, best loss: 1.373e-10
epoch: 93, best loss: 1.140e-10
Training done!
Final loss value: 1.223e-10
Best loss value: 1.140e-10
epoch: 0, best loss: 8.235e-03
epoch: 0, loss: 8.235e-03
epoch: 1, best loss: 7.342e-03
epoch: 2, best loss: 6.741e-03
epoch: 3, best loss: 6.148e-03
epoch: 4, best loss: 5.244e-03
epoch: 5, best loss: 4.091e-03
epoch: 6, best loss: 4.081e-03
epoch: 7, best loss: 2.558e-03
epoch: 9, best loss: 1.824e-03
epoch: 10, best loss: 1.615e-03
epoch: 11, best loss: 1.012e-03
epoch: 12, best loss: 5.459e-04
epoch: 13, best loss: 1.897e-04
epoch: 14, best loss: 8.429e-05
epoch: 15, best loss: 4.051e-05
epoch: 16, best loss: 1.898e-05
epoch: 17, best loss: 1.043e-05
epoch: 18, best loss: 5.945e-06
epoch: 19, best loss: 5.787e-06
epoch: 20, best loss: 2.389e-06
epoch: 21, best loss: 1.383e-07
epoch: 22, best loss: 8.469e-08
epoch: 23, best loss: 7.625e-08
epoch: 24, best loss: 5.865e-08
epoch: 25, best loss: 5.074e-08
epoch: 26, best loss: 4.407e-08
epoch: 27, best loss: 4.118e-08
epoch: 28, best loss: 3.886e-08
epoch: 29, best loss: 3.244e-08
epoch: 30, best loss: 3.226e-08
epoch: 31, best loss: 3.081e-08
epoch: 32, best loss: 2.729e-08
epoch: 34, best loss: 2.685e-08
epoch: 35, best loss: 2.385e-08
epoch: 36, best loss: 2.214e-08
epoch: 38, best loss: 2.034e-08
epoch: 40, best loss: 1.980e-08
epoch: 41, best loss: 1.901e-08
epoch: 42, best loss: 1.733e-08
epoch: 43, best loss: 1.691e-08
epoch: 45, best loss: 1.679e-08
epoch: 46, best loss: 1.556e-08
epoch: 47, best loss: 1.505e-08
epoch: 49, best loss: 1.412e-08
epoch: 51, best loss: 1.386e-08
epoch: 53, best loss: 1.300e-08
epoch: 54, best loss: 1.275e-08
epoch: 55, best loss: 1.242e-08
epoch: 56, best loss: 1.228e-08
epoch: 59, best loss: 1.168e-08
epoch: 60, best loss: 1.102e-08
epoch: 64, best loss: 9.554e-09
epoch: 65, best loss: 9.511e-09
epoch: 66, best loss: 8.889e-09
epoch: 70, best loss: 8.385e-09
epoch: 73, best loss: 7.878e-09
epoch: 77, best loss: 7.391e-09
epoch: 79, best loss: 7.100e-09
epoch: 80, best loss: 7.060e-09
epoch: 81, best loss: 6.935e-09
epoch: 84, best loss: 6.626e-09
epoch: 86, best loss: 6.538e-09
epoch: 87, best loss: 6.414e-09
epoch: 89, best loss: 5.990e-09
epoch: 95, best loss: 5.749e-09
epoch: 96, best loss: 5.658e-09
epoch: 99, best loss: 5.321e-09
Training done!
Final loss value: 6.387e-09
Best loss value: 5.321e-09
epoch: 0, best loss: 8.099e-03
epoch: 0, loss: 8.099e-03
epoch: 1, best loss: 7.176e-03
epoch: 2, best loss: 5.078e-03
epoch: 3, best loss: 4.014e-03
epoch: 4, best loss: 3.266e-03
epoch: 5, best loss: 2.338e-03
epoch: 6, best loss: 1.439e-03
epoch: 7, best loss: 4.898e-04
epoch: 8, best loss: 3.831e-04
epoch: 9, best loss: 7.434e-05
epoch: 10, best loss: 5.374e-05
epoch: 11, best loss: 6.186e-06
epoch: 12, best loss: 1.194e-06
epoch: 13, best loss: 9.904e-07
epoch: 14, best loss: 9.320e-07
epoch: 15, best loss: 7.153e-07
epoch: 16, best loss: 4.183e-07
epoch: 17, best loss: 3.251e-07
epoch: 19, best loss: 2.462e-07
epoch: 21, best loss: 2.426e-07
epoch: 22, best loss: 2.031e-07
epoch: 23, best loss: 1.782e-07
epoch: 24, best loss: 1.553e-07
epoch: 27, best loss: 1.384e-07
epoch: 29, best loss: 1.304e-07
epoch: 30, best loss: 1.230e-07
epoch: 32, best loss: 1.068e-07
epoch: 34, best loss: 1.038e-07
epoch: 36, best loss: 9.462e-08
epoch: 38, best loss: 9.036e-08
epoch: 40, best loss: 9.034e-08
epoch: 41, best loss: 7.673e-08
epoch: 42, best loss: 7.565e-08
epoch: 43, best loss: 7.212e-08
epoch: 47, best loss: 6.994e-08
epoch: 48, best loss: 6.798e-08
epoch: 49, best loss: 6.415e-08
epoch: 50, best loss: 6.101e-08
epoch: 51, best loss: 5.768e-08
epoch: 52, best loss: 5.504e-08
epoch: 53, best loss: 5.419e-08
epoch: 56, best loss: 5.234e-08
epoch: 62, best loss: 4.598e-08
epoch: 64, best loss: 4.522e-08
epoch: 68, best loss: 4.421e-08
epoch: 70, best loss: 4.180e-08
epoch: 73, best loss: 3.969e-08
epoch: 74, best loss: 3.932e-08
epoch: 78, best loss: 3.905e-08
epoch: 79, best loss: 3.800e-08
epoch: 80, best loss: 3.798e-08
epoch: 81, best loss: 3.758e-08
epoch: 83, best loss: 3.581e-08
epoch: 84, best loss: 3.487e-08
epoch: 87, best loss: 3.279e-08
epoch: 90, best loss: 3.176e-08
epoch: 98, best loss: 3.099e-08
Training done!
Final loss value: 4.594e-08
Best loss value: 3.099e-08
[ ]: