Flows: pendulum
Learns the flow of a pendulum ODE using different training strategies.
This example shows how to use different training strategies to learn the dynamics of a pendulum (i.e., its flow) with a varying pulsation (parameter \(\mu\)). The dynamics are given by Hamilton's equations (written out after the list below) with Hamiltonian \(H = \frac{p^2}{2} + \frac{\mu q^2}{2} + \frac{0.012\,\mu q^3}{3}\). The training data is generated using the explicit Verlet scheme. The following flows are compared:
A neural network (MLP) based flow.
A SympNet based flow.
An explicit Euler discretization of the flow with an MLP.
A symplectic Euler discretization of the flow (seen as a Hamiltonian flow) with an MLP.
A neural network based flow with an invertible architecture (InvertibleNet). This approach does not work well for this problem.
An explicit Euler discretization of the flow (seen as a Hamiltonian flow) with an MLP. This approach does not work well for this problem either.
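For reference, Hamilton's equations for this \(H\) read

\[
\dot q = \frac{\partial H}{\partial p} = p,
\qquad
\dot p = -\frac{\partial H}{\partial q} = -\mu q - 0.012\,\mu q^2,
\]

which is exactly what the helper functions s_dh_q (returning \(\partial H/\partial p\)) and s_dh_p (returning \(\partial H/\partial q\)) in the code below implement.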
[1]:
import matplotlib.pyplot as plt
import torch
from scimba_torch.flows.create_solution import (
create_solution,
solution_to_training_format,
)
from scimba_torch.flows.deep_flows import DiscreteFlowSpace
from scimba_torch.flows.discretization_based_flows import (
ExplicitEulerFlow,
ExplicitEulerHamiltonianFlow,
NeuralFlow,
SymplecticEulerFlowSep,
)
from scimba_torch.flows.flow_trainer import FlowTrainer, NaturalGradientFlowTrainer
from scimba_torch.neural_nets.coordinates_based_nets.mlp import GenericMLP
from scimba_torch.neural_nets.structure_preserving_nets.invertible_nn import (
InvertibleNet,
)
from scimba_torch.neural_nets.structure_preserving_nets.sympnet import SympNet
# %%
torch.manual_seed(0)
def s_dh_q(p, mu):
    # dH/dp = p: drives dq/dt in Hamilton's equations
    return p


def s_dh_p(q, mu):
    # dH/dq = mu*q + 0.012*mu*q^2: drives -dp/dt in Hamilton's equations
    return mu * q + mu * 0.012 * q**2
use_natural_gradient = True
N_simu = 300
Nt_train = 500
dt = 0.02
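# Random initial conditions and parameters: q0 in [1, 3), p0 in [0, 0.4), mu in [0.8, 1.8)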
q0 = 1.0 + torch.rand(N_simu) * 2.0
p0 = 0.2 * torch.rand(N_simu) * 2.0
x0 = torch.stack([q0, p0], dim=1)[:, None, :]
mu = (0.8 + torch.rand(N_simu))[:, None]
x, y = create_solution(x0, mu, Nt_train, dt, (s_dh_p, s_dh_q), solver="Verlet_explicit")
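# Flatten the Verlet trajectories into one-step training pairs: inputs
# concatenate the states (q, p) with mu, targets are the advanced states.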
mu_new = solution_to_training_format(x[1], solver="Verlet_explicit")
x_new = solution_to_training_format(x[2], solver="Verlet_explicit")
y_new = solution_to_training_format(y, solver="Verlet_explicit")
data = torch.cat([x_new, mu_new], dim=-1), y_new
# %%
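# Reference trajectory over a long horizon: Nt = 20000 steps (T = 400),
# far beyond the Nt_train = 500 steps seen during training.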
Nt = int(400 / dt)
q0ref = torch.tensor([1.4])
p0ref = torch.tensor([0.12])
x0ref = torch.stack([q0ref, p0ref], dim=1)[:, None, :]
muref = torch.tensor([0.7])[:, None]
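# Note: muref = 0.7 lies outside the training range [0.8, 1.8),
# so inference extrapolates in the parameter.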
xref, yref = create_solution(
x0ref, muref, Nt, dt, (s_dh_p, s_dh_q), solver="Verlet_explicit"
)
mu_testref = solution_to_training_format(xref[1], solver="Verlet_explicit")
x_testref = solution_to_training_format(xref[2], solver="Verlet_explicit")
y_testref = solution_to_training_format(yref, solver="Verlet_explicit")
# %%
print("#1: Learn the flow with a MLP")
trainer_type = NaturalGradientFlowTrainer if use_natural_gradient else FlowTrainer
epochs = 200 if use_natural_gradient else 5000
space0 = DiscreteFlowSpace(
2, 1, flow_type=NeuralFlow, net_type=GenericMLP, layer_sizes=[21] * 3, rollout=1
)
print("nb dof: ", space0.ndof)
trainer0 = trainer_type(space0, data)
trainer0.solve(epochs=epochs, batch_size=100, verbose=True)
plt.plot(yref[0, 0, :, 0], yref[0, 0, :, 1])  # reference orbit in phase space (q, p)
solution0 = space0.inference(x_testref[0, :][None,], mu_testref[0, :][None,], Nt)
plt.plot(solution0[:, 0, 0].detach().cpu(), solution0[:, 0, 1].detach().cpu())
plt.show()
t = torch.linspace(0, Nt * dt, Nt)
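# q over time: reference vs. learned flow, split at the training horizon t = Nt_train * dt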
plt.plot(t[:-1], yref[0, 0, :, 0], label="ref")
plt.plot(t[:Nt_train], solution0[:Nt_train, 0, 0].detach().cpu(), label="trained")
plt.plot(
t[Nt_train - 1 : -1],
solution0[Nt_train - 1 : -1, 0, 0].detach().cpu(),
label="composed",
)
plt.legend()
plt.show()
# %%
print("#2: Learn the flow with a SympNet")
trainer_type = NaturalGradientFlowTrainer if use_natural_gradient else FlowTrainer
epochs = 200 if use_natural_gradient else 5000
space = DiscreteFlowSpace(
2, 1, flow_type=NeuralFlow, net_type=SympNet, layer_sizes=[31] * 3, rollout=1
)
trainer = trainer_type(space, data)
trainer.solve(epochs=epochs, batch_size=100, verbose=True)
plt.plot(yref[0, 0, :, 0], yref[0, 0, :, 1])
solution = space.inference(x_testref[0, :][None,], mu_testref[0, :][None,], Nt)
plt.plot(solution[:, 0, 0].detach().cpu(), solution[:, 0, 1].detach().cpu())
plt.show()
t = torch.linspace(0, Nt * dt, Nt)
plt.plot(t[:-1], yref[0, 0, :, 0], label="ref")
plt.plot(t[:Nt_train], solution[:Nt_train, 0, 0].detach().cpu(), label="trained")
plt.plot(
t[Nt_train - 1 : -1],
solution[Nt_train - 1 : -1, 0, 0].detach().cpu(),
label="composed",
)
plt.legend()
plt.show()
# %%
print("#3: Learn an explicit Euler discretization of the flow with a MLP")
trainer_type = NaturalGradientFlowTrainer if use_natural_gradient else FlowTrainer
epochs = 200 if use_natural_gradient else 5000
space2 = DiscreteFlowSpace(
2,
1,
flow_type=ExplicitEulerFlow,
net_type=GenericMLP,
layer_sizes=[21] * 3,
dt=0.02,
rollout=1,
)
print("nb dof: ", space2.ndof)
trainer2 = trainer_type(space2, data)
trainer2.solve(epochs=epochs, batch_size=100, verbose=True)
plt.plot(yref[0, 0, :, 0], yref[0, 0, :, 1])
solution2 = space2.inference(x_testref[0, :][None,], mu_testref[0, :][None,], Nt)
plt.plot(solution2[:, 0, 0].detach().cpu(), solution2[:, 0, 1].detach().cpu())
plt.show()
t = torch.linspace(0, Nt * dt, Nt)
plt.plot(t[:-1], yref[0, 0, :, 0], label="ref")
plt.plot(t[:Nt_train], solution2[:Nt_train, 0, 0].detach().cpu(), label="trained")
plt.plot(
t[Nt_train - 1 : -1],
solution2[Nt_train - 1 : -1, 0, 0].detach().cpu(),
label="composed",
)
plt.legend()
plt.show()
# %%
print(
    "#4: Learn a symplectic Euler discretization of the flow "
    + "(seen as a Hamiltonian flow) with an MLP"
)
trainer_type = NaturalGradientFlowTrainer if use_natural_gradient else FlowTrainer
epochs = 200 if use_natural_gradient else 5000
space3 = DiscreteFlowSpace(
2,
1,
flow_type=SymplecticEulerFlowSep,
net_type=GenericMLP,
layer_sizes=[18] * 3,
dt=0.02,
rollout=1,
)
print("nb dof: ", space3.ndof)
trainer3 = trainer_type(space3, data)
trainer3.solve(epochs=epochs, batch_size=100, verbose=True)
plt.plot(yref[0, 0, :, 0], yref[0, 0, :, 1])
solution3 = space3.inference(x_testref[0, :][None,], mu_testref[0, :][None,], Nt)
plt.plot(solution3[:, 0, 0].detach().cpu(), solution3[:, 0, 1].detach().cpu())
plt.show()
t = torch.linspace(0, Nt * dt, Nt)
plt.plot(t[:-1], yref[0, 0, :, 0], label="ref")
plt.plot(t[:Nt_train], solution3[:Nt_train, 0, 0].detach().cpu(), label="trained")
plt.plot(
t[Nt_train - 1 : -1],
solution3[Nt_train - 1 : -1, 0, 0].detach().cpu(),
label="composed",
)
plt.legend()
plt.show()
# %%
print("#5: Learn the flow with an invertible network")
trainer_type = NaturalGradientFlowTrainer if use_natural_gradient else FlowTrainer
epochs = 200 if use_natural_gradient else 5000
space4 = DiscreteFlowSpace(
2,
1,
flow_type=NeuralFlow,
net_type=InvertibleNet,
nb_layers=4,
layer_sizes=[12] * 2,
rollout=1,
)
print("nb dof: ", space4.ndof)
trainer4 = trainer_type(space4, data)
trainer4.solve(epochs=epochs, batch_size=100, verbose=True)
plt.plot(yref[0, 0, :, 0], yref[0, 0, :, 1])
solution4 = space4.inference(x_testref[0, :][None,], mu_testref[0, :][None,], Nt)
plt.plot(solution4[:, 0, 0].detach().cpu(), solution4[:, 0, 1].detach().cpu())
plt.show()
t = torch.linspace(0, Nt * dt, Nt)
plt.plot(t[:-1], yref[0, 0, :, 0], label="ref")
plt.plot(t[:Nt_train], solution4[:Nt_train, 0, 0].detach().cpu(), label="trained")
plt.plot(
t[Nt_train - 1 : -1],
solution4[Nt_train - 1 : -1, 0, 0].detach().cpu(),
label="composed",
)
plt.legend()
plt.show()
# %%
print(
    "#6: Learn an explicit Euler discretization of the flow "
    + "(seen as a Hamiltonian flow) with an MLP"
)
trainer_type = NaturalGradientFlowTrainer if use_natural_gradient else FlowTrainer
epochs = 200 if use_natural_gradient else 5000
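# Analytic potential V(q) = mu*q^2/2 + 0.012*mu*q^3/3: the position-dependent part
# of H (the kinetic term p^2/2 is presumably supplied by ExplicitEulerHamiltonianFlow).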
def potential(x, mu):
return 0.5 * mu[0] * x[0] ** 2.0 + mu[0] * 0.012 * x[0] ** 3 / 3.0
space5 = DiscreteFlowSpace(
2,
1,
flow_type=ExplicitEulerHamiltonianFlow,
net_type=GenericMLP,
layer_sizes=[10] * 3,
dt=0.02,
analytic_h=potential,
rollout=1,
)
print("nb dof: ", space5.ndof)
trainer5 = trainer_type(space5, data)
trainer5.solve(epochs=epochs, batch_size=100, verbose=True)
plt.plot(yref[0, 0, :, 0], yref[0, 0, :, 1])
solution5 = space5.inference(x_testref[0, :][None,], mu_testref[0, :][None,], Nt)
plt.plot(solution5[:, 0, 0].detach().cpu(), solution5[:, 0, 1].detach().cpu())
plt.show()
t = torch.linspace(0, Nt * dt, Nt)
plt.plot(t[:-1], yref[0, 0, :, 0], label="ref")
plt.plot(t[:Nt_train], solution5[:Nt_train, 0, 0].detach().cpu(), label="trained")
plt.plot(
t[Nt_train - 1 : -1],
solution5[Nt_train - 1 : -1, 0, 0].detach().cpu(),
label="composed",
)
plt.legend()
plt.show()
#1: Learn the flow with an MLP
nb dof: 1050
activating optimizer ScimbaSGD
epoch: 0, best loss: 1.730e+00
epoch: 0, loss: 1.730e+00
epoch: 2, best loss: 1.456e+00
epoch: 3, best loss: 9.449e-01
epoch: 4, best loss: 3.720e-01
epoch: 5, best loss: 8.957e-02
epoch: 6, best loss: 5.238e-02
epoch: 7, best loss: 2.096e-03
epoch: 8, best loss: 6.449e-04
epoch: 9, best loss: 1.604e-05
epoch: 10, best loss: 1.437e-05
epoch: 11, best loss: 6.312e-06
epoch: 12, best loss: 5.965e-06
epoch: 13, best loss: 2.545e-06
epoch: 15, best loss: 1.665e-06
epoch: 17, best loss: 9.166e-07
epoch: 18, best loss: 4.821e-07
epoch: 19, best loss: 3.216e-07
epoch: 25, best loss: 1.512e-07
epoch: 32, best loss: 1.158e-07
epoch: 33, best loss: 8.925e-08
epoch: 34, best loss: 4.282e-08
epoch: 52, best loss: 4.070e-08
epoch: 53, best loss: 2.101e-08
epoch: 60, best loss: 2.001e-08
epoch: 68, best loss: 1.680e-08
epoch: 76, best loss: 1.653e-08
epoch: 83, best loss: 1.573e-08
epoch: 84, best loss: 9.820e-09
epoch: 91, best loss: 7.554e-09
epoch: 98, best loss: 4.223e-09
epoch: 100, loss: 7.971e-09
epoch: 105, best loss: 3.836e-09
epoch: 143, best loss: 2.337e-09
Training done!
Final loss value: 8.778e-09
Best loss value: 2.337e-09
#2: Learn the flow with a SympNet
activating optimizer ScimbaSGD
epoch: 0, best loss: 1.113e+01
epoch: 0, loss: 1.113e+01
epoch: 1, best loss: 1.047e+01
epoch: 2, best loss: 4.446e+00
epoch: 5, best loss: 3.828e+00
epoch: 6, best loss: 2.749e+00
epoch: 7, best loss: 2.657e+00
epoch: 8, best loss: 2.641e+00
epoch: 9, best loss: 2.184e+00
epoch: 10, best loss: 1.874e+00
epoch: 14, best loss: 1.185e+00
epoch: 20, best loss: 1.114e+00
epoch: 28, best loss: 9.703e-01
epoch: 30, best loss: 7.886e-01
epoch: 31, best loss: 7.267e-01
epoch: 33, best loss: 6.542e-01
epoch: 35, best loss: 5.575e-01
epoch: 36, best loss: 5.020e-01
epoch: 37, best loss: 4.944e-01
epoch: 39, best loss: 4.842e-01
epoch: 40, best loss: 3.760e-01
epoch: 43, best loss: 2.528e-01
epoch: 52, best loss: 2.241e-01
epoch: 56, best loss: 1.924e-01
epoch: 57, best loss: 1.896e-01
epoch: 58, best loss: 1.245e-01
epoch: 59, best loss: 8.806e-02
epoch: 62, best loss: 5.962e-02
epoch: 63, best loss: 5.242e-02
epoch: 64, best loss: 3.170e-02
epoch: 65, best loss: 2.330e-02
epoch: 66, best loss: 1.489e-02
epoch: 68, best loss: 8.271e-03
epoch: 69, best loss: 5.511e-03
epoch: 70, best loss: 2.107e-03
epoch: 71, best loss: 1.871e-03
epoch: 72, best loss: 1.563e-03
epoch: 73, best loss: 2.832e-05
epoch: 76, best loss: 2.690e-05
epoch: 77, best loss: 2.278e-05
epoch: 78, best loss: 1.343e-05
epoch: 79, best loss: 1.284e-05
epoch: 80, best loss: 8.828e-06
epoch: 82, best loss: 2.285e-06
epoch: 87, best loss: 1.416e-06
epoch: 88, best loss: 6.112e-07
epoch: 91, best loss: 3.343e-07
epoch: 94, best loss: 3.023e-07
epoch: 98, best loss: 2.961e-07
epoch: 100, loss: 5.432e-07
epoch: 104, best loss: 2.480e-07
epoch: 107, best loss: 1.956e-07
epoch: 112, best loss: 1.783e-07
epoch: 120, best loss: 1.517e-07
epoch: 128, best loss: 4.737e-08
epoch: 186, best loss: 3.733e-08
epoch: 188, best loss: 3.580e-08
epoch: 199, best loss: 3.482e-08
Training done!
Final loss value: 5.049e-08
Best loss value: 3.482e-08
#3: Learn an explicit Euler discretization of the flow with an MLP
nb dof: 1050
activating optimizer ScimbaSGD
epoch: 0, best loss: 9.213e-04
epoch: 0, loss: 9.213e-04
epoch: 1, best loss: 7.321e-04
epoch: 2, best loss: 2.135e-04
epoch: 4, best loss: 4.798e-05
epoch: 5, best loss: 1.053e-06
epoch: 6, best loss: 2.987e-07
epoch: 7, best loss: 1.356e-07
epoch: 8, best loss: 1.201e-07
epoch: 9, best loss: 1.175e-07
epoch: 10, best loss: 4.852e-08
epoch: 11, best loss: 2.629e-08
epoch: 13, best loss: 1.983e-08
epoch: 14, best loss: 1.942e-08
epoch: 15, best loss: 1.322e-08
epoch: 18, best loss: 1.303e-08
epoch: 19, best loss: 1.286e-08
epoch: 20, best loss: 1.037e-08
epoch: 22, best loss: 8.807e-09
epoch: 29, best loss: 5.498e-09
epoch: 36, best loss: 5.054e-09
epoch: 37, best loss: 4.573e-09
epoch: 38, best loss: 4.553e-09
epoch: 39, best loss: 3.555e-09
epoch: 48, best loss: 2.542e-09
epoch: 61, best loss: 2.105e-09
epoch: 63, best loss: 1.548e-09
epoch: 88, best loss: 1.474e-09
epoch: 89, best loss: 1.161e-09
epoch: 100, loss: 1.352e-09
epoch: 110, best loss: 1.080e-09
epoch: 124, best loss: 5.955e-10
epoch: 163, best loss: 5.758e-10
epoch: 176, best loss: 5.396e-10
epoch: 187, best loss: 4.355e-10
epoch: 190, best loss: 3.925e-10
Training done!
Final loss value: 6.727e-10
Best loss value: 3.925e-10
#4: Learn a symplectic Euler discretization of the flow (seen as a Hamiltonian flow) with an MLP
nb dof: 1512
activating optimizer ScimbaSGD
epoch: 0, best loss: 1.036e-03
epoch: 0, loss: 1.036e-03
epoch: 1, best loss: 6.707e-04
epoch: 2, best loss: 3.748e-04
epoch: 3, best loss: 2.507e-04
epoch: 4, best loss: 1.530e-04
epoch: 5, best loss: 1.184e-04
epoch: 6, best loss: 1.055e-04
epoch: 7, best loss: 1.169e-05
epoch: 8, best loss: 8.615e-07
epoch: 9, best loss: 4.437e-07
epoch: 10, best loss: 3.261e-07
epoch: 11, best loss: 2.355e-07
epoch: 12, best loss: 1.953e-07
epoch: 14, best loss: 1.761e-07
epoch: 25, best loss: 1.487e-07
epoch: 37, best loss: 1.373e-07
epoch: 53, best loss: 1.262e-07
epoch: 73, best loss: 1.185e-07
epoch: 100, loss: 1.637e-07
Training done!
Final loss value: 1.629e-07
Best loss value: 1.185e-07
#5: Learn the flow with an invertible network
nb dof: 1632
activating optimizer ScimbaSGD
epoch: 0, best loss: 3.240e-01
epoch: 0, loss: 3.240e-01
epoch: 1, best loss: 4.588e-02
epoch: 2, best loss: 1.853e-03
epoch: 3, best loss: 5.806e-04
epoch: 4, best loss: 5.037e-04
epoch: 8, best loss: 4.563e-04
epoch: 29, best loss: 4.562e-04
epoch: 100, loss: 5.695e-04
epoch: 107, best loss: 4.137e-04
Training done!
Final loss value: 5.542e-04
Best loss value: 4.137e-04
#6: Learn an explicit Euler discretization of the flow (seen as a Hamiltonian flow) with an MLP
nb dof: 270
activating optimizer ScimbaSGD
epoch: 0, best loss: 6.299e-04
epoch: 0, loss: 6.299e-04
epoch: 1, best loss: 5.132e-04
epoch: 4, best loss: 4.223e-04
epoch: 5, best loss: 3.098e-04
epoch: 6, best loss: 1.950e-04
epoch: 7, best loss: 1.585e-04
epoch: 8, best loss: 1.364e-04
epoch: 9, best loss: 1.158e-04
epoch: 10, best loss: 6.346e-05
epoch: 11, best loss: 3.330e-05
epoch: 12, best loss: 2.746e-05
epoch: 13, best loss: 1.233e-05
epoch: 14, best loss: 7.007e-06
epoch: 15, best loss: 3.398e-06
epoch: 16, best loss: 1.729e-06
epoch: 17, best loss: 1.078e-06
epoch: 21, best loss: 4.994e-07
epoch: 22, best loss: 3.871e-07
epoch: 28, best loss: 3.610e-07
epoch: 31, best loss: 3.565e-07
epoch: 32, best loss: 3.413e-07
epoch: 33, best loss: 3.013e-07
epoch: 38, best loss: 2.629e-07
epoch: 45, best loss: 1.973e-07
epoch: 51, best loss: 1.418e-07
epoch: 100, loss: 2.187e-07
Training done!
Final loss value: 2.191e-07
Best loss value: 1.418e-07
[ ]: