r"""Solves a 2D Poisson PDE with Dirichlet boundary conditions using PINNs.

.. math::
    -\Delta u = f \quad \text{in } \Omega

where :math:`x = (x_1, x_2) \in \Omega = (-1, 1) \times (-1, 1)` and
:math:`f` is chosen such that
:math:`u(x_1, x_2) = \sin(\pi x_1) \sin(\pi x_2)`, and :math:`g = 0`.

The homogeneous Dirichlet boundary conditions are enforced either strongly
(via a post-processing factor that vanishes on the boundary) or weakly
(via a penalized boundary loss).

The neural network is a simple MLP (Multilayer Perceptron). The optimization
is done using either Adam, Energy Natural Gradient descent, or SS-BFGS.
"""

import timeit

import jax
import jax.numpy as jnp
import matplotlib.pyplot as plt

from scimba_jax.domains.meshless_domains.domains_2d import Square2D
from scimba_jax.nonlinear_approximation.approximation_spaces.approximation_spaces import (
    ApproximationSpace,
)
from scimba_jax.nonlinear_approximation.integration.monte_carlo import (
    DomainSampler,
    TensorizedSampler,
)
from scimba_jax.nonlinear_approximation.integration.monte_carlo_parameters import (
    UniformParametricSampler,
)
from scimba_jax.nonlinear_approximation.networks.mlp import MLP
from scimba_jax.nonlinear_approximation.numerical_solvers.projectors import Projector
from scimba_jax.physical_models.elliptic_pde.laplacians import LaplacianDirichletND
from scimba_jax.plots.plots_nd import (
    plot_abstract_approx_space,
    plot_abstract_approx_spaces,
)

# Collocation-point counts and per-optimizer epoch budgets.
N_COLLOC = 1000
N_BC_COLLOC = 2000
N_EPOCHS = 50
N_EPOCHS_ADAM = 1000
N_EPOCHS_SSBFGS = 100

key = jax.random.PRNGKey(0)


def f_rhs(xy: jnp.ndarray, mu: jnp.ndarray) -> jnp.ndarray:
    """Right-hand side f such that -Δu = f for u = sin(πx₁) sin(πx₂).

    ``xy`` is a single point (indexed as ``xy[0:1]``, ``xy[1:2]``);
    ``mu`` is unused here (the problem has no physical parameters).
    """
    x, y = xy[0:1], xy[1:2]
    return 2 * jnp.pi**2 * jnp.sin(jnp.pi * x) * jnp.sin(jnp.pi * y)


def exact_sol(xy: jnp.ndarray, mu: jnp.ndarray) -> jnp.ndarray:
    """Exact solution u(x₁, x₂) = sin(πx₁) sin(πx₂) on a batch of points.

    ``xy`` is a batch (indexed as ``xy[:, 0:1]``, ``xy[:, 1:2]``);
    ``mu`` is unused.
    """
    x, y = xy[:, 0:1], xy[:, 1:2]
    return jnp.sin(jnp.pi * x) * jnp.sin(jnp.pi * y)


def post_processing(
    approx: jnp.ndarray, xy: jnp.ndarray, mu: jnp.ndarray
) -> jnp.ndarray:
    """Multiply the network output by a factor that vanishes on the boundary.

    (x + 1)(1 - x)(y + 1)(1 - y) is zero on ∂Ω for Ω = (-1, 1)², so the
    homogeneous Dirichlet condition is enforced strongly (exactly).
    """
    x, y = xy[0:1], xy[1:2]
    return approx * (x + 1.0) * (1.0 - x) * (y + 1.0) * (1.0 - y)


domain_x = [(-1.0, 1.0), (-1.0, 1.0)]
domain_mu = []  # no physical parameters

dx = Square2D(domain_x, is_main_domain=True)
sampler = TensorizedSampler(
    [DomainSampler(dx), UniformParametricSampler(domain_mu)], bc=False
)

# sample the domain
key, sample_dict = sampler.sample(key, N_COLLOC)

print("\n\n")
print("@@@@@@@@@@@@@@@ create a pinn with strong bc @@@@@@@@@@@@@@@@@@@@@")

nn = MLP(in_size=2, out_size=1, hidden_sizes=[32, 32], key=key)
space = ApproximationSpace(
    {"x": 2}, [(nn, "scalar", None)], model_type="x_mu", post_processing=post_processing
)

model = LaplacianDirichletND(dx, f_rhs, bc="strong")
pinn = Projector(model, space, sampler)

loss = pinn.evaluate_loss(space, sample_dict)
print("initial loss: ", loss)
losses = pinn.evaluate_losses(space, sample_dict)
print("initial losses: ", losses)

print("\n\n")
print("@@@@@@@@@@@@@@@ train with Energy Natural Gradient @@@@@@@@@@@@@@@@@@@@@")

start = timeit.default_timer()
new_loss, nspace, key, loss_history = pinn.project(space, key, N_EPOCHS, N_COLLOC)
pinn.losses.loss_history = loss_history
pinn.best_loss = new_loss
pinn.space = nspace
end = timeit.default_timer()
print("best loss: ", new_loss)
print("time for %d epochs: " % N_EPOCHS, end - start)

# for plotting pinns
plot_abstract_approx_space(
    pinn.space,  # the approximation space
    dx,  # the spatial domain
    domain_mu,  # the parameter's domain
    loss=pinn.losses,  # for plot of the loss: the losses
    residual=pinn.model,  # for plot of the residual: the pde
    solution=exact_sol,  # for plot of the exact sol: sol
    error=exact_sol,  # for plot of the error with respect to a func: the func
    cuts=[
        ([0.0, 0.0], [-0.5, 0.5]),
        ([0.0, 0.2], [0.0, 1.0]),
    ],  # for plots on linear cuts of dim d-1, a tuple (point, normal vector) representing the affine space of dim d-1
    draw_contours=True,
    n_drawn_contours=20,
)
plt.show()

print("\n\n")
print("@@@@@@@@@@@@@@@ train with Adam @@@@@@@@@@@@@@@@@@@@@@")

pinn2 = Projector(model, space, sampler, optimizer="Adam")
start = timeit.default_timer()
new_loss, nspace, key, loss_history = pinn2.project(space, key, N_EPOCHS_ADAM, N_COLLOC)
pinn2.losses.loss_history = loss_history
pinn2.best_loss = new_loss
pinn2.space = nspace
end = timeit.default_timer()
print("best loss: ", new_loss)
print("time for %d epochs: " % N_EPOCHS_ADAM, end - start)

print("\n\n")
print("@@@@@@@@@@@@@@@ train with SS-BFGS @@@@@@@@@@@@@@@@@@@@@@")

pinn3 = Projector(model, space, sampler, optimizer="SS-BFGS")
start = timeit.default_timer()
# BUGFIX: train the SS-BFGS projector (pinn3); the original called
# pinn.project here, so SS-BFGS never actually ran and pinn3 was
# assigned the ENG projector's results.
new_loss, nspace, key, loss_history = pinn3.project(
    space, key, N_EPOCHS_SSBFGS, N_COLLOC
)
pinn3.losses.loss_history = loss_history
pinn3.best_loss = new_loss
pinn3.space = nspace
end = timeit.default_timer()
print("best loss: ", new_loss)
print("time for %d epochs: " % N_EPOCHS_SSBFGS, end - start)

plot_abstract_approx_spaces(
    (pinn.space, pinn2.space, pinn3.space),  # the spaces
    dx,  # the spatial domain
    domain_mu,  # the parameter's domain
    loss=(pinn.losses, pinn2.losses, pinn3.losses),  # the losses
    residual=(pinn.model, pinn2.model, pinn3.model),  # the models
    solution=exact_sol,  # for plot of the exact sol: sol
    error=exact_sol,  # for plot of the error with respect to a func: the func
    draw_contours=True,
    n_drawn_contours=20,
)
plt.show()

print("\n\n")
print("@@@@@@@@@@@@@@@ create a pinn with weak bc @@@@@@@@@@@@@@@@@@@@@")

# bc=True: the sampler now also draws boundary collocation points.
sampler = TensorizedSampler(
    [DomainSampler(dx), UniformParametricSampler(domain_mu)], bc=True
)
model = LaplacianDirichletND(dx, f_rhs, bc="weak")
weights_dict = {"interior": [1.0], "boundary": [40.0]}
key, sample_dict = sampler.sample(key, N_COLLOC, N_BC_COLLOC)

nn = MLP(in_size=2, out_size=1, hidden_sizes=[32, 32], key=key)
# No post_processing here: the boundary condition is enforced weakly
# through the penalized boundary loss (weights_dict).
space = ApproximationSpace(
    {"x": 2},
    [(nn, "scalar", None)],
    model_type="x_mu",
)
pinn = Projector(model, space, sampler, weights=weights_dict)

loss = pinn.evaluate_loss(space, sample_dict)
print("initial loss: ", loss)
losses = pinn.evaluate_losses(space, sample_dict)
print("initial losses: ", losses)

print("\n\n")
print("@@@@@@@@@@@@@@@ train with Energy Natural Gradient @@@@@@@@@@@@@@@@@@@@@")

start = timeit.default_timer()
new_loss, nspace, key, loss_history = pinn.project(space, key, N_EPOCHS, N_COLLOC)
pinn.losses.loss_history = loss_history
pinn.best_loss = new_loss
pinn.space = nspace
end = timeit.default_timer()
print("best loss: ", new_loss)
print("time for %d epochs: " % N_EPOCHS, end - start)

plot_abstract_approx_space(
    pinn.space,  # the approximation space
    dx,  # the spatial domain
    domain_mu,  # the parameter's domain
    loss=pinn.losses,  # for plot of the loss: the losses
    residual=pinn.model,  # for plot of the residual: the pde
    solution=exact_sol,  # for plot of the exact sol: sol
    error=exact_sol,  # for plot of the error with respect to a func: the func
    cuts=[
        ([0.0, 0.0], [-0.5, 0.5]),
        ([0.0, 0.2], [0.0, 1.0]),
    ],  # for plots on linear cuts of dim d-1, a tuple (point, normal vector) representing the affine space of dim d-1
    draw_contours=True,
    n_drawn_contours=20,
)
plt.show()