r"""Solve a 2D Poisson PDE with Dirichlet boundary conditions using PINNs.

.. math::

    -\mu \Delta u & = f \quad \text{in } \Omega \\
    u & = 0 \quad \text{on } \partial \Omega

where :math:`x = (x_1, x_2) \in \Omega` (with :math:`\Omega` a square rotated
by :math:`\pi/3` and with a disk-shaped hole). Boundary conditions are
enforced weakly. The neural network used is a simple MLP (Multilayer
Perceptron), and the optimization is done using natural gradient descent.
"""

import timeit

import jax
import jax.numpy as jnp
import matplotlib.pyplot as plt

from scimba_jax.domains.domain_mapping import DomainMapping as Mapping
from scimba_jax.domains.meshless_domains.domains_2d import Disk2D, Square2D
from scimba_jax.nonlinear_approximation.approximation_spaces.approximation_spaces import (
    ApproximationSpace,
)
from scimba_jax.nonlinear_approximation.integration.monte_carlo import (
    DomainSampler,
    TensorizedSampler,
)
from scimba_jax.nonlinear_approximation.integration.monte_carlo_parameters import (
    UniformParametricSampler,
)
from scimba_jax.nonlinear_approximation.networks.mlp import MLP
from scimba_jax.nonlinear_approximation.numerical_solvers.projectors import Projector
from scimba_jax.physical_models.elliptic_pde.laplacians import LaplacianDirichletND
from scimba_jax.plots.plots_nd import (
    plot_abstract_approx_space,
)

N_COLLOC = 1000  # number of interior collocation points
N_BC_COLLOC = 1500  # number of boundary collocation points
N_EPOCHS = 200  # natural-gradient training epochs
N_EPOCHS_SSBFGS = 100  # NOTE(review): unused in this script — confirm before removing

key = jax.random.PRNGKey(0)


def f_rhs(xy: jnp.ndarray, mu: jnp.ndarray) -> jnp.ndarray:
    """Right-hand side :math:`f` of the Poisson problem at a single point.

    Args:
        xy: spatial point; ``xy[0:1]`` is x, ``xy[1:2]`` is y (shape ``(1,)`` slices).
        mu: PDE parameters (unused: the parametric domain is empty here).

    Returns:
        :math:`8\\pi^2 \\sin(2\\pi x)\\sin(2\\pi y)`, shape ``(1,)``.
    """
    x, y = xy[0:1], xy[1:2]
    return 8.0 * jnp.pi**2 * jnp.sin(2.0 * jnp.pi * x) * jnp.sin(2.0 * jnp.pi * y)


# Spatial domain: unit square with a disk-shaped hole, rotated by pi/3 around
# its center. No PDE parameters (empty parametric domain).
domain_x = [(0.0, 1.0), (0.0, 1.0)]
domain_mu = []

dx = Square2D(domain_x, is_main_domain=True)
dx.add_hole(Disk2D((0.5, 0.5), 0.2))
dx.set_mapping(
    Mapping.rot_2d(
        angle=jnp.pi / 3.0,
        center=jnp.array([0.5, 0.5]),
    ),
    [(-0.2, 1.2), (-0.2, 1.2)],
)

sampler = TensorizedSampler(
    [DomainSampler(dx), UniformParametricSampler(domain_mu)], bc=True
)

# Sample interior and boundary collocation points once up front.
key, sample_dict = sampler.sample(key, N_COLLOC, N_BC_COLLOC)

print("\n\n")
# Fixed banner: the PDE model below uses bc="weak" (weakly enforced BCs),
# matching the module docstring — the original message said "strong bc".
print("@@@@@@@@@@@@@@@ create a pinn with weak bc @@@@@@@@@@@@@@@@@@@@@")

nn = MLP(in_size=2, out_size=1, hidden_sizes=[20] * 3, key=key)
space = ApproximationSpace({"x": 2}, [(nn, "scalar", None)], model_type="x_mu")

# Weak Dirichlet enforcement: the boundary residual enters the loss with a
# larger weight (40x) instead of being imposed exactly by the ansatz.
model = LaplacianDirichletND(dx, f_rhs, bc="weak")
weights_dict = {"interior": [1.0], "boundary": [40.0]}
pinn = Projector(model, space, sampler, weights=weights_dict)

loss = pinn.evaluate_loss(space, sample_dict)
print("initial loss: ", loss)
losses = pinn.evaluate_losses(space, sample_dict)
print("initial losses: ", losses)

print("\n\n")
print("@@@@@@@@@@@@@@@ train with Energy Natural Gradient @@@@@@@@@@@@@@@@@@@@@")

start = timeit.default_timer()
new_loss, nspace, key, loss_history = pinn.project(
    space, key, N_EPOCHS, N_COLLOC, N_BC_COLLOC
)
pinn.losses.loss_history = loss_history
pinn.best_loss = new_loss
pinn.space = nspace
end = timeit.default_timer()

print("best loss: ", new_loss)
print(f"time for {N_EPOCHS} epochs: ", end - start)

plot_abstract_approx_space(
    pinn.space,  # the approximation space
    dx,  # the spatial domain
    domain_mu,  # the parameter's domain
    loss=pinn.losses,  # for plot of the loss: the losses
    residual=pinn.model,  # for plot of the residual: the pde
    cuts=[
        ([0.0, 0.0], [-0.5, 0.5]),
        ([0.0, 0.5], [-0.5, -0.5]),
    ],  # plots on linear cuts of dim d-1: (point, normal vector) affine spaces
    draw_contours=True,
    n_drawn_contours=20,
)
plt.show()