Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Nbody ode #102

Open
wants to merge 19 commits into
base: master
Choose a base branch
from
2 changes: 1 addition & 1 deletion flowpm/__init__.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
from .utils import cic_paint, cic_readout
from .tfpm import nbody, linear_field, lpt_init
from .tfpm import nbody, linear_field, lpt_init, make_ode_fn
from .pk import power_spectrum
import flowpm.cosmology as cosmology
import flowpm.tfbackground as background
Expand Down
57 changes: 57 additions & 0 deletions flowpm/jax2tf_save_model.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
import tensorflow as tf
import jax
import numpy as np
import jax.numpy as jnp
from jax.experimental import jax2tf
import haiku as hk
import sonnet as snt
import tree
import pickle
from flowpm.nn import NeuralSplineFourierFilter


def fun(x, a):
  """Apply a freshly-built spline Fourier filter to scales `x` at scale factor `a`."""
  filter_module = NeuralSplineFourierFilter(n_knots=16, latent_size=32)
  return filter_module(x, a)


# Turn `fun` into a pure (params, x, a) -> y function; the RNG argument is
# dropped because the filter uses no stochastic haiku modules.
fun = hk.without_apply_rng(hk.transform(fun))
# NOTE(review): hard-coded absolute path, and pickle.load is unsafe on
# untrusted files -- confirm the provenance of this params file.
params = pickle.load(
    open("/local/home/dl264294/flowpm/notebooks/camels_25_64_pkloss.params",
         "rb"))


def create_variable(path, value):
  """Wrap `value` in a tf.Variable named after its haiku parameter path.

  Path components are joined with '/' and '~' is replaced by '_' so the
  name is a valid TensorFlow variable name.
  """
  flat_name = '/'.join(str(part) for part in path).replace('~', '_')
  return tf.Variable(value, name=flat_name)


class JaxNSFF(snt.Module):
  """Sonnet wrapper around the jax2tf-converted spline filter.

  The haiku parameter tree is mirrored as tf.Variables so TensorFlow
  tracks them for SavedModel export.
  """

  def __init__(self, params, apply_fn, name=None):
    super().__init__(name=name)
    # One tf.Variable per leaf of the haiku parameter tree.
    self._params = tree.map_structure_with_path(create_variable, params)
    converted = jax2tf.convert(lambda p, x, a: apply_fn(p, x, a))
    # The converted function is already a TF computation; skip autograph.
    self._apply = tf.autograph.experimental.do_not_convert(converted)

  def __call__(self, input1, input2):
    return self._apply(self._params, input1, input2)


# Instantiate the wrapper holding the pre-trained parameters.
net = JaxNSFF(params, fun.apply)


# Concrete serving signature: a 128^3 field and a scalar scale factor.
# autograph is disabled because the body is already a pure TF computation.
@tf.function(
    autograph=False,
    input_signature=[
        tf.TensorSpec([128, 128, 128]),
        tf.TensorSpec([]),
    ])
def forward(x, a):
  return net(x, a)


# Bundle the forward function and its variables into a SavedModel.
# NOTE(review): hard-coded output path -- adjust before reuse.
to_save = tf.Module()
to_save.forward = forward
to_save.params = list(net.variables)
tf.saved_model.save(to_save, "/local/home/dl264294/flowpm/saved_model")
80 changes: 80 additions & 0 deletions flowpm/neural_ode_nbody.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
import tensorflow as tf
import numpy as np
import flowpm
from flowpm.kernels import fftk, longrange_kernel, gradient_kernel, laplace_kernel
from flowpm.utils import cic_readout, compensate_cic, c2r3d, r2c3d
from flowpm.cosmology import Planck15

# Load the SavedModel exported by flowpm/jax2tf_save_model.py (the learned
# PM correction filter, used below via `loaded.forward`).
# NOTE(review): module-level side effect with a hard-coded absolute path;
# importing this module fails if the file is absent -- confirm intent.
loaded = tf.saved_model.load("/local/home/dl264294/flowpm/saved_model")


def make_neural_ode_fn(nc, batch_size):
  """Build the right-hand side of the N-body ODE with a neural PM correction.

  Parameters
  ----------
  nc: int
    Number of cells per side of the mesh.

  batch_size: int
    Number of simulations evolved in the batch.

  Returns
  -------
  neural_nbody_ode: callable
    Function (a, state, Omega_c, sigma8, Omega_b, n_s, h, w0) returning a
    tensor of shape (2, batch_size, npart, 3) with the drift and kick terms.
  """

  def neural_nbody_ode(a, state, Omega_c, sigma8, Omega_b, n_s, h, w0):
    """
    Estimate the time derivative of the state at scale factor `a`.

    Parameters:
    -----------
    a : array_like or tf.TensorArray
      Scale factor

    state: tensor
      Input state tensor of shape (2, batch_size, npart, 3);
      state[0] holds positions, state[1] velocities.

    Omega_c, sigma8, Omega_b, n_s, h, w0 : Scalar float Tensor
      Cosmological parameters

    Returns
    -------
    tensor of shape (2, batch_size, npart, 3)
      Stack of dpos (drift, position derivative, shape (batch_size, npart, 3))
      and dvel (kick, velocity derivative, same shape).
    """

    pos = state[0]
    vel = state[1]
    kvec = fftk([nc, nc, nc], symmetric=False)
    cosmo = flowpm.cosmology.Planck15(
        Omega_c=Omega_c, sigma8=sigma8, Omega_b=Omega_b, n_s=n_s, h=h, w0=w0)
    # Paint the particles onto the mesh and move to Fourier space.
    delta = flowpm.cic_paint(tf.zeros([batch_size, nc, nc, nc]), pos)
    delta_k = r2c3d(delta)

    # Computes gravitational potential
    lap = tf.cast(laplace_kernel(kvec), tf.complex64)
    fknlrange = longrange_kernel(kvec, r_split=0)
    kweight = lap * fknlrange
    pot_k = tf.multiply(delta_k, kweight)

    # Apply the learned correction filter loaded at module import;
    # kk is the wavenumber magnitude (each component normalized by pi).
    kk = tf.math.sqrt(sum((ki / np.pi)**2 for ki in kvec))
    pot_k = pot_k * tf.cast(
        (1. + loaded.forward(tf.cast(kk, tf.float32), tf.cast(a, tf.float32))),
        tf.complex64)

    # Computes gravitational forces: read the potential gradient out at the
    # particle positions, one spatial axis at a time.
    forces = tf.stack([
        flowpm.cic_readout(
            c2r3d(tf.multiply(pot_k, gradient_kernel(kvec, i))), pos)
        for i in range(3)
    ],
                      axis=-1)
    forces = forces * 1.5 * cosmo.Omega_m

    #Computes the update of position (drift)
    dpos = 1. / (a**3 * flowpm.tfbackground.E(cosmo, a)) * vel

    #Computes the update of velocity (kick)
    dvel = 1. / (a**2 * flowpm.tfbackground.E(cosmo, a)) * forces

    return tf.stack([dpos, dvel], axis=0)

  return neural_nbody_ode
58 changes: 58 additions & 0 deletions flowpm/nn.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
"""Stolen from JaxPM Github repo https://github.com/DifferentiableUniverseInitiative/JaxPM/blob/main/jaxpm/nn.py"""
import jax
import jax.numpy as jnp
import haiku as hk


def _deBoorVectorized(x, t, c, p):
  """Evaluate the B-spline S(x) via de Boor's recursion.

  Args
  ----
  x: evaluation position(s)
  t: knot vector, padded with repeated end knots
  c: control points
  p: degree of the B-spline
  """
  # Index of the knot interval containing each x.
  seg = jnp.digitize(x, t) - 1

  # Seed with the p+1 control points that influence this interval, then
  # run the triangular de Boor recursion in place.
  pts = [c[seg - p + i] for i in range(p + 1)]
  for level in range(1, p + 1):
    for i in range(p, level - 1, -1):
      left = t[i + seg - p]
      right = t[i + 1 + seg - level]
      frac = (x - left) / (right - left)
      pts[i] = (1.0 - frac) * pts[i - 1] + frac * pts[i]
  return pts[p]


class NeuralSplineFourierFilter(hk.Module):
  """A rotationally invariant filter parameterized by
  a b-spline with parameters specified by a small NN."""

  def __init__(self, n_knots=8, latent_size=16, name=None):
    """
    n_knots: number of control points for the spline
    latent_size: width of the two hidden Linear layers
    """
    super().__init__(name=name)
    self.n_knots = n_knots
    self.latent_size = latent_size

  def __call__(self, x, a):
    """
    x: array, scale, normalized to fftfreq default
    a: scalar, scale factor

    Returns the filter value at each entry of `x`.
    """
    # Small MLP with sine activations, conditioned on the scale factor.
    # NOTE: the creation order of the hk.Linear modules determines haiku's
    # parameter names -- do not reorder, or pickled params will not load.
    net = jnp.sin(hk.Linear(self.latent_size)(jnp.atleast_1d(a)))
    net = jnp.sin(hk.Linear(self.latent_size)(net))

    # Predict spline control values (w) and knot increments (k).
    w = hk.Linear(self.n_knots + 1)(net)
    k = hk.Linear(self.n_knots - 1)(net)

    # make sure the knots sum to 1 and are in the interval 0,1
    k = jnp.concatenate([jnp.zeros((1,)), jnp.cumsum(jax.nn.softmax(k))])

    # Anchor the first control value at zero.
    w = jnp.concatenate([jnp.zeros((1,)), w])

    # Augment with repeating points so the cubic spline is clamped at both ends.
    ak = jnp.concatenate([jnp.zeros((3,)), k, jnp.ones((3,))])

    # x / sqrt(3) presumably rescales so the 3D diagonal Nyquist scale maps
    # to 1 (TODO confirm); clip just below 1 to stay inside the last knot span.
    return _deBoorVectorized(jnp.clip(x / jnp.sqrt(3), 0, 1 - 1e-4), ak, w, 3)
2 changes: 1 addition & 1 deletion flowpm/raytracing.py
Original file line number Diff line number Diff line change
Expand Up @@ -185,6 +185,6 @@ def convergenceBorn(cosmo,
) * constant_factor * density_normalization
im = interpolation(p, dx, r, field_npix, coords)
convergence += im * tf.reshape(
tf.clip_by_value(1. - (r / r_s), 0, 1000), [1, 1, -1])
tf.clip_by_value(1. - (r / r_s), 0, 1000), [-1, 1, 1])

return convergence
129 changes: 129 additions & 0 deletions flowpm/redshift.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,129 @@
import tensorflow as tf
import flowpm.scipy.integrate as integrate
import flowpm.scipy.interpolate as interpolate
import flowpm
from astropy.io import fits
from flowpm.NLA_IA import k_IA
import astropy.units as u
import numpy as np


def LSST_Y1_tomog(cosmology,
                  lensplanes,
                  box_size,
                  z_source,
                  z,
                  nz,
                  field_npix,
                  field_size,
                  nbin,
                  batch_size=1,
                  use_A_ia=False,
                  Aia=None):
  """Takes a list of lensplanes and a redshift distribution and returns a
  stacked convergence map for each tomographic bin.

  Parameters:
  -----------

  cosmology: `Cosmology`,
    cosmology object.

  lensplanes: list of tuples (r, a, density_plane),
    lens planes to use

  box_size: float
    Transverse comoving size of the simulation volume [Mpc/h]

  z_source: array_like or tf.TensorArray
    Redshift of the source plane

  z: array_like or tf.TensorArray
    Redshift-coordinates where the n(z) is evaluated

  nz: array_like or tf.TensorArray of shape ([nbin, z])
    User-defined n(z) distribution.

  field_npix: int
    Resolution of the final interpolated plane

  field_size: float
    Angular size of the interpolated field [degrees]

  nbin: int
    Number of photometric bins to use.

  batch_size: int
    Size of batches

  use_A_ia: Boolean
    If true, the function will return the stacked convergence map for the
    IA (intrinsic alignment) signal; if false, the stacked convergence map
    for the lensing signal.

  Aia: Float or None (default)
    Amplitude parameter AI, describes the strength of the tidal coupling.

  Returns
  -------
  tom_kappa: tf.TensorArray [nbins, field_npix, field_npix]
    Stacked convergence maps for each tomographic bin

  Note:
  -------
  Details of the redshift distribution used can be found in this paper:
  https://arxiv.org/pdf/2111.04917.pdf
  """

  # Angular grid of pixel coordinates, converted to radians for raytracing.
  xgrid, ygrid = np.meshgrid(
      np.linspace(0, field_size, field_npix,
                  endpoint=False),  # range of X coordinates
      np.linspace(0, field_size, field_npix,
                  endpoint=False))  # range of Y coordinates
  coords = np.stack([xgrid, ygrid], axis=0) * u.deg
  c = coords.reshape([2, -1]).T.to(u.rad)
  if use_A_ia is not False:
    # Intrinsic-alignment branch: build one IA kappa plane per source
    # redshift, then integrate each against the bin's n(z) weights.
    sum_kappa = []
    for j in range(len(z_source)):
      im_IA = flowpm.raytracing.interpolation(
          lensplanes[j][-1],
          dx=box_size / 2048,
          r_center=lensplanes[j][0],
          field_npix=field_npix,
          coords=c)
      k_ia = k_IA(cosmology, lensplanes[j][1], im_IA, Aia)
      sum_kappa.append(k_ia[0])
    tom_kappa = [
        integrate.trapz(
            tf.reshape(
                interpolate.interp_tf(z_source, z,
                                      tf.cast(nz[i], dtype=tf.float32)),
                [-1, 1, 1]) * sum_kappa, z_source) for i in range(nbin)
    ]
  else:
    # Lensing branch: Born-approximation convergence per source plane,
    # then the same n(z)-weighted integral over source redshift.
    m = flowpm.raytracing.convergenceBorn(
        cosmology,
        lensplanes,
        dx=box_size / 2048,
        dz=box_size,
        coords=c,
        z_source=z_source,
        field_npix=field_npix)
    tom_kappa = [
        integrate.trapz(
            tf.reshape(interpolate.interp_tf(z_source, z, nz[i]), [-1, 1, 1]) *
            m, z_source) for i in range(nbin)
    ]
  return tom_kappa


def systematic_shift(z, bias):
  """Implements a systematic shift in a redshift distribution.

  Parameters:
  -----------

  z: array_like or tf.TensorArray
    Photometric redshift array

  bias: float value
    Nuisance parameter defining the uncertainty of the redshift
    distribution

  Returns
  -------
  tf.Tensor
    Shifted redshifts `z - bias`, clipped to the interval [0, 50].
  """
  shifted = tf.convert_to_tensor(z, dtype=tf.float32) - bias
  return tf.clip_by_value(shifted, 0, 50)
24 changes: 24 additions & 0 deletions flowpm/scipy/integrate.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,3 +34,27 @@ def simps(f, a, b, N=128):
y = f(x)
S = dx / 3 * tf.reduce_sum(y[0:-1:2] + 4 * y[1::2] + y[2::2], axis=0)
return S


def trapz(y, x):
  """Unequal-space trapezoidal rule.

  Approximate the integral of y with respect to x based on the trapezoidal
  rule; x and y must have the same length along the integration axis.
  The rule approximates \\int_a^b f(x) dx by the sum:
  \\sum_{i=1}^{N} (x_i - x_{i-1}) (f(x_{i-1}) + f(x_i)) / 2

  Parameters
  ----------
  y : array_like or tf.TensorArray
    vector of dependent variables

  x : array_like or tf.TensorArray
    vector of independent variables

  Returns
  -------
  float or array_like or tf.TensorArray
    Approximation of the integral of y with respect to x using the
    trapezoidal rule with subintervals of unequal length.
  """
  # NOTE(review): the reshape to [-1, 1, 1] assumes y is rank 3 with the
  # integration axis first (as used by redshift.LSST_Y1_tomog) -- confirm
  # before calling with other shapes.
  T = tf.reduce_sum(
      (tf.reshape(x[1:] - x[:-1], [-1, 1, 1])) * (y[1:] + y[:-1]) / 2, axis=0)
  return T
Loading