Commit 997d2595 authored by vlabarre

Install pythran with conda rather than with pip; this fixes problems

parent b52d62843afa
Pipeline #31390 passed with stage in 16 minutes and 58 seconds
#!/bin/bash
source ../setup_env_base.sh
set -e
-conda env remove --name env_fluidsim -y
+conda env remove --name env_fluidsim #-y
# Note: the Python version is important for package compatibility
-conda create -n env_fluidsim # python=3.8 #mamba
+conda create -y -n env_fluidsim mamba
conda activate env_fluidsim
-mamba install -y \
-  ipython scipy "blas-devel[build=*openblas]" \
-  matplotlib pandas psutil pillow scikit-image \
-  mako clangdev \
+# you can also use mamba
+conda install ipython python=3.8.8 scipy "blas-devel[build=*openblas]" \
+  matplotlib pandas psutil pillow scikit-image mako clangdev \
  mercurial hg-git hg-evolve
-# Using Pythran master > 0.11.0 (possible performance boost)
-pip install git+https://github.com/serge-sans-paille/pythran#egg=pythran
+#pip install git+https://github.com/serge-sans-paille/pythran#egg=pythran
+#pip install pythran
+conda install pythran  # (Vincent) I don't know why, but it is important to install with conda (and not pip)
-#pip install hg-fluiddyn transonic setuptools cython pytest
+pip install hg-fluiddyn transonic setuptools cython pytest
# Better than conda install because we don't want the fftw conda package
pip install pyfftw
pip install mpi4py --no-binary mpi4py
-# to install hdf5 and h5py parallel
-export HDF5_DIR=/gpfslocalsup/spack_soft/hdf5/1.12.0/gcc-8.3.1-qj43pa5rathksrgn4sx2ici42tg75nun
-CC="mpicc" HDF5_MPI="ON" pip install --no-deps --no-binary=h5py h5py
+# Install hdf5 and h5py parallel
+# (Check the HDF5_DIR with module show)
+HDF5_DIR=/gpfslocalsup/spack_soft/hdf5/1.12.0/gcc-8.3.1-qj43pa5rathksrgn4sx2ici42tg75nun
+CC="mpicc" HDF5_MPI="ON" HDF5_DIR=/gpfslocalsup/spack_soft/hdf5/1.12.0/gcc-8.3.1-qj43pa5rathksrgn4sx2ici42tg75nun pip install --no-deps --no-binary=h5py h5py
python -c "import h5py; assert h5py.h5.get_config().mpi, 'h5py not built with MPI support'"
@@ -3,7 +3,6 @@ rm -rf fluiddyn fluidfft
hg clone https://foss.heptapod.net/fluiddyn/fluiddyn
hg clone https://foss.heptapod.net/fluiddyn/fluidfft
-hg clone https://foss.heptapod.net/fluiddyn/transonic # TODO: remove this line when topic fix-bug-mpi-barrier-jean-zay is merged
hg clone https://foss.heptapod.net/fluiddyn/transonic
cd $WORK/Dev/fluidsim/doc/examples/clusters/jean_zay/install
@@ -7,20 +7,21 @@ hg up cluster-jean-zay # cluster-jean-zay should be replaced by default when merged
make clean
pip install -e .
-# TODO: Remove the line with transonic when fix-bug-mpi-barrier-jean-zay is merged
cd $WORK/Dev/transonic
hg pull
-hg up fix-bug-mpi-barrier-jean-zay
+hg up default
make clean
pip install -e .
cd $WORK/Dev/fluidfft
hg pull
hg up default
# pip install -e . seems to run something with mpi, which is forbidden
python setup.py develop
-# TODO: QUESTION for Vincent: does this work for fluidfft?
# pip install -e . --no-build-isolation
+# Vincent: No
cd $WORK/Dev/fluidsim
make cleanall
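The MPI restriction mentioned above is the cluster policy: on Jean Zay, MPI programs cannot be launched on the login nodes, so a build step that apparently initializes MPI (as pip install -e . seems to do for fluidfft) fails there, and python setup.py develop avoids it.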
@@ -13,7 +13,7 @@ pkgdir="${WORK}/.local/${pkgname}/${pkgver}"
# C and Fortran 90 MPI compilers
CC=mpicc
#FC=mpif90
-FC='mpif90 -nofor_main'
+#FC='mpif90 -nofor_main'
# FFTW
# ----
@@ -54,7 +54,7 @@ build() {
LDFLAGS="-lm" ./configure --enable-gnu --enable-openmpi --enable-fftw \
--with-fftw=/gpfslocalsup/spack_soft/fftw/3.3.8/gcc-8.3.1-yancwmvy7k2qaxtswmzlvyda5bsahmoh/ \
-  --prefix=${pkgdir} CC=${CC} CCLD=${FC}
+  --prefix=${pkgdir} CC=${CC} CCLD='mpif90 -nofor_main'
make
}
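Note: -nofor_main is apparently a flag of the Intel Fortran driver telling the linker that the main program is not written in Fortran (here the Fortran objects are linked under a C main); hardcoding it in CCLD avoids depending on the now commented-out FC variable.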
@@ -6,7 +6,7 @@ with the forcing tcrandom_anisotropic
from fluiddyn.util import mpi
#mpi.comm.barrier()
-mpi.print_sorted("starting")
+#mpi.print_sorted("starting")
import numpy as np
@@ -28,7 +28,7 @@ params.output.sub_directory = "Fluidsim_Data/examples"
params.short_name_type_run = "aniso_" + kind
#params.projection = "poloidal"
-nx = ny = nz = 320 * 2
+nx = ny = nz = 160
Lx = 2.0 * np.pi
params.oper.nx = nx
params.oper.ny = ny
@@ -39,12 +39,12 @@ params.oper.Lz = Lz = Lx / nx * nz
params.time_stepping.USE_T_END = True
params.time_stepping.cfl_coef = 0.2
-params.time_stepping.t_end = 0.2
+params.time_stepping.t_end = 2.0
# Brunt Vaisala frequency
params.N = 2.0
# Viscosity
-params.nu_2 = 1e-3
+params.nu_2 = 1e-2
mpi.printby0(f"N = {params.N:.3e}, nu_2 = {params.nu_2:.3e}")
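For context, the values touched in this hunk plug into a script presumably structured like the following minimal sketch (the ns3d.strat solver is inferred from the submit script below; the forcing setup is omitted, and the exact surrounding code is an assumption):

import numpy as np
from fluidsim.solvers.ns3d.strat.solver import Simul

params = Simul.create_default_params()
# reduced test resolution from this commit (was 640**3)
params.oper.nx = params.oper.ny = params.oper.nz = 160
params.oper.Lx = 2.0 * np.pi
params.N = 2.0                       # Brunt-Vaisala frequency
params.nu_2 = 1e-2                   # viscosity
params.time_stepping.USE_T_END = True
params.time_stepping.t_end = 2.0
params.time_stepping.cfl_coef = 0.2

sim = Simul(params)
sim.time_stepping.start()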
@@ -4,11 +4,11 @@ from fluiddyn.clusters.idris import JeanZay as Cluster
cluster = Cluster()
-nb_nodes = 8
+nb_nodes = 1
nb_cores_per_node = cluster.nb_cores_per_node
nb_procs = nb_mpi_processes = nb_nodes * nb_cores_per_node
walltime = "00:20:00"
walltime = "00:10:00"
cluster.commands_setting_env.append(
"export TRANSONIC_MPI_TIMEOUT=100"
@@ -22,7 +22,7 @@ cluster.submit_script(
"run_simul.py",
name_run=f"ns3d.strat",
nb_nodes=nb_nodes,
-nb_cores_per_node=nb_cores_per_node, # it is computed automatically I think
+nb_cores_per_node=nb_cores_per_node,
nb_mpi_processes=nb_mpi_processes,
omp_num_threads=1,
ask=True,
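With nb_nodes = 1 and nb_cores_per_node taken from the fluiddyn JeanZay cluster description (40 cores on the standard Jean Zay CPU nodes), this now requests 40 MPI processes for a 10-minute test run instead of 320 processes for 20 minutes.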
@@ -4,10 +4,14 @@ alias duh1='du -h --max-depth=1'
module load python/3.8.8 gcc/8.3.1 openmpi/4.1.1 hdf5/1.12.0-mpi
module load fftw/3.3.8-mpi pfft/1.0.8-alpha-mpi
#module load mercurial/6.0
+conda init bash
+source $HOME/.bashrc
+conda activate base
# needed to use clang for Pythran
unset CC
unset CXX
@@ -16,4 +20,4 @@ export FLUIDSIM_PATH=$WORK
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$WORK/.local/p3dfft/2.7.6/lib
-conda activate base
+#conda activate base
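Unsetting CC and CXX lets Pythran fall back to its own compiler configuration; with clangdev installed in the env, clang can then be selected in a ~/.pythranrc like the following (the [compiler] section is Pythran's standard config format, but these values are an illustrative assumption, not part of this commit):

[compiler]
CC = clang
CXX = clang++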