# HG changeset patch
# User paugier <pierre.augier@ens-lyon.org>
# Date 1526294028 -7200
#      Mon May 14 12:33:48 2018 +0200
# Node ID f4f829494bfc77dac46ff91fdaab56be76e28edc
# Parent  169a2fa2ee1d096d92c92ca913befda25defc675
Code comparison fluidsim.

diff --git a/bib.bib b/bib.bib
--- a/bib.bib
+++ b/bib.bib
@@ -257,7 +257,7 @@
 }
 
 @article{guelton2015pythran,
-  title={Pythran: Enabling static optimization of scientific python programs},
+  title={Pythran: {E}nabling static optimization of scientific python programs},
   author={Guelton, Serge and Brunet, Pierrick and Amini, Mehdi and Merlini,
                   Adrien and Corbillon, Xavier and Raynaud, Alan},
   journal={Computational Science \& Discovery},
@@ -269,7 +269,7 @@
 }
 
 @article{behnel2011cython,
-  title={Cython: The best of both worlds},
+  title={Cython: {T}he best of both worlds},
   author={Behnel, Stefan and Bradshaw, Robert and Citro, Craig and Dalcin,
                   Lisandro and Seljebotn, Dag Sverre and Smith, Kurt},
   journal={Computing in Science \& Engineering},
@@ -322,7 +322,8 @@
 }
 
 @article{DeloncleBillantChomaz2008,
-  title={Nonlinear evolution of the zigzag instability in stratified fluids: a shortcut on the route to dissipation},
+  title={Nonlinear evolution of the zigzag instability in stratified fluids: a
+                  shortcut on the route to dissipation},
   author={Deloncle, Axel and Billant, Paul and Chomaz, Jean-Marc},
   journal={Journal of Fluid Mechanics},
   volume={599},
@@ -342,3 +343,15 @@
   year={2005},
   publisher={IEEE}
 }
+
+@article{ascher1997implicit,
+  title={Implicit-explicit Runge-Kutta methods for time-dependent partial
+                  differential equations},
+  author={Ascher, Uri M and Ruuth, Steven J and Spiteri, Raymond J},
+  journal={Applied Numerical Mathematics},
+  volume={25},
+  number={2-3},
+  pages={151--167},
+  year={1997},
+  publisher={Elsevier}
+}
\ No newline at end of file
diff --git a/fluiddyn/fluiddyn_paper.tex b/fluiddyn/fluiddyn_paper.tex
--- a/fluiddyn/fluiddyn_paper.tex
+++ b/fluiddyn/fluiddyn_paper.tex
@@ -585,7 +585,7 @@
 are Python 3 compatible now.}.  Especially for scientific applications, we can
 now work only in Python 3. For instance, \href{https://github.com/%
 numpy/numpy/blob/master/doc/neps/dropping-python2.7-proposal.rst}{future
-releases of \numpy} and of \href{http://www.python3statement.org/}{the main
+releases of \Numpy} and of \href{http://www.python3statement.org/}{the main
 scientific packages} will soon drop compatibility with Python 2. These changes
 open doors to very clean, coherent and potentially faster Python
 experience. Therefore, one should not use Python 2 for science anymore and
@@ -627,9 +627,9 @@
 
 \item Science!
 Python is now widely used in scientific applications. It has now a mature and
-powerful scientific ecosystem with well-established based packages (\numpy for
-N-dimensional homogeneous arrays, \pack{scipy} as the fundamental toolkit for
-scientific computing, \pack{matplotlib} for plotting and \pack{pandas} for data
+powerful scientific ecosystem with well-established base packages (\Numpy for
+N-dimensional homogeneous arrays, \Scipy as the fundamental toolkit for
+scientific computing, \pack{Matplotlib} for plotting and \pack{Pandas} for data
 structures) and several more specialized packages (to name a few, \pack{h5py},
 \pack{mpi4py}, \pack{skimage}, \pack{sklearn}, ...). There are now great (and
 often shiny) tools for most of the applications. For example, the
@@ -637,8 +637,8 @@
 landscape of visualization tools usable through Python} is impressive.
 %
 Python is one of the main languages for data science with packages such as
-\pack{pandas}, \pack{statmodels}, \pack{sklearn}, \pack{keras} and
-\pack{tensorflow}.
+\pack{Pandas}, \pack{statsmodels}, \pack{sklearn}, \pack{keras},
+\pack{tensorflow} and \pack{pytorch}.
 
 There are now ready-to-use ``batteries-included'' Python installers
 (similar to Matlab), like in particular the
@@ -695,7 +695,7 @@
 curve compared to Python.
 %
 Nevertheless, this strategy has given rise to the base modules of the scientific
-Python stack, namely \numpy, \pack{scipy} and \pack{matplotlib}.
+Python stack, namely \Numpy, \Scipy and \pack{Matplotlib}.
 
 Python developers do not actually have to write the compiled extensions in C,
 C++ or Fortran, since there are tools to generate them automatically from
@@ -737,9 +737,13 @@
 This strategy can yield good results with other languages as for example Matlab
 or Julia.
 %
-Pypy, an alternative interpreter written in Python, has a JIT
-compiler. However, Pypy is not widely used for scientific applications mainly
-because of compatibility problems with the extensions written for CPython.
+PyPy, an alternative interpreter written in Python, has a JIT compiler. However,
+PyPy is not widely used for scientific applications, mainly because of
+compatibility problems with the extensions written for CPython.
+%
+This could change, though, since a recent version of PyPy (6.0) supports the main
+packages of the Python scientific stack (\Numpy, \Scipy, \pack{Matplotlib},
+\pack{Pandas}, etc.).
 
 Adding a JIT to the interpreter CPython has been notoriously difficult (see
 \href{https://faster-cpython.readthedocs.io/}{faster-cpython}). One of the
@@ -1545,7 +1549,7 @@
 
 Displays all important information related to software and hardware. It
 includes detailed information such as currently installed FluidDyn packages,
-other third-party packages, C compiler, MPI and \numpy configuration.
+other third-party packages, C compiler, MPI and \Numpy configuration.
 
 \item \codeinline{fluiddump}
 
@@ -1628,11 +1632,11 @@
 We list here only the dependencies of the base package \fluidpack{dyn}.
 
 \begin{itemize}
-\item {\bf Minimum:} \numpy, \pack{matplotlib}, \pack{psutil}, \pack{future},
+\item {\bf Minimum:} \Numpy, \pack{Matplotlib}, \pack{psutil}, \pack{future},
 \pack{subprocess32} (for Python 2.7 only).
 
 \item {\bf Full functionality:} \pack{h5py}, \pack{h5netcdf}, \pack{mpi4py},
-\pack{scipy}, \pack{pyfftw} (requires FFTW library), \pack{pillow}.
+\Scipy, \pack{pyfftw} (requires the FFTW library), \pack{pillow}.
 
 \item {\bf Optional:} OpenCV with Python bindings, \pack{scikit-image}.
 \end{itemize}
diff --git a/fluidsim/fluidsim_paper.tex b/fluidsim/fluidsim_paper.tex
--- a/fluidsim/fluidsim_paper.tex
+++ b/fluidsim/fluidsim_paper.tex
@@ -53,9 +53,9 @@
 fluid mechanics community and intended for both educational as well as research
 purposes.
 %
-Solvers in \fluidpack{sim} are scalable, high-performance computing (HPC) codes
+Solvers in \fluidpack{sim} are scalable, High-Performance Computing (HPC) codes
 which are powered under the hood by the rich, scientific Python ecosystem and the
-application programming interfaces (API) provided by \fluidpack{dyn} and
+Application Programming Interfaces (APIs) provided by \fluidpack{dyn} and
 \fluidpack{fft} packages \cite[]{fluiddyn, fluidfft}.
 %
 The present article describes the design aspects of \fluidpack{sim}, viz.\ use
@@ -122,11 +122,11 @@
 
 For the above reasons, the majority of \fluidpack{sim}'s code-base, in terms of
 lines of code, is written using pure Python syntax. However, this is done
-without compromising performance, by making use of libraries such as \numpy,
-\pack{cython} and \pack{pythran}.
+without compromising performance, by making use of libraries such as \Numpy,
+\pack{Cython} and \pack{Pythran}.
 
 For generic applications such as initialization and postprocessing operations,
-\numpy functions and data types are sufficient, since these functions are used
+\Numpy functions and data types are sufficient, since these functions are used
 sparingly.  Computationally intensive tasks such as time-stepping and linear
 algebra operators which are used in every single iteration must be offloaded to
 compiled extensions.
@@ -134,36 +134,47 @@
 This optimization strategy can be considered as the computational equivalent of
 the \href{https://en.wikipedia.org/wiki/Pareto_principle}{Pareto principle}, also
 known as the 80/20 rule\footnote{See \citet{behnel_cython2011},
-\href{https://wiki.haskell.org/Why_Haskell_matters}{wiki.haskell.org/Why\_Haskell\_matters}}.
+\href{https://wiki.haskell.org/Why_Haskell_matters}{%
+wiki.haskell.org/Why\_Haskell\_matters}}.
 %
 The goal is to optimize such that ``80 percent of the runtime is spent in 20
 percent of the source code'' \cite[]{meyers2012effective}.
 %
-Here, \pack{cython} and \pack{pythran} packages comes in handy. There are some key
-differences between these packages \cite[][]{guelton2018pythran, fluiddyn}.
+Here, the \pack{Cython} \citep{behnel2011cython} and \pack{Pythran}
+\citep{guelton2018pythran} compilers come in handy.
+%
+These two compilers are presented in the companion paper on \fluidpack{dyn}
+\citep{fluiddyn}, and an example of how we use \pack{Pythran} to reach
+performance similar to that of Fortran while writing only Python code is
+described in the companion paper on \fluidpack{fft} \citep{fluidfft}.
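+%
+As a simple illustration of the \pack{Pythran} approach (with a hypothetical
+function, not taken from the \fluidpack{sim} sources), a plain Python function is
+exported through a type annotation written in a comment:
+\begin{verbatim}
+# pythran export add_arrays(float64[][], float64[][])
+
+def add_arrays(a, b):
+    # plain Python / Numpy code: usable as-is by CPython and
+    # compiled to a native extension by Pythran
+    return a + 2 * b
+\end{verbatim}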
+
+% There are some key differences between these packages.
 
-\pack{cython}\ \cite{behnel_cython2011} is a very generic and mature library based
-on Pyrex language --- which is Python with some additional syntax.  It has the
-capability to interface between other C or C++ code by generating shared libraries
-which can be imported by Python.  \pack{cython} can also generate such libraries
-from scratch and even has object-oriented syntax if required.  Some of the
-drawbacks of \pack{cython} include use of Pyrex which can feel verbose and
-esoteric to a Python developer.  While it supports \numpy\ arrays as inputs, it
-requires the developer to explicitly write loops using indices, which can be
-verbose and less generic than array notations.
+% \pack{Cython}\ \cite{behnel_cython2011} is a very generic and mature library
+% based on Pyrex language --- which is Python with some additional syntax.  It has
+% the capability to interface between other C or C++ code by generating shared
+% libraries which can be imported by Python.  \pack{Cython} can also generate such
+% libraries from scratch and even has object-oriented syntax if required.  Some of
+% the drawbacks of \pack{Cython} include use of Pyrex which can feel verbose and
+% esoteric to a Python developer.  While it supports \Numpy\ arrays as inputs, it
+% requires the developer to explicitly write loops using indices, which can be
+% verbose and less generic than array notations.
 
-On the other hand, \pack{pythran}\ \cite{guelton_pythran2013, guelton2018pythran}
-is a relatively recent development, specializing in scientific applications.
-\pack{pythran} can export simple Python functions into extensions with the help of
-type annotations written in Python comments.  When \pack{pythran} is not being
-used these functions can work like plain Python functions.  \pack{pythran} can
-also recognize \numpy functions and optimizes code while building extensions
-internally.  However, \pack{pythran} does not support classes and custom data
-types as function parameters.  Until very recently \pack{pythran} did not support
-interfacing with foreign C or C++ extensions.
+% On the other hand, \pack{Pythran}\ \cite{guelton_pythran2013,
+% guelton2018pythran} is a relatively recent development, specializing in
+% scientific applications.  \pack{Pythran} can export simple Python functions into
+% extensions with the help of type annotations written in Python comments.  When
+% \pack{Pythran} is not being used these functions can work like plain Python
+% functions.  \pack{Pythran} can also recognize \Numpy functions and optimizes
+% code while building extensions internally.  However, \pack{Pythran} does not
+% support classes and custom data types as function parameters.
 
 The result of using such an approach can be shown by measuring the performance
 of \fluidpack{sim}, as shown in the forthcoming sections.
+%
+We will demonstrate that a very large percentage of the elapsed time is spent in
+the execution of optimized compiled functions, so that the ``Python cost'' is
+negligible.
 
 
 \subsubsection*{Target audiences}
@@ -180,12 +191,12 @@
 \item \emph{Core developers}, who develop the base classes, in particular the
 operators and time stepping classes.  One may also sometimes need to write
 compiled extensions to improve runtime performance. To do this, desirable
-traits include strong knowledge in Python, \numpy, \pack{cython} and
-\pack{pythran}.
+traits include strong knowledge in Python, \Numpy, \pack{Cython} and
+\pack{Pythran}.
 \end{itemize}
 
 This metapaper is intended as a short introduction to \fluidpack{sim} and its
-implementation, written from a user-perspective.  A more comprehensive and
+implementation, written mainly from a user's perspective.  A more comprehensive and
 hands-on look at how to use \fluidpack{sim} can be found in the
 tutorials\footnote{See
 \href{https://fluidsim.readthedocs.io/en/latest/tutorials.html}{fluidsim.readthedocs.io/en/latest/tutorials.html}},
@@ -218,13 +229,13 @@
 \subsubsection*{Package organization}
 
 \fluidpack{sim} is meant to serve as a framework for numerical solvers using
-different methods. For the present version of \fluidpack{sim} there is support
-for finite difference and pseudo-spectral methods. An example of a finite
-difference solver is \codeinline{fluidsim.solvers.ad1d} which solves the 1D
-advection equation. There are also solvers which do not rely on most of the
-base classes, such as \codeinline{fluidsim.base.basilisk} which implements a 2D
-adaptive meshing solver as a sub-package. The collection of solvers using
-pseudo-spectral methods are more feature-rich in comparison.
+different methods. For the present version of \fluidpack{sim} there is support for
+finite difference and pseudo-spectral methods. An example of a finite difference
+solver is \codeinline{fluidsim.solvers.ad1d} which solves the 1D advection
+equation. There are also solvers which do not rely on most of the base classes,
+such as \codeinline{fluidsim.base.basilisk} which implements a 2D adaptive meshing
+solver based on the CFD code \href{http://basilisk.fr/}{Basilisk}. The collection
+of solvers using pseudo-spectral methods is more feature-rich in comparison.
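+
+As a minimal sketch of how a solver is typically used (the module path
+\codeinline{fluidsim.solvers.ns2d.solver} and the parameter names are given as an
+indication and may differ slightly):
+\begin{verbatim}
+from fluidsim.solvers.ns2d.solver import Simul
+
+# create and modify the default parameters object
+params = Simul.create_default_params()
+params.oper.nx = params.oper.ny = 256
+
+# instantiate the simulation object and run the simulation
+sim = Simul(params)
+sim.time_stepping.start()
+\end{verbatim}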
 
 The code is organized into the following sub-packages:
 
@@ -232,8 +243,7 @@
 \item \codeinline{fluidsim.base}: contains all base classes and a solver for
 the trivial equation $\partial_t \mathbf{\hat{u}} = 0 $.
 \item \codeinline{fluidsim.operators}: specialized linear algebra and numerical
-method operators (for eg., divergence, curl, variable transformations,
-dealiasing).
+method operators (e.g., divergence, curl, variable transformations, dealiasing).
 \item \codeinline{fluidsim.solvers}: solvers and postprocessing modules for
 problems such as 1D advection, 2D and 3D Navier-Stokes, 2D Boussinesq,
 one-layer shallow water and F\"oppl-von K\'arm\'an equations.
@@ -241,13 +251,13 @@
 simulation, to test, and to benchmark a solver.
 \end{itemize}
 
-Subpackages \codeinline{base} and \codeinline{operators} form the backbone of
-this package, and are not meant to be used by the user explicitly.
+Subpackages \codeinline{base} and \codeinline{operators} form the backbone of this
+package, and are not meant to be used by the user explicitly.
 
-In practice, the user can make an entirely new solver for a new problem using this
+In practice, one can make an entirely new solver for a new problem using this
 framework by simply writing one or two importable files containing three classes:
 \begin{itemize}
-\item a \codeinline{InfoSolver} class\footnote{Inheriting from the base class
+\item an \codeinline{InfoSolver} class\footnote{Inheriting from the base class
 \codeinline{fluidsim.base.solvers.info\_base.InfoSolverBase}.}, containing the
 information on which classes will be used for the different tasks in the solver
 (time stepping, state, operators, output, etc.).
@@ -256,8 +266,8 @@
 solved.
 \item a \codeinline{State} class\footnote{Inheriting from the base class
 \codeinline{fluidsim.base.state.StateBase}.} defining all physical variables and
-its spectral counterparts being solved (for example: $u_x$ and $u_y$) and methods
-to compute one variable from another.
+their spectral counterparts being solved (for example: $u_x$ and $u_y$) and
+methods to compute one variable from another.
 \end{itemize}
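+
+As an indicative skeleton (the names below are simplified; only the two
+base-class paths cited in the footnotes above are taken from the code), such a
+file could look like:
+\begin{verbatim}
+from fluidsim.base.solvers.info_base import InfoSolverBase
+from fluidsim.base.state import StateBase
+
+class InfoSolverMyEquations(InfoSolverBase):
+    """Declare which classes are used for time stepping,
+    state, operators, output, etc."""
+
+class StateMyEquations(StateBase):
+    """Define the physical variables (e.g. ux, uy), their
+    spectral counterparts and the variable transformations."""
+
+class Simul:  # in practice, inherits from a fluidsim base Simul class
+    """Define the equations being solved."""
+    InfoSolver = InfoSolverMyEquations
+\end{verbatim}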
 
 We now turn our attention to the simulation object which illustrates how to
@@ -379,8 +389,8 @@
 plane.  It relies on \codeinline{sim.state} to load or compute the variables
 into arrays.
 %
-\2 \codeinline{sim.output.spectra}: energy spectra as line plots summed over
-wavenumber shells.
+\2 \codeinline{sim.output.spectra}: energy spectra as line plots (i.e. as
+functions of the modulus or of a component of the wavenumber).
 %
 \2 \codeinline{sim.output.spatial\_means}: mean quantities such as energy,
 enstrophy, forcing power, dissipation.
@@ -395,19 +405,18 @@
 solved for and their spectral equivalents, along with all required variable
 transformations.
 %
-Also includes high-level objects, aptly named
-\codeinline{sim.state.state\_phys} and \codeinline{sim.state.state\_fft} to
-hold the arrays.
+Also includes high-level objects, aptly named \codeinline{sim.state.state\_phys}
+and \codeinline{sim.state.state\_spect} to hold the arrays.
 %
 \1 \codeinline{sim.time\_stepping}: Generic numeric time-integration object
 which dynamically determines the time-step using the CFL criterion for specific
 solver and advances the state variables using Runge-Kutta method of order 2 or
 4.
 %
-\1 \codeinline{sim.init\_fields}: Used only once to initialize all state
-variables with simple kinds of flow structures, for example a dipole vortex,
-base flow with constant value for all gridpoints, grid of vortices, narrow-band
-noise etc.
+\1 \codeinline{sim.init\_fields}: Used only once to initialize all state variables
+from a previously generated output file or with simple kinds of flow structures,
+for example a dipole vortex, a base flow with a constant value for all gridpoints,
+a grid of vortices, narrow-band noise, etc.
 %
 \1 \codeinline{sim.forcing}: Initialized only when
 \codeinline{params.forcing.enable} is set as \codeinline{True} and it computes
@@ -448,7 +457,7 @@
 
     <State class_name="StateNS2D" keys_computable="[]"
            keys_linear_eigenmodes="['rot_fft']" keys_phys_needed="['rot']"
-           keys_state_fft="['rot_fft']" keys_state_phys="['ux', 'uy', 'rot']"
+           keys_state_spect="['rot_fft']" keys_state_phys="['ux', 'uy', 'rot']"
            module_name="fluidsim.solvers.ns2d.state"/>
 
     <TimeStepping class_name="TimeSteppingPseudoSpectral"
@@ -567,10 +576,10 @@
 Navier-Stokes solver shows that most of the time is spent in inverse and
 forward FFT calls (\codeinline{ifft\_as\_arg} and
 \codeinline{fft\_as\_arg}). For the sequential solver, over $97\%$ of the time
-is spent in compiled C++ extensions, built using \pack{cython} and
-\pack{pythran} and all pure Python functions limited in the \emph{other}
-category.  \pack{cython} extensions are responsible for interfacing with FFT
-operators and also for the time-step algorithm.  \pack{pythran} extensions are
+is spent in compiled C++ extensions, built using \pack{Cython} and
+\pack{Pythran}, with all pure Python functions confined to the \emph{other}
+category.  \pack{Cython} extensions are responsible for interfacing with FFT
+operators and also for the time-step algorithm.  \pack{Pythran} extensions are
 used to translate most of the linear algebra operations into optimized,
 statically compiled extensions.  With 16 processes deployed in parallel time
 spent in compiled extensions falls to around $93\%$, with increased
@@ -602,8 +611,8 @@
 
 In future releases of \fluidpack{sim}, the performance of solvers can
 potentially improve with the possibility of replacing some of the Python
-functions mentioned above with equivalent \pack{pythran} extensions which can
-interface with other \pack{cython} based functions.
+functions mentioned above with equivalent \pack{Pythran} extensions which can
+interface with other \pack{Cython} based functions.
 
 
 \subsubsection*{Scalability}
@@ -795,7 +804,7 @@
 
 \subsection*{CFD pseudo-spectral code comparisons}
 
-%TODO: Compare profiling of Dedalus SpectralDNS, NS3D
+%TODO-DONE: Compare profiling of Dedalus SpectralDNS, NS3D
 % See the file notes_compare_codes.md
 % Global comparison, not only performance (which is only one aspect for a CFD
 % code)
@@ -808,6 +817,26 @@
 \item \href{http://dedalus-project.org/}{Dedalus} \citep{burns_dedalus} is ``a
 flexible framework for spectrally solving differential equations''. It is very
 versatile and the user describes the problem to be solved symbolically.
+%
+This approach is very different from that of \fluidpack{sim}, where the
+equations are described with simple \Numpy code.  There is no equivalent of the
+\fluidpack{sim} concept of ``solver'', i.e. a class corresponding to a set of
+equations with specialized outputs (with the corresponding plots).  To run a
+simulation with Dedalus, one has to describe the problem using mathematical
+equations.  This can be very convenient: the approach is versatile and it is not
+necessary to understand how Dedalus works to define a new problem.  However, it
+also has drawbacks:
+\begin{itemize}
+\item Even for very standard problems, one needs to describe the problem in the
+launching script.
+\item There is a potentially long initialization phase during which Dedalus
+processes the user input and prepares the ``solver''.
+\item Even when a user knows how to define a problem symbolically, it is not
+simple to understand how the problem is solved by Dedalus and how to interact
+with the program from Python.
+\item There are no forcing schemes or specialized outputs already implemented in
+Dedalus.
+\end{itemize}
 
 \item \href{https://github.com/spectralDNS/spectralDNS}{SpectralDNS}
 \citep{mortensen_spectraldns2016} is a ``high-performance pseudo-spectral
@@ -815,14 +844,47 @@
 this solver is that it is written entirely in Python using NumPy, MPI for Python
 (mpi4py) and pyFFTW.''
 
-SpectralDNS is therefore technically very similar to \fluidpack{sim}.
+Therefore, SpectralDNS is technically very similar to \fluidpack{sim}.
+%
+Some differences are that SpectralDNS has no object-oriented API and that the user
+has to define output and forcing in the launching script\footnote{See
+\href{https://github.com/spectralDNS/spectralDNS/tree/master/demo}{the demo
+scripts of SpectralDNS}.}, which are thus usually much longer than for
+\fluidpack{sim}.
+%
+Moreover, the parallel Fourier transforms are done with the Python package
+\href{https://github.com/spectralDNS/mpiFFT4py}{\pack{mpiFFT4py}}, which can use
+only the FFTW library, whereas \fluidpack{fft} supports several FFT libraries
+\citep[][]{fluidfft}.
 
-\item \href{https://bitbucket.org/paugier/ns3d}{NS3D} \cite[see for
-example][]{DeloncleBillantChomaz2008} is a highly efficient code (parallelized
-with MPI and OpenMP) written in Fortran. It has been highly optimized by
-generations of PhD students at
-\href{https://www.ladhyx.polytechnique.fr}{LadHyX}. However, it is limited to 1d
-decomposition for the 3d FFT \cite[][]{fluidfft}.
+\item \href{https://bitbucket.org/paugier/ns3d}{NS3D} is a highly efficient
+pseudo-spectral Fortran code.
+%
+It was written at the laboratory
+\href{https://www.ladhyx.polytechnique.fr}{LadHyX} and used for several studies
+involving simulations (in 3d and in 2d) of the Navier-Stokes equations under the
+Boussinesq approximation with stratification and system rotation \cite[see for
+example][]{DeloncleBillantChomaz2008}.
+%
+NS3D has been highly optimized and is very efficient for sequential and parallel
+simulations (using MPI and OpenMP).  However, the parallelization is limited to a
+1d decomposition for the FFT \cite[][]{fluidfft}.
+%
+Another weakness compared to \fluidpack{sim} is that NS3D uses simple binary files
+instead of the HDF5 and NetCDF4 files used by \fluidpack{sim}.  Therefore,
+visualization programs like ParaView or VisIt cannot load NS3D data.
+
+As for many Fortran codes, Bash and Matlab are used for launching and
+post-processing, respectively.
+%
+In terms of user experience, this can be a drawback compared to the coherent
+framework \fluidpack{sim}, for which the user works only with Python.
+
+In contrast to the framework \fluidpack{sim}, with which it is easy to define a
+new solver for a new set of equations, NS3D is specialized in solving the
+Navier-Stokes equations under the Boussinesq approximation.  Using NS3D to solve a
+new set of equations would require very deep and difficult changes in many places
+in the code.
 
 \end{itemize}
 
@@ -830,8 +892,26 @@
 compare only sequential runs.  We have already discussed in detail the issue of
 the scalability of pseudo-spectral codes based on Fourier transforms in the
 previous section and in the companion paper \citep{fluidfft}.
+%
+We compare the codes on a very simple and standard task: running ten time steps
+with the Runge-Kutta 4 (RK4) method.
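+%
+For $\partial_t u = N(u)$ and a time step $\delta t$, the standard explicit RK4
+scheme computes
+\begin{align*}
+k_1 &= N(u^n), \quad k_2 = N(u^n + \tfrac{\delta t}{2}\, k_1), \quad
+k_3 = N(u^n + \tfrac{\delta t}{2}\, k_2), \quad k_4 = N(u^n + \delta t\, k_3),\\
+u^{n+1} &= u^n + \tfrac{\delta t}{6}\, (k_1 + 2 k_2 + 2 k_3 + k_4).
+\end{align*}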
+%
+Note that Dedalus does not implement the standard fully explicit RK4
+method\footnote{See
+\href{https://bitbucket.org/dedalus-project/dedalus/issues/38/%
+slow-simulation-ns2d-over-a-biperiodic}{the Dedalus issue 38.}}. We thus use for
+Dedalus the most similar time stepping scheme, RK443, a 4-stage, third-order mixed
+implicit-explicit scheme described in \citet{ascher1997implicit}.
+%
+Note that in the other codes, part of the linear terms are also treated
+implicitly.
+%
+Note also that in several cases, the time step is limited not by the stability of
+the time scheme but by other requirements (periods of waves, accuracy, etc.), so
+these benchmarks are representative of the elapsed times of real-life
+simulations.
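+%
+For \fluidpack{sim}, such a benchmark reduces to a script like the following
+sketch (the \codeinline{time\_stepping} parameter names are given as an
+indication):
+\begin{verbatim}
+from time import perf_counter
+from fluidsim.solvers.ns2d.solver import Simul
+
+params = Simul.create_default_params()
+params.oper.nx = params.oper.ny = 1024
+# stop after exactly ten time steps instead of at a given time
+params.time_stepping.USE_T_END = False
+params.time_stepping.it_end = 10
+sim = Simul(params)
+
+t0 = perf_counter()
+sim.time_stepping.start()
+print("elapsed time:", perf_counter() - t0, "s")
+\end{verbatim}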
 
-\paragraph{Bi-dimensional simulations}
+\paragraph{Bi-dimensional simulations.}
 
 \begin{table}
 \centering
@@ -843,46 +923,49 @@
  1024$^2$ &              2.69 &     43.00 &          3.48 &   3.96 \\
 \hline
 \end{tabular}
-\caption{Elapsed times (in seconds) for 10 time steps for two bidimensional cases
-and the four CFD codes.}
+\caption{Elapsed times (in seconds) for ten RK4 time steps for two bidimensional
+cases and the four CFD codes.}
 \label{table:compare}
 \end{table}
 
-We first compare elapsed times for two resolutions (512$^2$ and 1024$^2$) over a
-bi-dimensional space.  The results are summarized in Table~\ref{table:compare}.
+We first compare the elapsed times for two resolutions (512$^2$ and 1024$^2$) over
+a bi-dimensional space.  The results are summarized in Table~\ref{table:compare}.
 %
 The results are consistent for the two resolutions.  \fluidpack{sim} is the fastest
-code for these cases.  Dedalus is more than one order of magnitude slower.  The
-two other codes have similar performance, slightly slower than \fluidpack{sim} and
-much faster than Dedalus
+code for these cases.  Dedalus is more than one order of magnitude slower but, as
+explained above, its time stepping method is different.  Note also that Dedalus
+has been optimized more for bounded domains with Chebyshev methods.
 %
-% todo: interpret and comment the results...
-The Fortran code NS3D is surprisingly slow (47\% slower than \fluidpack{sim})
-since there is no specialized numerical scheme for the 2d case in NS3D, so that
-more FFTs have to be performed compared to SpectralDNS and \fluidpack{sim}.
+The two other codes, SpectralDNS and NS3D, have similar performance: slightly
+slower than \fluidpack{sim} and much faster than Dedalus.
+%
+Surprisingly, the Fortran code NS3D is slower (by 47\%) than the Python code
+\fluidpack{sim}.  This can be explained by the fact that there is no specialized
+numerical scheme for the 2d case in NS3D, so that more FFTs have to be performed
+compared to SpectralDNS and \fluidpack{sim}.
 %
 This shows the importance of implementing the adapted algorithm for each problem,
 which is much easier with a highly modular code as \fluidpack{sim} than with a
 specialized code as NS3D.
 
 
-\paragraph{Tri-dimensional simulations}
+\paragraph{Tri-dimensional simulations.}
 
-We now turn our attention to a tri-dimensional case, what are the elapsed time for
-10 time steps for a resolution 128$^3$.
+We now compare the elapsed times for ten RK4 time steps for a tri-dimensional case
+with a resolution of 128$^3$.
 %
-Dedalus is extremely slow and does not seem to be adapted for this case so we do
-not give exact elapsed time for this code.
+Dedalus is slow and does not seem to be adapted to this case, so we do not give
+the exact elapsed time for this code.
 %
-SpectralDNS is slightly slower (11.55 s) than the two other codes (9.45 for
-\fluidpack{sim} and 9.52 s for ns3d). This difference is mainly explained by the
+SpectralDNS is slightly slower (11.55 s) than the two other codes (9.45 s for
+\fluidpack{sim} and 9.52 s for NS3D). This difference is mainly explained by the
 slower FFTs for SpectralDNS.
 
 \begin{figure}[htp]
 \centering
 \includegraphics[width=\linewidth]{./tmp/fig_compare_with_ns3d}
 \caption{Comparison of the execution times for a 3d case (128$^3$, 10 time steps)
-between ns3d (blue bars) and \codeinline{fluidsim.solvers.ns3d} (yellow bars).
+between NS3D (blue bars) and \codeinline{fluidsim.solvers.ns3d} (yellow bars).
 %
 The first two bars correspond to the total time and the others to the main tasks
 in terms of time consumption, namely FFT, Runge-Kutta 4, curl, vector product and
@@ -891,42 +974,48 @@
 \end{figure}
 
 Figure~\ref{fig:compare:with:ns3d} presents a more detailed comparison between
-ns3d (blue bars) and \codeinline{fluidsim.solvers.ns3d} (yellow bars).
+NS3D (blue bars) and \fluidpack{sim} (yellow bars).
 %
 The total elapsed time is mainly spent in five tasks: FFTs, Runge-Kutta 4, curl,
 vector product and ``projection''. The times spent to perform these tasks are
 compared for the two codes.
 
-We see that NS3D's FFTs are very fast: the FFT execution is 0.55 s longer for
+We see that FFTs in NS3D are very fast: the FFT execution is 0.55 s longer for
 \fluidpack{sim} (nearly 9\% longer). This difference is especially important for
-sequential run for which there is no communication cost in the FFT computation.
+sequential runs, for which there is no communication cost in the FFT computation;
+Fortran has no advantage over Python for communication-bound tasks such as MPI
+communication.
 
 This difference can partially be explained by the fact that in NS3D, all FFTs are
-inplace (so the input can be erased during the transform).  Another factor is that
-the flag FFTW\_PATIENT is used in ns3d which leads to very long initialization and
-some times faster FFTs. Since we did not see significant speed-up by using this
-flag in \fluidpack{sim} and that we also care about initialization time, this flag
-is not used and we prefer to use the flag FFTW\_MEASURE, which usually leads to
-similar performance.
+in-place (so the input can be erased during the transform).  On the one hand, this
+choice is good for performance and lowers memory consumption.  On the other hand,
+it makes the code harder to write, to read and to modify.  Since memory
+consumption is much less of a problem on recent clusters than it used to be, and
+since code simplicity is highly important for a framework like \fluidpack{sim}, we
+chose to use out-of-place FFTs in \fluidpack{sim}.
+%
+Another factor is that the flag FFTW\_PATIENT is used in NS3D, which leads to very
+long initialization and sometimes faster FFTs. Since we did not see a significant
+speed-up when using this flag in \fluidpack{sim} and since we also care about
+initialization time, we prefer the flag FFTW\_MEASURE, which usually leads to
+similar performance.
 
-NS3D's time stepping is significantly slower than \fluidpack{sim}'s time stepping
-(0.34 s $\simeq$ 20 \% slower). We did not find the performance issue in NS3D.
-
+Time stepping in NS3D is significantly slower than in \fluidpack{sim} (0.34 s
+$\simeq$ 20 \% slower). We did not find the origin of this performance issue in
+NS3D.
+%
 The linear operators are slightly faster in \fluidpack{sim} than in the Fortran
 code NS3D.  This is because this corresponds to Pythran functions written with
-explicit loops \cite[see][]{fluidfft}. There are also few unnecessary projections
-in NS3D (5 per time step in NS3D compared to 4 per time step in \fluidpack{sim}).
+explicit loops \cite[see][]{fluidfft}.
 
 Although the FFTs are a little bit faster for NS3D, the total time is slightly
 smaller (less than 1\% of the total time) for \fluidpack{sim} for this case.
 
-These examples do not show that fluidsim is always faster than ns3d or as fast as
-any very well optimized Fortran codes.  However, it proves that our very
+These examples do not prove that \fluidpack{sim} is always faster than NS3D or as
+fast as any well-optimized Fortran code.  However, they demonstrate that our very
 high-level and modular Python code is very efficient and is not slower than a
 well-optimized Fortran code.
 
 
-
 \section*{Quality control}
 
 % \textcolor{blue}{Detail the level of testing that has been carried out on the
@@ -947,7 +1036,7 @@
 valued at approximately 60\%.
 
 We also try to follow a consistent code style as recommended by PEP (Python
-enhancement proposals) --- 8 and 257. This is also inspected using lint
+enhancement proposals) 8 and 257. This is inspected using lint
 checkers such as \codeinline{flake8} and \codeinline{pylint} among the
 developers.  The code is regularly cleaned up using the Python code formatter
 \codeinline{black}.
@@ -969,10 +1058,10 @@
 \section*{Dependencies}
 
 \begin{itemize}
-\item {\bf Minimum:} \fluidpack{dyn}, \pack{numpy}, \pack{h5py}, \fluidpack{fft},
-\pack{FFTW}.
-\item {\bf Optional:} \pack{scipy}, \pack{mpi4py}, \pack{cython} and
-\pack{pythran}, \pack{pulp}.
+\item {\bf Minimum:} \fluidpack{dyn}, \Numpy, \pack{h5py}, \fluidpack{fft}
+\cite[and FFT libraries, see][]{fluidfft}.
+\item {\bf Optional:} \Scipy, \pack{mpi4py}, \pack{Cython} and
+\pack{Pythran}, \pack{pulp}.
 \end{itemize}
 
 \section*{List of contributors}
@@ -1041,12 +1130,17 @@
 
 \fluidpack{sim} can be used in research and teaching to run numerical simulations
 and build new solvers.
+%
+The qualities and advantages of \fluidpack{sim} (integration with the Python
+ecosystem, documentation, unit tests, versatility, efficiency and scalability)
+make us think that \fluidpack{sim} can become a truly collaborative code.
 
 There is no formal support mechanism. However, bug reports can be submitted at
 the \href{https://bitbucket.org/fluiddyn/fluidsim/issues}{Issues page on
 Bitbucket}.  Discussions and questions can be aired on instant messaging
 channels in Riot (or equivalent with Matrix protocol) at
-\href{https://riot.im/app/#/room/#fluiddyn-users:matrix.org}{\codeinline{\#fluiddyn-users:matrix.org}}
+\href{https://riot.im/app/#/room/#fluiddyn-users:matrix.org}{\codeinline{%
+\#fluiddyn-users:matrix.org}}
 or via IRC protocol on Freenode at \codeinline{\#fluiddyn-users}.
 
 \section*{Acknowledgements}
diff --git a/jors.cls b/jors.cls
--- a/jors.cls
+++ b/jors.cls
@@ -108,8 +108,8 @@
 
 \newcommand{\fluiddyn}{\fluidpack{dyn}\xspace}
 
-\newcommand{\numpy}{\codeinline{numpy}\xspace}
-\newcommand{\scipy}{\codeinline{scipy}\xspace}
+\newcommand{\Numpy}{\codeinline{NumPy}\xspace}
+\newcommand{\Scipy}{\codeinline{SciPy}\xspace}
 
 \newcommand{\pack}[1]{\codeinline{#1}\xspace}