Commit 01ba5a24 authored by incardon

Fixing conflict for release 1.0.0

parents 1eb20820 0a935839
# Change Log
All notable changes to this project will be documented in this file.
## [0.9.0]
### Added
- Introduced getDomainIterator for Cell-list
- Example to show how to add sensors in SPH/particle based methods (see Vector/7_SPH_opt)
- Increased performance of 7_SPH_opt
- Vortex in Cell example Numerics/Vortex_in_cell
- Interpolation functions (see Numerics/vortex_in_cell example)
- Gray-Scott 3D example with stencil iterator optimization (see Grid/gray_scott_3d example)
- HDF5 Check point restart for vector_dist particles (see Vector/1_HDF5_save_and_load)
- Raw reader for grid (see ...)
- A way to specify names for properties and select properties to write (in PROGRESS)
- Ghost put on grid (see Vortex in Cell example)
- Stencil variant of getDomainIterator for faster stencil codes (see Grid/gray_scott_3d example)
- Algebraic multigrid solvers interface for linear systems (see Vortex in Cell example)
- Added setPropNames in vector_dist (see Vector/0_simple)
### Fixed
- PETSc installation: if installation with MUMPS fails, retry without MUMPS
- In case of a mis-compilation, ignore the system-wide installation
- Two bugs in 7_SPH_opt: an error in the kernel and in the update of the boundary particles
- Bug in the binary VTK writer in the case of vectors
- Bug in the binary VTK writer: long int is not supported, so it is removed from the output
- Bug in the FDScheme constructor with a stencil bigger than one
- Memory leak in the PETSc solver
- Performance bug in the grid iterator
### Changed
- CellList types have changed; for example
  CellList<3, double, FAST, shift<3,double>>
  becomes
  CellList<3, double, Mem_fast<3, double>, shift<3, double>>
  (a short migration sketch follows this list)
- getIterator in CellList has been renamed getCellIterator
- Grid iterator types have changed (one additional template parameter)
- FDScheme: the constructor now has one parameter fewer (parameter number 4 has been removed; see the Stokes_Flow examples in Numerics)
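For migration, a minimal before/after sketch based only on the entries above (type aliases only; the iterator accessor lines are shown as comments because the surrounding constructor arguments and cell id are placeholders, not the exact 1.0.0 API):

    // before 0.9.0
    using cell_list_old = CellList<3, double, FAST, shift<3, double>>;

    // from 0.9.0 on: the memory model is an explicit template parameter
    using cell_list_new = CellList<3, double, Mem_fast<3, double>, shift<3, double>>;

    // the per-cell iterator accessor has been renamed:
    //   old: auto cit = cl.getIterator(cell_id);
    //   new: auto cit = cl.getCellIterator(cell_id);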
## [0.8.0] 28 February 2016
### Added
......
#!groovy
node ('windows10')
{
deleteDir()
checkout scm
stage ('build_win10')
{
sh "./build_sec_OS.sh $WORKSPACE $NODE_NAME pdata"
}
// stage ('run_taurus')
// {
// ./run.sh $WORKSPACE $NODE_NAME 24 1 24"
// }
}
......@@ -15,10 +15,10 @@ parallel (
stage ('run_gin')
{
parallel (
"1" : {sh "cd openfpm_numerics && ./run.sh $WORKSPACE $NODE_NAME 1"},
"2" : {sh "cd openfpm_numerics && ./run.sh $WORKSPACE $NODE_NAME 2"},
"3" : {sh "cd openfpm_numerics && ./run.sh $WORKSPACE $NODE_NAME 3"},
"4" : {sh "cd openfpm_numerics && ./run.sh $WORKSPACE $NODE_NAME 4"}
"1" : {sh "cd openfpm_numerics && ./run.sh $WORKSPACE $NODE_NAME 1 0 0 numerics"},
"2" : {sh "cd openfpm_numerics && ./run.sh $WORKSPACE $NODE_NAME 2 0 0 numerics"},
"3" : {sh "cd openfpm_numerics && ./run.sh $WORKSPACE $NODE_NAME 3 0 0 numerics"},
"4" : {sh "cd openfpm_numerics && ./run.sh $WORKSPACE $NODE_NAME 4 0 0 numerics"}
)
sh "./success.sh 2 gin openfpm_numerics"
}
......@@ -41,10 +41,10 @@ parallel (
stage ('run_sb15')
{
parallel (
"1" : {sh "cd openfpm_numerics && ./run.sh $WORKSPACE $NODE_NAME 1"},
"2" : {sh "cd openfpm_numerics && ./run.sh $WORKSPACE $NODE_NAME 2"},
"3" : {sh "cd openfpm_numerics && ./run.sh $WORKSPACE $NODE_NAME 3"},
"4" : {sh "cd openfpm_numerics && ./run.sh $WORKSPACE $NODE_NAME 4"}
"1" : {sh "cd openfpm_numerics && ./run.sh $WORKSPACE $NODE_NAME 1 0 0 numerics"},
"2" : {sh "cd openfpm_numerics && ./run.sh $WORKSPACE $NODE_NAME 2 0 0 numerics"},
"3" : {sh "cd openfpm_numerics && ./run.sh $WORKSPACE $NODE_NAME 3 0 0 numerics"},
"4" : {sh "cd openfpm_numerics && ./run.sh $WORKSPACE $NODE_NAME 4 0 0 numerics"}
)
sh "./success.sh 2 sbalzarini-mac-15 openfpm_numerics"
}
......
......@@ -26,7 +26,7 @@ then
if [ ! -d $HOME/$5/MPI ]; then
echo "COPY MPICH"
cp -R $HOME/MPI $HOME/$5/MPI
echo 1 > $HOME/$5/MPI/version
echo 2 > $HOME/$5/MPI/version
fi
### Activate MPI and binutils ###
......@@ -40,9 +40,10 @@ then
mv $HOME/openfpm_vars $HOME/openfpm_vars_$5
source $HOME/openfpm_vars_$5
elif [ x"$3" == x"numerics" ]; then
CC=gcc-4.9.2 CXX=g++-4.9.2 FC=gfortran-4.9.2 F77=gfortran-4.9.2 ./install -i $HOME/$5 -m -s -c "--prefix=/home/jenkins/openfpm_install"
mv $HOME/openfpm_vars $HOME/openfpm_vars_$5
source $HOME/openfpm_vars_$5
branch=$(git ls-remote --heads origin | grep $(git rev-parse HEAD) | cut -d / -f 3)
CC=gcc-4.9.2 CXX=g++-4.9.2 FC=gfortran-4.9.2 F77=gfortran-4.9.2 ./install -i $HOME/$branch -m -s -c "--prefix=/home/jenkins/openfpm_install"
mv $HOME/openfpm_vars $HOME/openfpm_vars_$branch
source $HOME/openfpm_vars_$branch
make $3
else
CC=gcc-4.9.2 CXX=g++-4.9.2 FC=gfortran-4.9.2 F77=gfortran-4.9.2 ./install -i $HOME/$5 -m -s -c "--prefix=/home/jenkins/openfpm_install --no-recursion"
......@@ -98,9 +99,9 @@ else
mv $HOME/openfpm_vars $HOME/openfpm_vars_$5
source $HOME/openfpm_vars_$5
elif [ x"$3" == x"numerics" ]; then
./install -i $HOME/$5 -m -s -c "--prefix=/home/jenkins/openfpm_install"
mv $HOME/openfpm_vars $HOME/openfpm_vars_$5
source $HOME/openfpm_vars_$5
branch=$(git ls-remote --heads origin | grep $(git rev-parse HEAD) | cut -d / -f 3)
./install -i $HOME/$branch -m -s -c "--prefix=/home/jenkins/openfpm_install"
source $HOME/openfpm_vars_$branch
make $3
else
./install -i $HOME/$5 -m -s -c "--prefix=/Users/jenkins/openfpm_install --no-recursion"
......
......@@ -27,15 +27,21 @@ then
echo "Compiling on gin\n"
source ~/.bashrc
module load gcc/4.9.2
if [ x"$3" == x"numerics" ]; then
echo "Installing for numerics"
branch=$(git ls-remote --heads origin | grep $(git rev-parse HEAD) | cut -d / -f 3)
./install -i $HOME/$branch -s -c "--prefix=/home/jenkins/openfpm_install"
make
else
mkdir $HOME/$4
./install -i $HOME/$4 -s -c "--prefix=/home/jenkins/openfpm_install"
make
make install
fi
if [ $? -ne 0 ]; then
curl -X POST --data "payload={\"icon_emoji\": \":jenkins:\", \"username\": \"jenkins\" , \"attachments\":[{ \"title\":\"Error:\", \"color\": \"#FF0000\", \"text\":\"$2 failed to complete the openfpm_pdata test \" }] }" https://hooks.slack.com/services/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
exit 1 ;
fi
make install
source $HOME/openfpm_vars
if [ x"$3" == x"no_test" ]; then
......@@ -244,9 +250,17 @@ then
else
echo "Compiling general"
source ~/.bashrc
if [ x"$3" == x"numerics" ]; then
echo "Installing for numerics"
branch=$(git ls-remote --heads origin | grep $(git rev-parse HEAD) | cut -d / -f 3)
./install -i $HOME/$branch -s -c "--prefix=/home/jenkins/openfpm_install"
make
else
mkdir $HOME/$4
./install -i $HOME/$4 -s
make
fi
if [ $? -ne 0 ]; then
curl -X POST --data "payload={\"icon_emoji\": \":jenkins:\", \"username\": \"jenkins\" , \"attachments\":[{ \"title\":\"Error:\", \"color\": \"#FF0000\", \"text\":\"$2 failed to complete the openfpm_pdata test \" }] }" https://hooks.slack.com/services/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
......
......@@ -12,12 +12,13 @@ if [ "$2" == "windows10" ]; then
echo "Compiling on windows10"
echo "1" > input_install
echo "1" >> input_install
echo "1" >> input_install
echo "2" >> input_install
echo "y" >> input_install
echo "1" >> input_install
branch=$(git ls-remote --heads origin | grep $(git rev-parse HEAD) | cut -d / -f 3)
./install -i "/scratch/p_ppm/openfpm_deps_intel" < input_install
./install -i "/home/jenkins/$branch" < input_install
fi
......
......@@ -4,7 +4,7 @@
## Take all the options with the exception of --enable-install-req
AC_PREREQ(2.59)
AC_INIT(OpenFPM_pdata, 0.8.0, BUG-REPORT-ADDRESS)
AC_INIT(OpenFPM_pdata, 0.9.0, BUG-REPORT-ADDRESS)
AC_CANONICAL_SYSTEM
AC_CONFIG_SRCDIR([src/main.cpp])
AC_CONFIG_SUBDIRS([openfpm_data openfpm_devices openfpm_vcluster openfpm_io openfpm_numerics])
......@@ -191,6 +191,7 @@ AC_ARG_ENABLE(test-performance,
[enable test performance]
),
test_per="$enableval"
INCLUDES_PATH+="-I../openfpm_numerics/src "
)
......
......@@ -4,13 +4,15 @@
/*! \page grid Grid
*
* \subpage grid_0_simple
* \subpage grid_1_save_load
* \subpage Grid_1_stencil
* \subpage Grid_2_solve_eq
* \subpage Grid_3_gs
* \subpage Grid_3_gs_3D
*
*/
/*! \page grid_0_simple Grid 0 simple
/*! \page grid_0_simple Simple usage
[TOC]
......@@ -29,7 +31,7 @@
int main(int argc, char* argv[])
{
/*! \page grid_0_simple Grid 0 simple
/*! \page grid_0_simple Simple usage
*
* ## Initialization ## {#e0_s_initialization}
*
......@@ -55,18 +57,18 @@ int main(int argc, char* argv[])
size_t sz[3] = {100,100,100};
// Ghost part
Ghost<3,float> g(0.01);
Ghost<3,float> g(0.1);
//! \cond [initialization] \endcond
/*! \page grid_0_simple Grid 0 simple
/*! \page grid_0_simple Simple usage
*
* ## Grid instantiation ## {#e0_s_grid_inst}
*
* Here we are creating a distributed grid in defined by the following parameters
* Here we are creating a distributed grid defined by the following parameters
*
* * 3 dimensionality of the grid
* * float Type used for the spatial coordinates
* * float type used for the spatial coordinates
* * each grid point contains a vector of dimension 3 (float[3]),
* * float[3] is the information stored by each grid point;
* the list of properties must be put into an aggregate data structure aggregate<prop1,prop2,prop3, ... >
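*
* A minimal sketch of such a declaration (the domain box and the parameter
* order are illustrative assumptions, not the exact snippet of this example):
*
* \code{.cpp}
* Box<3,float> domain({0.0,0.0,0.0},{1.0,1.0,1.0});   // physical domain
* size_t sz[3] = {100,100,100};                       // grid points per dimension
* Ghost<3,float> g(0.1);                              // ghost extension
*
* // distributed 3D grid storing a float[3] per grid point
* grid_dist_id<3, float, aggregate<float[3]>> g_dist(sz, domain, g);
* \endcode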
......@@ -88,13 +90,12 @@ int main(int argc, char* argv[])
//! \cond [grid instantiation] \endcond
/*!
* \page grid_0_simple Grid 0 simple
* \page grid_0_simple Simple usage
*
* ## Loop over grid points ## {#e0_s_loop_gp}
*
* Get an iterator that goes through all the grid points. In this
* example we use iterators. Iterators are convenient way to explore/iterate data-structures in an
* convenient and easy way.
* example we use iterators. Iterators are a convenient way to explore/iterate data structures.
*
* \snippet Grid/0_simple/main.cpp get iterator
* \snippet Grid/0_simple/main.cpp get iterator2
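*
* The pattern sketched below is the one used throughout these examples (the
* property index 0 and the assigned value are placeholders):
*
* \code{.cpp}
* auto dom = g_dist.getDomainIterator();
*
* while (dom.isNext())
* {
*     auto key = dom.get();                    // local grid key
*
*     g_dist.template get<0>(key)[0] = 1.0;    // write the first component
*
*     ++dom;                                   // next point
* }
* \endcode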
......@@ -115,7 +116,7 @@ int main(int argc, char* argv[])
//! \cond [get iterator] \endcond
/*!
* \page grid_0_simple Grid 0 simple
* \page grid_0_simple Simple usage
*
* ## Grid coordinates ## {#e0_s_grid_coord}
*
......@@ -135,7 +136,7 @@ int main(int argc, char* argv[])
//! \cond [local grid] \endcond
/*!
* \page grid_0_simple Grid 0 simple
* \page grid_0_simple Simple usage
*
* **Short explanation**
*
......@@ -144,7 +145,7 @@ int main(int argc, char* argv[])
*/
/*!
*
* \page grid_0_simple Grid 0 simple
* \page grid_0_simple Simple usage
*
\htmlonly <a href="#" onclick="if (document.getElementById('long-explanation-div').style.display == 'none') {document.getElementById('long-explanation-div').style.display = 'block'} else {document.getElementById('long-explanation-div').style.display = 'none'}" >Long Explanation</a> \endhtmlonly
*
......@@ -185,7 +186,7 @@ int main(int argc, char* argv[])
PX,Y Processor X, sub-domain Y</pre><p>The point # has</p>
<ul>
<li>Global/Real coordinates are (3,2)</li>
<li>Global/Real coordinates (3,2)</li>
<li>Local grid coordinates are Sub-domain = 0, grid position = (0,0)</li>
</ul>
<p>Here we convert the local grid coordinates into global coordinates. key_g internally stores 3 integers that identify the position of the grid point in global coordinates</p>
......@@ -193,7 +194,7 @@ PX,Y Processor X, sub-domain Y</pre><p>The point # has</p>
</div>
\endhtmlonly
*/
/*! \page grid_0_simple Grid 0 simple
/*! \page grid_0_simple Simple usage
*
* \snippet Grid/0_simple/main.cpp global coord
*
......@@ -206,7 +207,7 @@ PX,Y Processor X, sub-domain Y</pre><p>The point # has</p>
//! \cond [global coord] \endcond
/*!
* \page grid_0_simple Grid 0 simple
* \page grid_0_simple Simple usage
*
* ## Assign properties ## {#grid_assign}
*
......@@ -230,8 +231,6 @@ PX,Y Processor X, sub-domain Y</pre><p>The point # has</p>
//! \cond [get iterator2] \endcond
//! ...
// next point
++dom;
}
......@@ -239,7 +238,7 @@ PX,Y Processor X, sub-domain Y</pre><p>The point # has</p>
//! \cond [get iterator2] \endcond
/*!
* \page grid_0_simple Grid 0 simple
* \page grid_0_simple Simple usage
*
* Each sub-domain has an extended part that is materially contained in
* another processor. The function ghost_get guarantees (after it returns) that this extended part
......@@ -256,7 +255,7 @@ PX,Y Processor X, sub-domain Y</pre><p>The point # has</p>
//! \cond [ghost get] \endcond
/*!
* \page grid_0_simple Grid 0 simple
* \page grid_0_simple Simple usage
*
* count contains the number of points held by the local processor. If we are interested in the total number across
* the processors, we can use the function sum to sum numbers across processors (a minimal sketch follows the snippet below). First we have to get an instance of Vcluster, queue an operation of sum with
......@@ -285,7 +284,7 @@ PX,Y Processor X, sub-domain Y</pre><p>The point # has</p>
//! \cond [reduce] \endcond
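/*!
 * A minimal sketch of the reduction described above (the variable name count
 * is taken from the text; treat the rest as an illustration):
 *
 * \code{.cpp}
 * // count holds the local number of grid points (computed above)
 * Vcluster & v_cl = create_vcluster();   // get the communicator instance
 * v_cl.sum(count);                       // queue the sum of the local counters
 * v_cl.execute();                        // execute all queued operations
 * \endcode
 */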
/*!
* \page grid_0_simple Grid 0 simple
* \page grid_0_simple Simple usage
*
* ## VTK and visualization ## {#e0_s_VTK_vis}
*
......@@ -293,6 +292,10 @@ PX,Y Processor X, sub-domain Y</pre><p>The point # has</p>
* The function write by default produces VTK files, one for each processor, that can be visualized
* with programs like ParaView (a one-line sketch follows this section)
*
* \htmlonly
* <img src="http://ppmcore.mpi-cbg.de/web/images/examples/0_simple_grid/0_simple_grid_init.jpg"/>
* \endhtmlonly
*
* \snippet Grid/0_simple/main.cpp write
*
*/
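/*!
 * A one-line sketch of that call (the file name is a placeholder):
 *
 * \code{.cpp}
 * g_dist.write("output");   // one VTK file per processor
 * \endcode
 */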
......@@ -304,7 +307,7 @@ PX,Y Processor X, sub-domain Y</pre><p>The point # has</p>
//! \cond [write] \endcond
/*!
* \page grid_0_simple Grid 0 simple
* \page grid_0_simple Simple usage
*
* ## Decomposition ## {#grid_dec}
*
......@@ -313,6 +316,15 @@ PX,Y Processor X, sub-domain Y</pre><p>The point # has</p>
*
* \snippet Grid/0_simple/main.cpp out_dec
*
* \htmlonly
* <img src="http://ppmcore.mpi-cbg.de/web/images/examples/0_simple_grid/0_simple_grid_dec.jpg"/>
* \endhtmlonly
*
* Here we see the decomposition in 3D for 2 processors. The red wire-frame box is the processor 0
* sub-domain. The blue one is the processor 1 sub-domain. The red solid box is the extended part for processor 0;
* the blue solid part is the extended part for processor 1
*
*
*/
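/*!
 * A minimal sketch of how the decomposition shown above can be written out
 * for visualization (the accessor chain and file name are assumptions, used
 * here only as an illustration):
 *
 * \code{.cpp}
 * g_dist.getDecomposition().write("out_dec");   // one VTK file per processor
 * \endcode
 */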
//! \cond [out_dec] \endcond
......@@ -322,7 +334,7 @@ PX,Y Processor X, sub-domain Y</pre><p>The point # has</p>
//! \cond [out_dec] \endcond
/*!
* \page grid_0_simple Grid 0 simple
* \page grid_0_simple Simple usage
*
* ## Finalize ## {#finalize}
*
......@@ -339,7 +351,7 @@ PX,Y Processor X, sub-domain Y</pre><p>The point # has</p>
//! \cond [finalize] \endcond
/*!
* \page grid_0_simple Grid 0 simple
* \page grid_0_simple Simple usage
*
* # Full code # {#code}
*
......
......@@ -3,7 +3,7 @@
#include "Decomposition/CartDecomposition.hpp"
/*!
* \page Grid_1_stencil Grid 1 stencil
* \page Grid_1_stencil Stencil example
*
*
* # Stencil example and ghost # {#e1_st}
......@@ -15,7 +15,7 @@
*/
/*!
* \page Grid_1_stencil Grid 1 stencil
* \page Grid_1_stencil Stencil example
*
* Define some convenient constants and types
*
......@@ -37,7 +37,7 @@ constexpr size_t B = 0;
int main(int argc, char* argv[])
{
/*!
* \page Grid_1_stencil Grid 1 stencil
* \page Grid_1_stencil Stencil example
*
* ## Initialization ## {#e1_st_init}
*
......@@ -66,7 +66,7 @@ int main(int argc, char* argv[])
//! \cond [parameters] \endcond
/*!
* \page Grid_1_stencil Grid 1 stencil
* \page Grid_1_stencil Stencil example
*
* ## Grid create ## {#e1_st_inst}
*
......@@ -89,7 +89,7 @@ int main(int argc, char* argv[])
//! \cond [grid] \endcond
/*!
* \page Grid_1_stencil Grid 1 stencil
* \page Grid_1_stencil Stencil example
*
* ## Loop over grid points ## {#e1_s_loop_gp}
*
......@@ -112,7 +112,7 @@ int main(int argc, char* argv[])
//! \cond [iterator] \endcond
/*!
* \page Grid_1_stencil Grid 1 stencil
* \page Grid_1_stencil Stencil example
*
* Inside the cycle we get the local grid key
*
......@@ -129,7 +129,7 @@ int main(int argc, char* argv[])
//! \cond [local key] \endcond
/*!
* \page Grid_1_stencil Grid 1 stencil
* \page Grid_1_stencil Stencil example
*
* We convert the local grid position into a global position. key_g contains 3 integers that identify the position
* of the grid point in global coordinates
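*
* A minimal sketch of that conversion (the accessor name getGKey is assumed
* from the library; the surrounding names are placeholders):
*
* \code{.cpp}
* auto key   = dom.get();             // local key from the iterator
* auto key_g = g_dist.getGKey(key);   // global (i,j,k) coordinates
* size_t i   = key_g.get(0);          // individual components
* \endcode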
......@@ -147,7 +147,7 @@ int main(int argc, char* argv[])
//! \cond [global key] \endcond
/*!
* \page Grid_1_stencil Grid 1 stencil
* \page Grid_1_stencil Stencil example
*
* we write on the grid point at position (i,j,k) the value i*i + j*j + k*k into property A.
* Mathematically it is equivalent to the function
......@@ -172,7 +172,7 @@ int main(int argc, char* argv[])
//! \cond [iterator2] \endcond
/*!
* \page Grid_1_stencil Grid 1 stencil
* \page Grid_1_stencil Stencil example
*
* ## Ghost ## {#e1_s_ghost}
*
......@@ -191,7 +191,7 @@ int main(int argc, char* argv[])
//! \cond [ghost] \endcond
/*!
* \page Grid_1_stencil Grid 1 stencil
* \page Grid_1_stencil Stencil example
*
* Get another iterator, iterate across all the domain points calculating a Laplace stencil, and write the
* result into B
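*
* A sketch of one such update (the dimension indices 0,1,2 and the move()-based
* neighbour access are illustrative; A and B are the property indices defined
* at the top of this example):
*
* \code{.cpp}
* auto dom2 = g_dist.getDomainIterator();
*
* while (dom2.isNext())
* {
*     auto key = dom2.get();
*
*     // 6-point Laplace stencil in 3D
*     g_dist.template get<B>(key) =
*           g_dist.template get<A>(key.move(0, 1)) + g_dist.template get<A>(key.move(0,-1))
*         + g_dist.template get<A>(key.move(1, 1)) + g_dist.template get<A>(key.move(1,-1))
*         + g_dist.template get<A>(key.move(2, 1)) + g_dist.template get<A>(key.move(2,-1))
*         - 6.0f * g_dist.template get<A>(key);
*
*     ++dom2;
* }
* \endcode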
......@@ -220,7 +220,7 @@ int main(int argc, char* argv[])
//! \cond [laplacian] \endcond
/*!
* \page Grid_1_stencil Grid 1 stencil
* \page Grid_1_stencil Stencil example
*
*
* Finally we want a nice output to visualize the information stored by the distributed grid
......@@ -238,7 +238,7 @@ int main(int argc, char* argv[])
//! \cond [output] \endcond
/*!
* \page Grid_1_stencil Grid 1 stencil
* \page Grid_1_stencil Stencil example
*
* Deinitialize the library
*
......@@ -253,7 +253,7 @@ int main(int argc, char* argv[])
//! \cond [finalize] \endcond
/*!
* \page Grid_1_stencil Grid 1 stencil
* \page Grid_1_stencil Stencil example
*
* # Full code # {#code}
*
......
......@@ -2,7 +2,7 @@
#include "data_type/aggregate.hpp"
/*!
* \page Grid_2_solve_eq Grid 2 solve eq
* \page Grid_2_solve_eq Solve equation
*
* [TOC]
*
......@@ -14,7 +14,11 @@
*
* \f$u(x,y) = 0 \f$
*
* at the boundary
* at the boundary. This leads to the solution shown in the picture
*
* \htmlonly
* <img src="http://ppmcore.mpi-cbg.de/web/images/examples/2_solve_eq/solution.jpg"/>
* \endhtmlonly
*
*
* ## Field initialization ## {#e2_se_finit}
......@@ -26,7 +30,7 @@
void init(grid_dist_id<2,double,aggregate<double> > & g_dist, const size_t (& sz)[2])
{
/*!
* \page Grid_2_solve_eq Grid 2 solve eq
* \page Grid_2_solve_eq Solve equation
*
* In order to initialize the field U, first we get an iterator that covers
* the domain + ghost, to iterate over all the grid points.
......@@ -47,7 +51,7 @@ void init(grid_dist_id<2,double,aggregate<double> > & g_dist, const size_t (& sz
//! \cond [iterator] \endcond
/*!
* \page Grid_2_solve_eq Grid 2 solve eq
* \page Grid_2_solve_eq Solve equation
*
* Get the local grid key
*
......@@ -64,7 +68,7 @@ void init(grid_dist_id<2,double,aggregate<double> > & g_dist, const size_t (& sz
//! \cond [local key] \endcond
/*!
* \page Grid_2_solve_eq Grid 2 solve eq
* \page Grid_2_solve_eq Solve equation
*
*
* Here we convert the local grid position, into global position.
......@@ -84,7 +88,7 @@ void init(grid_dist_id<2,double,aggregate<double> > & g_dist, const size_t (& sz
//! \cond [global key] \endcond
/*!
* \page Grid_2_solve_eq Grid 2 solve eq
* \page Grid_2_solve_eq Solve equation
*
* Initialize to 0, domain + boundary
*
......@@ -129,7 +133,7 @@ constexpr int y = 1;
int main(int argc, char* argv[])
{
/*!
* \page Grid_2_solve_eq Grid 2 solve eq
* \page Grid_2_solve_eq Solve equation
*
* ## Initialization ##
*
......@@ -148,7 +152,7 @@ int main(int argc, char* argv[])
//! \cond [ofp_init] \endcond
/*!
* \page Grid_2_solve_eq Grid 2 solve eq
* \page Grid_2_solve_eq Solve equation
*
* ## Grid instantiation and initialization ##
*
......@@ -174,7 +178,7 @@ int main(int argc, char* argv[])
//! \cond [ofp_par] \endcond
/*!
* \page Grid_2_solve_eq Grid 2 solve eq
* \page Grid_2_solve_eq Solve equation
*
* Create a distributed grid in a 2D space (1st template parameter) with double precision (2nd template parameter);
* each grid point contains a scalar (double),
......@@ -202,7 +206,7 @@ int main(int argc, char* argv[])
//! \cond [grid inst] \endcond
/*!
* \page Grid_2_solve_eq Grid 2 solve eq
* \page Grid_2_solve_eq Solve equation
*
* Initialize U and fill the boundary conditions
*
......@@ -219,7 +223,7 @@ int main(int argc, char* argv[])
//! \cond [grid init] \endcond
/*!
* \page Grid_2_solve_eq Grid 2 solve eq
* \page Grid_2_solve_eq Solve equation
*
* ## %Ghost synchronization ##
*
......@@ -238,7 +242,7 @@ int main(int argc, char* argv[])
//! \cond [ghost sync] \endcond
/*!
* \page Grid_2_solve_eq Grid 2 solve eq
* \page Grid_2_solve_eq Solve equation
*
* ## Red-Black algorithm ##
*
......@@ -257,7 +261,7 @@ int main(int argc, char* argv[])
for (size_t i = 0 ; i < 10000 ; i++)
{
/*!
* \page Grid_2_solve_eq Grid 2 solve eq
* \page Grid_2_solve_eq Solve equation
*
* Get an iterator that goes through the points of the grid (no ghost)
* to compute one iteration.
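*
* A sketch of what one relaxation sweep can look like for a 2D Laplacian
* (the right-hand side value b and the property index 0 are placeholders,
* not the exact stencil used by this example; x and y are the dimension
* indices defined earlier in this file):
*
* \code{.cpp}
* const double b = 1.0;                 // placeholder source term (assumption)
*
* auto it = g_dist.getDomainIterator();
*
* while (it.isNext())
* {
*     auto key = it.get();
*
*     g_dist.template get<0>(key) = ( g_dist.template get<0>(key.move(x, 1))
*                                   + g_dist.template get<0>(key.move(x,-1))
*                                   + g_dist.template get<0>(key.move(y, 1))
*                                   + g_dist.template get<0>(key.move(y,-1))
*                                   + b ) / 4.0;
*
*     ++it;
* }
* \endcode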
......@@ -312,7 +316,7 @@ int main(int argc, char* argv[])
//! \cond [gs_it] \endcond
/*!
* \page Grid_2_solve_eq Grid 2 solve eq
* \page Grid_2_solve_eq Solve equation
*
*
* Once an iteration is done we have to synchronize the ghosts
......@@ -336,7 +340,7 @@ int main(int argc, char* argv[])