Commit cd9afeba authored by incardon's avatar incardon

Latest modules

parent 1c846340
......@@ -34,7 +34,8 @@ All notable changes to this project will be documented in this file.
become
CellList<3, double, Mem_fast<3, double>, shift<3, double>>
- getIterator in CellList changed getCellIterator
- Gris iterator types has changes (one additional template parameter)
- Grid iterator types have changed (one additional template parameter)
- FDScheme the constructor now has one parameter less (Parameter number 4 has been removed) (see Stokes_Flow examples in Numerics)
## [0.8.0] 28 February 2016
......
......@@ -4,13 +4,15 @@
/*! \page grid Grid
*
* \subpage grid_0_simple
* \subpage grid_1_save_load
* \subpage Grid_1_stencil
* \subpage Grid_2_solve_eq
* \subpage Grid_3_gs
* \subpage Grid_3_gs_3D
*
*/
/*! \page grid_0_simple Grid 0 simple
/*! \page grid_0_simple Simple usage
[TOC]
......@@ -29,7 +31,7 @@
int main(int argc, char* argv[])
{
/*! \page grid_0_simple Grid 0 simple
/*! \page grid_0_simple Simple usage
*
* ## Initialization ## {#e0_s_initialization}
*
......@@ -55,18 +57,18 @@ int main(int argc, char* argv[])
size_t sz[3] = {100,100,100};
// Ghost part
Ghost<3,float> g(0.01);
Ghost<3,float> g(0.1);
//! \cond [initialization] \endcond
/*! \page grid_0_simple Grid 0 simple
/*! \page grid_0_simple Simple usage
*
* ## Grid instantiation ## {#e0_s_grid_inst}
*
* Here we are creating a distributed grid in defined by the following parameters
* Here we are creating a distributed grid defined by the following parameters
*
* * 3 dimensionality of the grid
* * float Type used for the spatial coordinates
* * float type used for the spatial coordinates
* * each grid point contains a vector of dimension 3 (float[3]),
* * float[3] is the information stored by each grid point
* the list of properties must be put into an aggregate data structure aggregate<prop1,prop2,prop3, ... >
......@@ -88,13 +90,12 @@ int main(int argc, char* argv[])
//! \cond [grid instantiation] \endcond
/*!
* \page grid_0_simple Grid 0 simple
* \page grid_0_simple Simple usage
*
* ## Loop over grid points ## {#e0_s_loop_gp}
*
* Get an iterator that go through all the grid points. In this
* example we use iterators. Iterators are convenient way to explore/iterate data-structures in an
* convenient and easy way.
* example we use iterators. Iterators are a convenient way to explore/iterate data-structures.
*
* \snippet Grid/0_simple/main.cpp get iterator
* \snippet Grid/0_simple/main.cpp get iterator2
......@@ -115,7 +116,7 @@ int main(int argc, char* argv[])
//! \cond [get iterator] \endcond
/*!
* \page grid_0_simple Grid 0 simple
* \page grid_0_simple Simple usage
*
* ## Grid coordinates ## {#e0_s_grid_coord}
*
......@@ -135,7 +136,7 @@ int main(int argc, char* argv[])
//! \cond [local grid] \endcond
/*!
* \page grid_0_simple Grid 0 simple
* \page grid_0_simple Simple usage
*
* **Short explanation**
*
......@@ -144,7 +145,7 @@ int main(int argc, char* argv[])
*/
/*!
*
* \page grid_0_simple Grid 0 simple
* \page grid_0_simple Simple usage
*
\htmlonly <a href="#" onclick="if (document.getElementById('long-explanation-div').style.display == 'none') {document.getElementById('long-explanation-div').style.display = 'block'} else {document.getElementById('long-explanation-div').style.display = 'none'}" >Long Explanation</a> \endhtmlonly
*
......@@ -185,7 +186,7 @@ int main(int argc, char* argv[])
PX,Y Processor X, sub-domain Y</pre><p>The point # has</p>
<ul>
<li>Global/Real coordinates are (3,2)</li>
<li>Global/Real coordinates (3,2)</li>
<li>Local grid coordinates are Sub-domain = 0, grid position = (0,0)</li>
</ul>
<p>Here we convert the local grid coordinates, into global coordinates. key_g internally store 3 integers that identify the position of the grid point in global coordinates</p>
......@@ -193,7 +194,7 @@ PX,Y Processor X, sub-domain Y</pre><p>The point # has</p>
</div>
\endhtmlonly
*/
/*! \page grid_0_simple Grid 0 simple
/*! \page grid_0_simple Simple usage
*
* \snippet Grid/0_simple/main.cpp global coord
*
......@@ -206,7 +207,7 @@ PX,Y Processor X, sub-domain Y</pre><p>The point # has</p>
//! \cond [global coord] \endcond
/*!
* \page grid_0_simple Grid 0 simple
* \page grid_0_simple Simple usage
*
* ## Assign properties ## {#grid_assign}
*
......@@ -230,8 +231,6 @@ PX,Y Processor X, sub-domain Y</pre><p>The point # has</p>
//! \cond [get iterator2] \endcond
//! ...
// next point
++dom;
}
......@@ -239,7 +238,7 @@ PX,Y Processor X, sub-domain Y</pre><p>The point # has</p>
//! \cond [get iterator2] \endcond
/*!
* \page grid_0_simple Grid 0 simple
* \page grid_0_simple Simple usage
*
* Each sub-domain has an extended part, that is materially contained in
* another processor. The function ghost_get guarantees (after return) that this extended part
......@@ -256,7 +255,7 @@ PX,Y Processor X, sub-domain Y</pre><p>The point # has</p>
//! \cond [ghost get] \endcond
/*!
* \page grid_0_simple Grid 0 simple
* \page grid_0_simple Simple usage
*
* count contains the number of points the local processor holds; if we are interested in counting the total number across the processors
* we can use the function sum, to sum numbers across processors. First we have to get an instance of Vcluster, queue an operation of sum with
......@@ -285,7 +284,7 @@ PX,Y Processor X, sub-domain Y</pre><p>The point # has</p>
//! \cond [reduce] \endcond
/*!
* \page grid_0_simple Grid 0 simple
* \page grid_0_simple Simple usage
*
* ## VTK and visualization ## {#e0_s_VTK_vis}
*
......@@ -293,6 +292,10 @@ PX,Y Processor X, sub-domain Y</pre><p>The point # has</p>
* The function write by default produces VTK files, one for each processor, that can be visualized
* with programs like ParaView
*
* \htmlonly
* <img src="http://ppmcore.mpi-cbg.de/web/images/examples/0_simple_grid/0_simple_grid_init.jpg"/>
* \endhtmlonly
*
* \snippet Grid/0_simple/main.cpp write
*
*/
......@@ -304,7 +307,7 @@ PX,Y Processor X, sub-domain Y</pre><p>The point # has</p>
//! \cond [write] \endcond
/*!
* \page grid_0_simple Grid 0 simple
* \page grid_0_simple Simple usage
*
* ## Decomposition ## {#grid_dec}
*
......@@ -313,6 +316,15 @@ PX,Y Processor X, sub-domain Y</pre><p>The point # has</p>
*
* \snippet Grid/0_simple/main.cpp out_dec
*
* \htmlonly
* <img src="http://ppmcore.mpi-cbg.de/web/images/examples/0_simple_grid/0_simple_grid_dec.jpg"/>
* \endhtmlonly
*
* Here we see the decomposition in 3D for 2 processors. The red box in wire-frame is the processor 0
* sub-domain. The blue one is the processor 1 sub-domain. The red solid box is the extended part for processor 0,
* the blue solid part is the extended part for processor 1
*
*
*/
//! \cond [out_dec] \endcond
......@@ -322,7 +334,7 @@ PX,Y Processor X, sub-domain Y</pre><p>The point # has</p>
//! \cond [out_dec] \endcond
/*!
* \page grid_0_simple Grid 0 simple
* \page grid_0_simple Simple usage
*
* ## Finalize ## {#finalize}
*
......@@ -339,7 +351,7 @@ PX,Y Processor X, sub-domain Y</pre><p>The point # has</p>
//! \cond [finalize] \endcond
/*!
* \page grid_0_simple Grid 0 simple
* \page grid_0_simple Simple usage
*
* # Full code # {#code}
*
......
......@@ -3,7 +3,7 @@
#include "Decomposition/CartDecomposition.hpp"
/*!
* \page Grid_1_stencil Grid 1 stencil
* \page Grid_1_stencil Stencil example
*
*
* # Stencil example and ghost # {#e1_st}
......@@ -15,7 +15,7 @@
*/
/*!
* \page Grid_1_stencil Grid 1 stencil
* \page Grid_1_stencil Stencil example
*
* Define some convenient constants and types
*
......@@ -37,7 +37,7 @@ constexpr size_t B = 0;
int main(int argc, char* argv[])
{
/*!
* \page Grid_1_stencil Grid 1 stencil
* \page Grid_1_stencil Stencil example
*
* ## Initialization ## {#e1_st_init}
*
......@@ -66,7 +66,7 @@ int main(int argc, char* argv[])
//! \cond [parameters] \endcond
/*!
* \page Grid_1_stencil Grid 1 stencil
* \page Grid_1_stencil Stencil example
*
* ## Grid create ## {#e1_st_inst}
*
......@@ -89,7 +89,7 @@ int main(int argc, char* argv[])
//! \cond [grid] \endcond
/*!
* \page Grid_1_stencil Grid 1 stencil
* \page Grid_1_stencil Stencil example
*
* ## Loop over grid points ## {#e1_s_loop_gp}
*
......@@ -112,7 +112,7 @@ int main(int argc, char* argv[])
//! \cond [iterator] \endcond
/*!
* \page Grid_1_stencil Grid 1 stencil
* \page Grid_1_stencil Stencil example
*
* Inside the cycle we get the local grid key
*
......@@ -129,7 +129,7 @@ int main(int argc, char* argv[])
//! \cond [local key] \endcond
/*!
* \page Grid_1_stencil Grid 1 stencil
* \page Grid_1_stencil Stencil example
*
* We convert the local grid position into a global position; key_g contains 3 integers that identify the position
* of the grid point in global coordinates
......@@ -147,7 +147,7 @@ int main(int argc, char* argv[])
//! \cond [global key] \endcond
/*!
* \page Grid_1_stencil Grid 1 stencil
* \page Grid_1_stencil Stencil example
*
* we write on the grid point of position (i,j,k) the value i*i + j*j + k*k on the property A.
* Mathematically is equivalent to the function
......@@ -172,7 +172,7 @@ int main(int argc, char* argv[])
//! \cond [iterator2] \endcond
/*!
* \page Grid_1_stencil Grid 1 stencil
* \page Grid_1_stencil Stencil example
*
* ## Ghost ## {#e1_s_ghost}
*
......@@ -191,7 +191,7 @@ int main(int argc, char* argv[])
//! \cond [ghost] \endcond
/*!
* \page Grid_1_stencil Grid 1 stencil
* \page Grid_1_stencil Stencil example
*
* Get again another iterator, iterate across all the domain points, calculating a Laplace stencil. Write the
* result on B
......@@ -220,7 +220,7 @@ int main(int argc, char* argv[])
//! \cond [laplacian] \endcond
/*!
* \page Grid_1_stencil Grid 1 stencil
* \page Grid_1_stencil Stencil example
*
*
* Finally we want a nice output to visualize the information stored by the distributed grid
......@@ -238,7 +238,7 @@ int main(int argc, char* argv[])
//! \cond [output] \endcond
/*!
* \page Grid_1_stencil Grid 1 stencil
* \page Grid_1_stencil Stencil example
*
* Deinitialize the library
*
......@@ -253,7 +253,7 @@ int main(int argc, char* argv[])
//! \cond [finalize] \endcond
/*!
* \page Grid_1_stencil Grid 1 stencil
* \page Grid_1_stencil Stencil example
*
* # Full code # {#code}
*
......
......@@ -2,7 +2,7 @@
#include "data_type/aggregate.hpp"
/*!
* \page Grid_2_solve_eq Grid 2 solve eq
* \page Grid_2_solve_eq Solve equation
*
* [TOC]
*
......@@ -14,7 +14,11 @@
*
* \f$u(x,y) = 0 \f$
*
* at the boundary
* at the boundary. This lead to the solution shown in the picture
*
* \htmlonly
* <img src="http://ppmcore.mpi-cbg.de/web/images/examples/2_solve_eq/solution.jpg"/>
* \endhtmlonly
*
*
* ## Field initialization ## {#e2_se_finit}
......@@ -26,7 +30,7 @@
void init(grid_dist_id<2,double,aggregate<double> > & g_dist, const size_t (& sz)[2])
{
/*!
* \page Grid_2_solve_eq Grid 2 solve eq
* \page Grid_2_solve_eq Solve equation
*
* In order to initialize the field U, first we get an iterator that covers
* domain + Ghost to iterate all the grid points.
......@@ -47,7 +51,7 @@ void init(grid_dist_id<2,double,aggregate<double> > & g_dist, const size_t (& sz
//! \cond [iterator] \endcond
/*!
* \page Grid_2_solve_eq Grid 2 solve eq
* \page Grid_2_solve_eq Solve equation
*
* Get the local grid key
*
......@@ -64,7 +68,7 @@ void init(grid_dist_id<2,double,aggregate<double> > & g_dist, const size_t (& sz
//! \cond [local key] \endcond
/*!
* \page Grid_2_solve_eq Grid 2 solve eq
* \page Grid_2_solve_eq Solve equation
*
*
* Here we convert the local grid position, into global position.
......@@ -84,7 +88,7 @@ void init(grid_dist_id<2,double,aggregate<double> > & g_dist, const size_t (& sz
//! \cond [global key] \endcond
/*!
* \page Grid_2_solve_eq Grid 2 solve eq
* \page Grid_2_solve_eq Solve equation
*
* Initialize to 0, domain + boundary
*
......@@ -129,7 +133,7 @@ constexpr int y = 1;
int main(int argc, char* argv[])
{
/*!
* \page Grid_2_solve_eq Grid 2 solve eq
* \page Grid_2_solve_eq Solve equation
*
* ## Initialization ##
*
......@@ -148,7 +152,7 @@ int main(int argc, char* argv[])
//! \cond [ofp_init] \endcond
/*!
* \page Grid_2_solve_eq Grid 2 solve eq
* \page Grid_2_solve_eq Solve equation
*
* ## Grid instantiation and initialization ##
*
......@@ -174,7 +178,7 @@ int main(int argc, char* argv[])
//! \cond [ofp_par] \endcond
/*!
* \page Grid_2_solve_eq Grid 2 solve eq
* \page Grid_2_solve_eq Solve equation
*
* Create a distributed grid in a 2D (1st template parameter) space with double precision (2nd template parameter)
* each grid point contain a scalar (double),
......@@ -202,7 +206,7 @@ int main(int argc, char* argv[])
//! \cond [grid inst] \endcond
/*!
* \page Grid_2_solve_eq Grid 2 solve eq
* \page Grid_2_solve_eq Solve equation
*
* Initialize U and fill the boundary conditions
*
......@@ -219,7 +223,7 @@ int main(int argc, char* argv[])
//! \cond [grid init] \endcond
/*!
* \page Grid_2_solve_eq Grid 2 solve eq
* \page Grid_2_solve_eq Solve equation
*
* ## %Ghost synchronization ##
*
......@@ -238,7 +242,7 @@ int main(int argc, char* argv[])
//! \cond [ghost sync] \endcond
/*!
* \page Grid_2_solve_eq Grid 2 solve eq
* \page Grid_2_solve_eq Solve equation
*
* ## Red-Black algorithm ##
*
......@@ -257,7 +261,7 @@ int main(int argc, char* argv[])
for (size_t i = 0 ; i < 10000 ; i++)
{
/*!
* \page Grid_2_solve_eq Grid 2 solve eq
* \page Grid_2_solve_eq Solve equation
*
* Get an iterator that goes through the points of the grid (No ghost)
* To compute one iteration.
......@@ -312,7 +316,7 @@ int main(int argc, char* argv[])
//! \cond [gs_it] \endcond
/*!
* \page Grid_2_solve_eq Grid 2 solve eq
* \page Grid_2_solve_eq Solve equation
*
*
* Once an iteration is done we have to synchronize the ghosts
......@@ -336,7 +340,7 @@ int main(int argc, char* argv[])
}
/*!
* \page Grid_2_solve_eq Grid 2 solve eq
* \page Grid_2_solve_eq Solve equation
*
* The full Algorithm look like this
*
......@@ -400,7 +404,7 @@ int main(int argc, char* argv[])
//! \cond [sol stat] \endcond
/*!
* \page Grid_2_solve_eq Grid 2 solve eq
* \page Grid_2_solve_eq Solve equation
*
* ## VTK Write and visualization ##
*
......@@ -422,7 +426,7 @@ int main(int argc, char* argv[])
/*!
* \page Grid_2_solve_eq Grid 2 solve eq
* \page Grid_2_solve_eq Solve equation
*
* ## Finalize ##
*
......@@ -439,9 +443,9 @@ int main(int argc, char* argv[])
//! \cond [finalize] \endcond
/*!
* \page Grid_2_solve_eq Grid 2 solve eq
* \page Grid_2_solve_eq Solve equation
*
* # Full code # {#code}
* # Full code # {#e2_solve_eq_code}
*
* \include Grid/2_solve_eq/main.cpp
*
......
......@@ -3,7 +3,7 @@
#include "timer.hpp"
/*!
* \page Grid_3_gs Grid 3 Gray Scott
* \page Grid_3_gs Gray Scott 2D
*
* # Solving a gray scott-system # {#e3_gs_gray_scott}
*
......@@ -34,7 +34,7 @@ constexpr int y = 1;
//! \cond [constants] \endcond
/*!
* \page Grid_3_gs Grid 3 Gray Scott
* \page Grid_3_gs Gray Scott 2D
*
* We also define an init function. This function initialize the species U and V. In the following we are going into the
* detail of this function
......@@ -52,7 +52,7 @@ void init(grid_dist_id<2,double,aggregate<double,double> > & Old, grid_dist_id<2
//! \cond [init fun] \endcond
/*!
* \page Grid_3_gs Grid 3 Gray Scott
* \page Grid_3_gs Gray Scott 2D
*
* Here we initialize U and V for the full domain, iterating across the grid points. For the calculation
* We are using 2 grids one Old and New. We initialize Old with the initial condition concentration of the
......@@ -86,7 +86,7 @@ void init(grid_dist_id<2,double,aggregate<double,double> > & Old, grid_dist_id<2
//! \cond [init uv] \endcond
/*!
* \page Grid_3_gs Grid 3 Gray Scott
* \page Grid_3_gs Gray Scott 2D
*
* After we initialized the full grid, we create a perturbation in the domain with different values.
* We do in the part of space: 1.55 < x < 1.85 and 1.55 < y < 1.85. Or more precisely on the points included
......@@ -125,7 +125,7 @@ void init(grid_dist_id<2,double,aggregate<double,double> > & Old, grid_dist_id<2
int main(int argc, char* argv[])
{
/*!
* \page Grid_3_gs Grid 3 Gray Scott
* \page Grid_3_gs Gray Scott 2D
*
* ## Initialization ##
*
......@@ -185,7 +185,7 @@ int main(int argc, char* argv[])
//! \cond [init lib] \endcond
/*!
* \page Grid_3_gs Grid 3 Gray Scott
* \page Grid_3_gs Gray Scott 2D
*
* Here we create 2 distributed grids in 2D, Old and New. In particular, because we want
* the second grid to be distributed across processors in the same way, we pass the decomposition
......@@ -210,7 +210,7 @@ int main(int argc, char* argv[])
//! \cond [init grid] \endcond
/*!
* \page Grid_3_gs Grid 3 Gray Scott
* \page Grid_3_gs Gray Scott 2D
*
* We use the function init to initialize U and V on the grid Old
*
......@@ -225,7 +225,7 @@ int main(int argc, char* argv[])
//! \cond [init uvc] \endcond
/*!
* \page Grid_3_gs Grid 3 Gray Scott
* \page Grid_3_gs Gray Scott 2D
*
* ## Time stepping ##
*
......@@ -323,7 +323,7 @@ int main(int argc, char* argv[])
//! \cond [time stepping] \endcond
/*!
* \page Grid_3_gs Grid 3 Gray Scott
* \page Grid_3_gs Gray Scott 2D
*
* ## Finalize ##
*
......
......@@ -3,7 +3,7 @@
#include "timer.hpp"
/*!
* \page Grid_3_gs Grid 3 Gray Scott in 3D
* \page Grid_3_gs_3D Grid 3 Gray Scott in 3D
*
* # Solving a gray scott-system in 3D # {#e3_gs_gray_scott}
*
......@@ -107,7 +107,7 @@ int main(int argc, char* argv[])
//! \cond [init lib] \endcond
/*!
* \page Grid_3_gs Grid 3 Gray Scott
* \page Grid_3_gs_3D Grid 3 Gray Scott
*
* Here we create 2 distributed grid in 2D Old and New. In particular because we want that
* the second grid is distributed across processors in the same way we pass the decomposition
......@@ -208,7 +208,7 @@ int main(int argc, char* argv[])
//! \cond [time stepping] \endcond
/*!
* \page Grid_3_gs Grid 3 Gray Scott
* \page Grid_3_gs_3D Grid 3 Gray Scott
*
* ## Finalize ##
*
......
......@@ -300,7 +300,7 @@ int main(int argc, char* argv[])
Ghost<2,long int> stencil_max(1);
// Finite difference scheme
FDScheme<lid_nn> fd(pd, stencil_max, domain, g_dist.getGridInfo(), g_dist);
FDScheme<lid_nn> fd(pd, stencil_max, domain, g_dist);
//! \cond [fd scheme] \endcond
......
......@@ -291,7 +291,7 @@ int main(int argc, char* argv[])
Ghost<2,long int> stencil_max(1);
// Finite difference scheme
FDScheme<lid_nn> fd(pd, stencil_max, domain, g_dist.getGridInfo(), g_dist);
FDScheme<lid_nn> fd(pd, stencil_max, domain, g_dist);
//! \cond [fd scheme] \endcond
......@@ -383,17 +383,13 @@ int main(int argc, char* argv[])
// Create a PETSC solver
petsc_solver<double> solver;
// Warning try many solver and collect statistics require a lot of time
// To just solve you can comment this line
solver.best_solve();
// Set the maximum number of iterations
solver.setMaxIter(1000);
solver.setRestart(200);
// Give to the solver A and b, return x, the solution
auto x = solver.solve(fd.getA(),fd.getB());
auto x = solver.try_solve(fd.getA(),fd.getB());
//! \cond [solver] \endcond
......
......@@ -318,7 +318,7 @@ int main(int argc, char* argv[])
Ghost<3,long int> stencil_max(1);
// Finite difference scheme
FDScheme<lid_nn> fd(pd, stencil_max, domain, g_dist.getGridInfo(), g_dist);
FDScheme<lid_nn> fd(pd, stencil_max, domain, g_dist);
//! \cond [fd scheme] \endcond
......
......@@ -319,7 +319,7 @@ int main(int argc, char* argv[])
Ghost<3,long int> stencil_max(1);
// Finite difference scheme
FDScheme<lid_nn> fd(pd, stencil_max, domain, g_dist.getGridInfo(), g_dist);
FDScheme<lid_nn> fd(pd, stencil_max, domain, g_dist);
//! \cond [fd scheme] \endcond
......
......@@ -876,6 +876,107 @@ void euler_int(particles & vd, double dt)
/*! \cond [verlet_int] \endcond */
/*!
*
* \page Vector_7_sph_dlb Vector 7 SPH Dam break simulation with Dynamic load balancing
*
* ### Probes/sensors {#e7_sph_prob_sens}
*
* This function shows how to create a pressure sensor/probe on a set of specified points. To do this,
* from the cell-list we just get an iterator across the neighborhood points of the sensors and we
* calculate the pressure profile. On the other hand because the sensor is in the processor domain
* of only one processor, only one processor must do this calculation. We will use the function isLocal
* to determine which processor contain the probe and only such processor will do the calculation.
*
* \warning This type of calculation is suitable if the number of probes is small (like 10) and pressure is not
* calculated every time step. In case the number of
* probes is comparable to the number of particles, or the pressure is calculated every time-step, then we suggest
* to create a set of "probe" particles
*
*
* \snippet Vector/7_SPH_dlb/main.cpp sens_press
*
*
*/
/*! \cond [sens_press] \endcond */
template<typename Vector, typename CellList>
inline void sensor_pressure(Vector & vd,