Commit be2e3215 authored by foggia

testing average in parallel

parent 807a2f80
@@ -42,36 +42,37 @@
#include "FiniteDifference/eq.hpp"
#include "Solvers/petsc_solver.hpp"
#include "Solvers/petsc_solver.hpp"
#include "FiniteDifference/operators.hpp"
struct lid_nn
{
// dimensionality of the equation (2D problem, 3D problem, ...)
static const unsigned int dims = 2;
// number of fields in the system: v_x, v_y, P, so a total of 3
static const unsigned int nvar = 3;
// boundary conditions, PERIODIC or NON_PERIODIC
static const bool boundary[];
// type of space: float, double, ...
typedef float stype;
// type of base grid, the distributed grid that will store the result
// Note: the first property is a 2D vector (velocity), the second is a scalar (pressure)
typedef grid_dist_id<2,float,aggregate<float[2],float>,CartDecomposition<2,float>> b_grid;
// type of SparseMatrix for the linear system; this parameter is bounded by the solver
// you are using (in the case of umfpack, <double,int> is the only possible choice).
// Here we choose the PETSC implementation
typedef SparseMatrix<double,int,PETSC_BASE> SparseMatrix_type;
// type of Vector for the linear system; this parameter is bounded by the solver
// you are using
typedef Vector<double,PETSC_BASE> Vector_type;
// Define that the underlying grid on which we discretize the system of equations is staggered
static const int grid_type = STAGGERED_GRID;
};
const bool lid_nn::boundary[] = {NON_PERIODIC,NON_PERIODIC};
@@ -111,14 +112,6 @@ const bool lid_nn::boundary[] = {NON_PERIODIC,NON_PERIODIC};
//! \cond [def equation] \endcond
// Constant Field
struct eta
{
typedef void const_field;
static float val() {return 1.0;}
};
// Convenient constants
constexpr unsigned int v[] = {0,1};
constexpr unsigned int P = 2;
@@ -126,31 +119,6 @@ constexpr unsigned int ic = 2;
constexpr int x = 0;
constexpr int y = 1;
// Create the fields v_x, v_y, P
typedef Field<v[x],lid_nn> v_x; // Definition 1 v_x
typedef Field<v[y],lid_nn> v_y; // Definition 2 v_y
typedef Field<P,lid_nn> Prs; // Definition 3 Prs
// Eq1 V_x
typedef mul<eta,Lap<v_x,lid_nn>,lid_nn> eta_lap_vx; // Step 1
typedef D<x,Prs,lid_nn> p_x; // Step 2
typedef minus<p_x,lid_nn> m_p_x; // Step 3
typedef sum<eta_lap_vx,m_p_x,lid_nn> vx_eq; // Step 4
// Eq2 V_y
typedef mul<eta,Lap<v_y,lid_nn>,lid_nn> eta_lap_vy;
typedef D<y,Prs,lid_nn> p_y;
typedef minus<p_y,lid_nn> m_p_y;
typedef sum<eta_lap_vy,m_p_y,lid_nn> vy_eq;
// Eq3 Incompressibility
typedef D<x,v_x,lid_nn,FORWARD> dx_vx; // Step 5
typedef D<y,v_y,lid_nn,FORWARD> dy_vy; // Step 6
typedef sum<dx_vx,dy_vy,lid_nn> ic_eq; // Step 7
//! \cond [def equation] \endcond
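// Written out, the three equations composed above form the Stokes system:
//
//   eta * Lap(v_x) - d P/d x = 0      (vx_eq)
//   eta * Lap(v_y) - d P/d y = 0      (vy_eq)
//   d v_x/d x + d v_y/d y = 0         (ic_eq)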
/*!
@@ -160,19 +128,19 @@ typedef sum<dx_vx,dy_vy,lid_nn> ic_eq; // Step 7
* at boundaries. Explaining this in detail is beyond the scope of this example, but to get a
* qualitative idea consider the following staggered cell
*
\verbatim

		+--$--+
		|     |
		#  *  #
		|     |
		0--$--+

	# = velocity(x)
	$ = velocity(y)
	* = pressure

\endverbatim
*
* As we can see, several properties have different positions in the cell.
* Consider that we want to impose \f$v_y = 0\f$ on \f$x=0\f$. Because there are not
@@ -187,15 +155,6 @@ typedef sum<dx_vx,dy_vy,lid_nn> ic_eq; // Step 7
//! \cond [bond def eq] \endcond
// Equation for boundary conditions
// Directional Avg
typedef Avg<x,v_y,lid_nn> avg_vy;
typedef Avg<y,v_x,lid_nn> avg_vx;
typedef Avg<x,v_y,lid_nn,FORWARD> avg_vy_f;
typedef Avg<y,v_x,lid_nn,FORWARD> avg_vx_f;
// Useful constants (as macros)
#define EQ_1 0
#define EQ_2 1
@@ -208,241 +167,275 @@ typedef Avg<y,v_x,lid_nn,FORWARD> avg_vx_f;
int main(int argc, char* argv[])
{
/*!
* \page Stokes_0_2D_petsc Stokes incompressible 2D petsc
*
* ## Initialization ## {#num_sk_inc_2D_ps_init}
*
* After modeling our equations we:
* * Initialize the library
* * Define some useful constants
* * Define the ghost size
* * Set non-periodic boundary conditions
* * Define the padding that expands the domain
*
* Padding and ghost differ in that the padding extends the domain,
* while the ghost is an extension of each sub-domain
*
* \snippet Numerics/Stoke_flow/0_2D_incompressible/main_petsc.cpp init
*
*/
//! \cond [init] \endcond
// Initialize
openfpm_init(&argc,&argv);
// velocity in the grid is the property 0, pressure is the property 1
constexpr int velocity = 0;
constexpr int pressure = 1;
// Domain, a rectangle
Box<2,float> domain({0.0,0.0},{12.0,4.0});
// Ghost (Not important in this case but required)
Ghost<2,float> g(0.01);
// Grid points: 96 on x and 32 on y
long int sz[] = {96,32};
size_t szu[2];
szu[0] = (size_t)sz[0];
szu[1] = (size_t)sz[1];
// We need one additional point on the left and bottom of the domain.
// This is required by the boundary conditions that we impose.
//
Padding<2> pd({1,1},{0,0});
//! \cond [init] \endcond
/*!
* \page Stokes_0_2D_petsc Stokes incompressible 2D petsc
*
* Distributed grid that stores the solution
*
* \see \ref e0_s_grid_inst
*
* \snippet Numerics/Stoke_flow/0_2D_incompressible/main_petsc.cpp grid inst
*
*/
//! \cond [grid inst] \endcond
grid_dist_id<2,float,aggregate<float[2],float>> g_dist(szu,domain,g);
//! \cond [grid inst] \endcond
/*!
* \page Stokes_0_2D_petsc Stokes incompressible 2D petsc
*
* Solving the system above requires the solution of a linear system
*
* \f$ Ax = b \quad x = A^{-1}b\f$
*
* where A is the matrix that discretizes the left-hand side of the equations plus the boundary conditions,
* and b discretizes the right-hand side plus the boundary conditions
*
* FDScheme is the object that we use to produce the matrix A and the vector b.
* Such an object requires the maximum extension of the stencil
*
* \snippet Numerics/Stoke_flow/0_2D_incompressible/main_petsc.cpp fd scheme
*
*/
//! \cond [fd scheme] \endcond
// It is the maximum extension of the stencil (the order-2 Laplacian stencil has extension 1)
Ghost<2,long int> stencil_max(1);
// Finite difference scheme
FDScheme<lid_nn> fd(pd, stencil_max, domain, g_dist);
//! \cond [fd scheme] \endcond
/*!
* \page Stokes_0_2D_petsc Stokes incompressible 2D petsc
*
* ## Impose the equation on the domain ## {#num_sk_inc_2D_ps_ied}
*
* Here we impose the system of equations. We start from the incompressibility equation,
* imposed in the bulk with the exception of the first point {0,0}, and then we set P = 0
* in {0,0}. The reason is mathematical: it makes the system well defined. An intuitive
* explanation is that P and P + c are both solutions of the incompressibility equation,
* which makes the problem ill-posed; to make it well posed we fix the pressure at one
* point, in this case {0,0}, to a constant, for convenience P = 0
*
* The best way to understand what we are doing is to draw a smaller example, say 8x8.
* Considering that we have one additional point on the left (and bottom) for padding,
* we have a 9x9 grid. If on each point v_x, v_y and P are unknown, we have
* 9x9x3 = 243 unknowns. In order to fully determine a unique solution we have to
* impose 243 conditions. The code below imposes (in the 9x9 case) exactly
* 243 conditions between boundary and bulk.
*
* \snippet Numerics/Stoke_flow/0_2D_incompressible/main_petsc.cpp impose eq dom
*
*
*/
//! \cond [impose eq dom] \endcond
fd.impose(ic_eq(),0.0, EQ_3, {0,0},{sz[0]-2,sz[1]-2},true);
fd.impose(Prs(), 0.0, EQ_3, {0,0},{0,0});
// Here we impose Eq1 and Eq2
fd.impose(vx_eq(),0.0, EQ_1, {1,0},{sz[0]-2,sz[1]-2});
fd.impose(vy_eq(),0.0, EQ_2, {0,1},{sz[0]-2,sz[1]-2});
// v_x and v_y
// Imposing B1
fd.impose(v_x(),0.0, EQ_1, {0,0},{0,sz[1]-2});
fd.impose(avg_vy_f(),0.0, EQ_2 , {-1,0},{-1,sz[1]-1});
// Imposing B2
fd.impose(v_x(),0.0, EQ_1, {sz[0]-1,0},{sz[0]-1,sz[1]-2});
fd.impose(avg_vy(),1.0, EQ_2, {sz[0]-1,0},{sz[0]-1,sz[1]-1});
// Imposing B3
fd.impose(avg_vx_f(),0.0, EQ_1, {0,-1},{sz[0]-1,-1});
fd.impose(v_y(), 0.0, EQ_2, {0,0},{sz[0]-2,0});
// Imposing B4
fd.impose(avg_vx(),0.0, EQ_1, {0,sz[1]-1},{sz[0]-1,sz[1]-1});
fd.impose(v_y(), 0.0, EQ_2, {0,sz[1]-1},{sz[0]-2,sz[1]-1});
// When we pad the grid, there are points of the grid that are not
// touched by the previous conditions. Mathematically this leads to
// having more variables than imposed conditions. Here we set the
// variables that are not otherwise touched to zero
//
// Padding pressure
fd.impose(Prs(), 0.0, EQ_3, {-1,-1},{sz[0]-1,-1});
fd.impose(Prs(), 0.0, EQ_3, {-1,sz[1]-1},{sz[0]-1,sz[1]-1});
fd.impose(Prs(), 0.0, EQ_3, {-1,0},{-1,sz[1]-2});
fd.impose(Prs(), 0.0, EQ_3, {sz[0]-1,0},{sz[0]-1,sz[1]-2});
// Impose v_x padding and v_y padding
fd.impose(v_x(), 0.0, EQ_1, {-1,-1},{-1,sz[1]-1});
fd.impose(v_y(), 0.0, EQ_2, {-1,-1},{sz[0]-1,-1});
//! \cond [impose eq dom] \endcond
/*!
* \page Stokes_0_2D_petsc Stokes incompressible 2D petsc
*
* ## Solve the system of equation ## {#num_sk_inc_2D_sse}
*
* Once we have imposed all the equations we can retrieve the matrix A and the vector b
* and pass these two elements to the solver. In this example we are using PETSC solvers,
* which include direct and iterative solvers. While Umfpack
* has only one solver, PETSC wraps several. The function best_solve sets the solver
* into a mode that tries multiple solvers on your system; the subsequent call to solve
* produces a report of all the solvers tried, comparing them in error convergence and
* speed. If you do not use best_solve, the system is solved with the default solver,
* GMRES (the most robust iterative method)
*
* \snippet Numerics/Stoke_flow/0_2D_incompressible/main_petsc.cpp solver
*
*/
//! \cond [solver] \endcond
// Create a PETSC solver
petsc_solver<double> solver;
// Set the maximum number of iterations
solver.setMaxIter(1000);
solver.setRestart(200);
// Give to the solver A and b, return x, the solution
auto x = solver.try_solve(fd.getA(),fd.getB());
//! \cond [solver] \endcond
/*!
* \page Stokes_0_2D_petsc Stokes incompressible 2D petsc
*
* ## Copy the solution on the grid and write on VTK ## {#num_sk_inc_2D_ps_csg}
*
* Once we have the solution we copy it on the grid
*
* \snippet Numerics/Stoke_flow/0_2D_incompressible/main_petsc.cpp copy write
*
*/
//! \cond [copy write] \endcond
fd.template copy<velocity,pressure>(x,{0,0},{sz[0]-1,sz[1]-1},g_dist);
g_dist.write("lid_driven_cavity_p_petsc");
//! \cond [copy write] \endcond
/*!
* \page Stokes_0_2D_petsc Stokes incompressible 2D petsc
*
* ## Finalize ## {#num_sk_inc_2D_ps_fin}
*
* At the very end of the program we always have to de-initialize the library
*
* \snippet Numerics/Stoke_flow/0_2D_incompressible/main_petsc.cpp fin lib
*
*/
//! \cond [fin lib] \endcond
openfpm_finalize();
//! \cond [fin lib] \endcond
/*!
* \page Stokes_0_2D_petsc Stokes incompressible 2D petsc
*
* # Full code # {#num_sk_inc_2D_ps_code}
*
* \include Numerics/Stoke_flow/0_2D_incompressible/main_petsc.cpp
*
*/
/*!
* \page Stokes_0_2D_petsc Stokes incompressible 2D petsc
*
* ## Initialization ## {#num_sk_inc_2D_ps_init}
*
* After modeling our equations we:
* * Initialize the library
* * Define some useful constants
* * Define the ghost size
* * Set non-periodic boundary conditions
* * Define the padding that expands the domain
*
* Padding and ghost differ in that the padding extends the domain,
* while the ghost is an extension of each sub-domain
*
* \snippet Numerics/Stoke_flow/0_2D_incompressible/main_petsc.cpp init
*
*/
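// Concretely, in the code below: Ghost<2,float> g(0.01) gives every sub-domain
// a 0.01-wide halo used for communication, while Padding<2> pd({1,1},{0,0})
// adds one extra grid point on the low (left/bottom) side of each dimension,
// extending the domain itself.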
//! \cond [init] \endcond
// Initialize
openfpm_init(&argc,&argv);
// Names for the sub-cell positions at which each staggered unknown lives
// (reading the names: cc = center, ll = left, bl = bottom-left, bb = bottom)
std::initializer_list<char> cc = {0,0};
std::initializer_list<char> ll = {0,-1};
std::initializer_list<char> bl = {-1,-1};
std::initializer_list<char> bb = {-1,0};
//! \cond [def equation] \endcond
// Create the fields v_x, v_y, P
Field<v[x],lid_nn> v_x{ll}; // Definition 1 v_x
Field<v[y],lid_nn> v_y{bb}; // Definition 2 v_y
Field<P,lid_nn> Prs{cc}; // Definition 3 Prs
coeff<double,lid_nn> eta{1.0,bl}; // Constant coefficient eta = 1.0 (the viscosity)
// Create the derivatives and Laplacians that are needed
Laplacian<lid_nn,CENTRAL> lap;
Der<x,lid_nn,CENTRAL> dx;
Der<y,lid_nn,CENTRAL> dy;
Der<x,lid_nn,FORWARD> dx_f;
Der<y,lid_nn,FORWARD> dy_f;
// Equations
auto vx_eq = eta*lap(v_x) - dx(Prs); // Eq1 V_x
auto vy_eq = eta*lap(v_y) - dy(Prs); // Eq2 V_y
auto ic_eq = dx_f(v_x) + dy_f(v_y); // Eq3 Incompressibility
// Directional averages, used to impose the boundary conditions
Avg<x,decltype(v_y)> avg_vy{v_y};
Avg<y,decltype(v_x)> avg_vx{v_x};
Avg<x,decltype(v_y),FORWARD> avg_vy_f{v_y};
Avg<y,decltype(v_x),FORWARD> avg_vx_f{v_x};
//! \cond [def equation] \endcond
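// The directional averages interpolate a staggered field to a position where
// it has no sample of its own: imposing, e.g., avg_vy = 0 on a wall enforces
// (v_y on one side + v_y on the other side)/2 = 0, i.e. v_y = 0 at the wall.
// (This reading follows the staggered-cell note above; which pair of samples
// is taken depends on the FORWARD variant.)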
// velocity in the grid is the property 0, pressure is the property 1
constexpr int velocity = 0;
constexpr int pressure = 1;
// Domain, a rectangle
Box<2,float> domain({0.0,0.0},{12.0,4.0});
// Ghost (Not important in this case but required)
Ghost<2,float> g(0.01);
// Grid points: 96 on x and 32 on y
long int sz[] = {96,32};
size_t szu[2];
szu[0] = (size_t)sz[0];
szu[1] = (size_t)sz[1];
// We need one additional point on the left and bottom of the domain.
// This is required by the boundary conditions that we impose.
//
Padding<2> pd({1,1},{0,0});
//! \cond [init] \endcond
/*!
* \page Stokes_0_2D_petsc Stokes incompressible 2D petsc
*
* Distributed grid that stores the solution
*
* \see \ref e0_s_grid_inst
*
* \snippet Numerics/Stoke_flow/0_2D_incompressible/main_petsc.cpp grid inst
*
*/
//! \cond [grid inst] \endcond
grid_dist_id<2,float,aggregate<float[2],float>> g_dist(szu,domain,g);
//! \cond [grid inst] \endcond
/*!
* \page Stokes_0_2D_petsc Stokes incompressible 2D petsc
*
* Solving the system above requires the solution of a linear system
*
* \f$ Ax = b \quad x = A^{-1}b\f$
*
* where A is the matrix that discretizes the left-hand side of the equations plus the boundary conditions,
* and b discretizes the right-hand side plus the boundary conditions
*
* FDScheme is the object that we use to produce the matrix A and the vector b.
* Such an object requires the maximum extension of the stencil
*
* \snippet Numerics/Stoke_flow/0_2D_incompressible/main_petsc.cpp fd scheme
*
*/
//! \cond [fd scheme] \endcond
// It is the maximum extension of the stencil (the order-2 Laplacian stencil has extension 1)
Ghost<2,long int> stencil_max(1);
// Finite difference scheme
FDScheme<lid_nn> fd(pd, stencil_max, domain, g_dist);
//! \cond [fd scheme] \endcond
/*!
* \page Stokes_0_2D_petsc Stokes incompressible 2D petsc
*
* ## Impose the equation on the domain ## {#num_sk_inc_2D_ps_ied}
*
* Here we impose the system of equations. We start from the incompressibility equation,
* imposed in the bulk with the exception of the first point {0,0}, and then we set P = 0
* in {0,0}. The reason is mathematical: it makes the system well defined. An intuitive
* explanation is that P and P + c are both solutions of the incompressibility equation,
* which makes the problem ill-posed; to make it well posed we fix the pressure at one
* point, in this case {0,0}, to a constant, for convenience P = 0
*
* The best way to understand what we are doing is to draw a smaller example, say 8x8.
* Considering that we have one additional point on the left (and bottom) for padding,
* we have a 9x9 grid. If on each point v_x, v_y and P are unknown, we have
* 9x9x3 = 243 unknowns. In order to fully determine a unique solution we have to
* impose 243 conditions. The code below imposes (in the 9x9 case) exactly
* 243 conditions between boundary and bulk.
*
* \snippet Numerics/Stoke_flow/0_2D_incompressible/main_petsc.cpp impose eq dom
*
*
*/
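// To make the counting argument above concrete, the tally for the 9x9 case,
// range by range (the bulk incompressibility range excludes {0,0}, which
// instead carries the P = 0 condition):
//
//   incompressibility (bulk)       7*7 - 1 = 48
//   pressure fixed at {0,0}                   1
//   vx_eq                          6*7     = 42
//   vy_eq                          7*6     = 42
//   B1-B4 (wall + average eqs)     4*(7+8) = 60
//   padding pressure               9+9+7+7 = 32
//   padding v_x and v_y            9+9     = 18
//                                  total    243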
//! \cond [impose eq dom] \endcond
fd.impose<EQ_3>(ic_eq,0.0,{0,0},{sz[0]-2,sz[1]-2},cc,true);
fd.impose<EQ_3>(Prs,0.0,{0,0},{0,0},cc);
// Here we impose Eq1 and Eq2
fd.impose<EQ_1>(vx_eq,0.0,{1,0},{sz[0]-2,sz[1]-2},ll);
fd.impose<EQ_2>(vy_eq,0.0,{0,1},{sz[0]-2,sz[1]-2},bb);
// v_x and v_y
// Imposing B1
fd.impose<EQ_1>(v_x,0.0,{0,0},{0,sz[1]-2},ll);
fd.impose<EQ_2>(avg_vy_f,0.0,{-1,0},{-1,sz[1]-1},bb);
// Imposing B2
fd.impose<EQ_1>(v_x,0.0,{sz[0]-1,0},{sz[0]-1,sz[1]-2},ll);
fd.impose<EQ_2>(avg_vy,1.0,{sz[0]-1,0},{sz[0]-1,sz[1]-1},bb);
// Imposing B3
fd.impose<EQ_1>(avg_vx_f,0.0,{0,-1},{sz[0]-1,-1},ll);
fd.impose<EQ_2>(v_y,0.0,{0,0},{sz[0]-2,0},bb);
// Imposing B4
fd.impose<EQ_1>(avg_vx,0.0,{0,sz[1]-1},{sz[0]-1,sz[1]-1},ll);
fd.impose<EQ_2>(v_y,0.0,{0,sz[1]-1},{sz[0]-2,sz[1]-1},bb);
// When we pad the grid, there are points of the grid that are not
// touched by the previous conditions. Mathematically this leads to
// having more variables than imposed conditions. Here we set the
// variables that are not otherwise touched to zero
//
// Padding pressure
fd.impose<EQ_3>(Prs,0.0,{-1,-1},{sz[0]-1,-1},cc);
fd.impose<EQ_3>(Prs,0.0,{-1,sz[1]-1},{sz[0]-1,sz[1]-1},cc);
fd.impose<EQ_3>(Prs,0.0,{-1,0},{-1,sz[1]-2},cc);
fd.impose<EQ_3>(Prs,0.0,{sz[0]-1,0},{sz[0]-1,sz[1]-2},cc);
// Impose v_x padding and v_y padding
fd.impose<EQ_1>(v_x,0.0,{-1,-1},{-1,sz[1]-1},cc);
fd.impose<EQ_2>(v_y,0.0,{-1,-1},{sz[0]-1,-1},cc);
//! \cond [impose eq dom] \endcond
/*!
* \page Stokes_0_2D_petsc Stokes incompressible 2D petsc
*
* ## Solve the system of equation ## {#num_sk_inc_2D_sse}
*
* Once we have imposed all the equations we can retrieve the matrix A and the vector b
* and pass these two elements to the solver. In this example we are using PETSC solvers,
* which include direct and iterative solvers. While Umfpack
* has only one solver, PETSC wraps several. The function best_solve sets the solver
* into a mode that tries multiple solvers on your system; the subsequent call to solve
* produces a report of all the solvers tried, comparing them in error convergence and
* speed. If you do not use best_solve, the system is solved with the default solver,
* GMRES (the most robust iterative method)
*
* \snippet Numerics/Stoke_flow/0_2D_incompressible/main_petsc.cpp solver
*
*/
//! \cond [solver] \endcond
// Create a PETSC solver
petsc_solver<double> solver;
// Set the maximum number of iterations
solver.setMaxIter(1000);
solver.setRestart(200);
// Give to the solver A and b, return x, the solution
auto x = solver.try_solve(fd.getA(),fd.getB());
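// A sketch of the solver-comparison mode described above, assuming (as the
// text suggests) that best_solve() is a method of petsc_solver and that
// solve() takes the same arguments as try_solve():
//
// petsc_solver<double> cmp;
// cmp.setMaxIter(1000);
// cmp.setRestart(200);
// cmp.best_solve();                             // try several PETSc solvers
// auto x_cmp = cmp.solve(fd.getA(),fd.getB());  // prints the comparison report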
//! \cond [solver] \endcond
/*!