Commit 3b4db8ae authored by incardon's avatar incardon

Adding CG for Vortex in Cell

parent ed10df48
/*
* CG.hpp
*
* Created on: Jun 16, 2017
* Author: i-bird
*/
#ifndef EXAMPLE_NUMERICS_VORTEX_IN_CELL_CG_HPP_
#define EXAMPLE_NUMERICS_VORTEX_IN_CELL_CG_HPP_
typedef grid_dist_id<3,float,aggregate<float>> grid_type_scal;
#include <cmath>

/*! \brief Return the L2 norm of a distributed scalar field
 *
 * Computes sqrt( sum_p f(p)^2 ) over the domain points of all processors.
 *
 * \param scl distributed scalar grid
 *
 * \return the L2 norm of property 0 of the field
 *
 */
float norm(grid_type_scal & scl)
{
	// accumulate in double to limit round-off over many grid points
	double nrm = 0.0;

	Vcluster & v_cl = create_vcluster();

	auto it = scl.getDomainIterator();

	// local sum of squares over the domain points
	while (it.isNext())
	{
		auto p = it.get();

		nrm += scl.template getProp<0>(p)*scl.template getProp<0>(p);

		++it;
	}

	// reduce the partial sums across all processors
	v_cl.sum(nrm);
	v_cl.execute();

	// FIX: the original returned the squared norm; take the square root so
	// the result matches the function name and the CG tolerance is a true norm
	return std::sqrt(nrm);
}
/*! \brief Copy property 0 of one scalar grid into another
 *
 * \param src source grid
 * \param dst destination grid (assumed to share src's decomposition)
 *
 */
void copy(grid_type_scal & src, grid_type_scal & dst)
{
	for (auto dom = src.getDomainIterator() ; dom.isNext() ; ++dom)
	{
		auto key = dom.get();
		dst.getProp<0>(key) = src.getProp<0>(key);
	}
}
/*! \brief In-place AXPY update: x <- x + alpha * p
 *
 * \param x grid updated in place
 * \param p direction grid
 * \param alpha scalar coefficient
 *
 */
void x_plus_alpha_p(grid_type_scal & x, grid_type_scal & p, float alpha)
{
	for (auto dom = x.getDomainIterator() ; dom.isNext() ; ++dom)
	{
		auto k = dom.get();
		x.getProp<0>(k) += alpha * p.getProp<0>(k);
	}
}
/*! \brief Out-of-place AXPY: xn <- x + alpha * p
 *
 * \param xn destination grid
 * \param x first operand grid
 * \param p direction grid
 * \param alpha scalar coefficient
 *
 */
void x_plus_alpha_p(grid_type_scal & xn, grid_type_scal & x, grid_type_scal & p, float alpha)
{
	for (auto dom = x.getDomainIterator() ; dom.isNext() ; ++dom)
	{
		auto k = dom.get();
		xn.getProp<0>(k) = x.getProp<0>(k) + alpha * p.getProp<0>(k);
	}
}
/*! \brief Compute the CG step length alpha = (r,r) / (p, A p)
 *
 * As a side effect Ap is overwritten with A applied to p, so the
 * caller can reuse it for the residual update.
 *
 * \param A matrix-free operator: A(in,out) computes out = A in
 * \param r current residual
 * \param p current search direction
 * \param Ap output grid, receives A p
 *
 * \return the step length alpha
 *
 */
float alpha(void (* A)(grid_type_scal & in, grid_type_scal & out),
grid_type_scal & r,
grid_type_scal & p,
grid_type_scal & Ap)
{
	// Ap <- A p, needed for the denominator (and reused by the caller)
	A(p,Ap);

	double num = 0.0;   // (r,r)
	double den = 0.0;   // (p,Ap)

	for (auto dom = r.getDomainIterator() ; dom.isNext() ; ++dom)
	{
		auto k = dom.get();
		num += r.template getProp<0>(k)*r.template getProp<0>(k);
		den += p.template getProp<0>(k)*Ap.template getProp<0>(k);
	}

	// global reduction of both scalar products
	Vcluster & v_cl = create_vcluster();
	v_cl.sum(num);
	v_cl.sum(den);
	v_cl.execute();

	return num / den;
}
/*! \brief Compute the CG direction coefficient beta = (rn,rn) / (r,r)
 *
 * \param r previous residual
 * \param rn new residual
 *
 * \return the coefficient beta
 *
 */
float beta(grid_type_scal & r,
grid_type_scal & rn)
{
	double dot_old = 0.0;   // (r,r)
	double dot_new = 0.0;   // (rn,rn)

	for (auto dom = r.getDomainIterator() ; dom.isNext() ; ++dom)
	{
		auto k = dom.get();
		dot_old += r.template getProp<0>(k)*r.template getProp<0>(k);
		dot_new += rn.template getProp<0>(k)*rn.template getProp<0>(k);
	}

	// global reduction of both scalar products
	Vcluster & v_cl = create_vcluster();
	v_cl.sum(dot_old);
	v_cl.sum(dot_new);
	v_cl.execute();

	return dot_new / dot_old;
}
void CG(void (* A)(grid_type_scal & in, grid_type_scal & out), grid_type_scal & x, grid_type_scal & b)
{
grid_type_scal tmp(b.getDecomposition(),b.getGridInfo().getSize(),b.getDecomposition().getGhost());
grid_type_scal r(b.getDecomposition(),b.getGridInfo().getSize(),b.getDecomposition().getGhost());
grid_type_scal rn(b.getDecomposition(),b.getGridInfo().getSize(),b.getDecomposition().getGhost());
grid_type_scal p(b.getDecomposition(),b.getGridInfo().getSize(),b.getDecomposition().getGhost());
grid_type_scal Ap(b.getDecomposition(),b.getGridInfo().getSize(),b.getDecomposition().getGhost());
// r_0 = b - Ax
copy(b,r);
A(x,Ap);
x_plus_alpha_p(r,Ap,-1);
// r0 = p0
copy(r,p);
for (size_t i = 0 ; i < 400 ; i++)
{
float alpha_c = alpha(A,r,p,Ap);
x_plus_alpha_p(x,p,alpha_c);
x_plus_alpha_p(rn,r,Ap,-alpha_c);
float r_norm = norm(rn);
std::cout << "It: " << i << " " << r_norm << std::endl;
if (r_norm < 0.1)
return;
float beta_c = beta(r,rn);
x_plus_alpha_p(p,rn,p,beta_c);
copy(rn,r);
}
}
#endif /* EXAMPLE_NUMERICS_VORTEX_IN_CELL_CG_HPP_ */
......@@ -115,6 +115,8 @@ typedef grid_dist_id<3,float,aggregate<float[3]>> grid_type;
// The type of the particles
typedef vector_dist<3,float,aggregate<float[3],float[3],float[3],float[3],float[3]>> particles_type;
#include "CG.hpp"
// radius of the torus
float ringr1 = 5.0/4.0;
// radius of the core of the torus
......@@ -300,6 +302,29 @@ const bool poisson_nn_helm::boundary[] = {PERIODIC,PERIODIC,PERIODIC};
//! \cond [poisson_syseq] \endcond
/*! \brief Matrix-free operator used by the CG solver
 *
 * Applies, in each direction d, the wide (stride-2) central second
 * difference ( in(p+2e_d) - 2 in(p) + in(p-2e_d) ) / (2 h_d)^2 and
 * sums the three contributions into out.
 *
 * \param in input scalar field; its ghost layer is refreshed here
 *           because the stencil reaches two points outside the domain
 * \param out output scalar field receiving the result
 *
 */
void OpA(grid_type_scal & in, grid_type_scal & out)
{
	// per-direction coefficients 1/(2h)^2
	float cx = 0.25f/(in.spacing(0)*in.spacing(0));
	float cy = 0.25f/(in.spacing(1)*in.spacing(1));
	float cz = 0.25f/(in.spacing(2)*in.spacing(2));

	// diagonal coefficient of the stencil (loop invariant)
	float diag = 2.0f*(cx+cy+cz);

	// the +/-2 stencil needs up-to-date ghost values
	in.ghost_get<0>();

	for (auto it = in.getDomainIterator() ; it.isNext() ; ++it)
	{
		auto p = it.get();

		out.template getProp<0>(p) = cx*(in.template get<0>(p.move(x,2))+in.template get<0>(p.move(x,-2)))+
		                             cy*(in.template get<0>(p.move(y,2))+in.template get<0>(p.move(y,-2)))+
		                             cz*(in.template get<0>(p.move(z,2))+in.template get<0>(p.move(z,-2)))-
		                             diag*in.template get<0>(p);
	}
}
/*! \page Vortex_in_cell_petsc Vortex in Cell 3D
*
......@@ -499,6 +524,26 @@ void helmotz_hodge_projection(grid_type & gr, const Box<3,float> & domain)
// Give to the solver A and b, return x, the solution
auto x_ = solver.solve(fd.getA(),fd.getB());
//////////////// CG Call //////////////////////////
// Here we create a distributed grid to store the result of the helmotz projection
grid_dist_id<3,float,aggregate<float>> sol(gr.getDecomposition(),gr.getGridInfo().getSize(),g);
auto zit = sol.getDomainIterator();
while (zit.isNext())
{
auto p = zit.get();
sol.template getProp<0>(p) = 0.0;
++zit;
}
CG(OpA,sol,psi);
///////////////////////////////////////////////////
// copy the solution x to the grid psi
fd.template copy<phi>(x_,psi);
......@@ -697,8 +742,9 @@ void comp_vel(Box<3,float> & domain, grid_type & g_vort,grid_type & g_vel, petsc
petsc_solver<double> solver;
solver.setSolver(KSPCG);
solver.setAbsTol(0.001);
solver.setAbsTol(0.1);
solver.setMaxIter(500);
solver.log_monitor();
// Get the sparse matrix that represent the left-hand-side
// of the equation
......@@ -712,6 +758,26 @@ void comp_vel(Box<3,float> & domain, grid_type & g_vort,grid_type & g_vel, petsc
// time step
solver.solve(A,phi_s[i],b);
//////////////// CG Call //////////////////////////
// Here we create a distributed grid to store the result of the helmotz projection
grid_dist_id<3,float,aggregate<float>> sol(gr_ps.getDecomposition(),gr_ps.getGridInfo().getSize(),g);
auto zit = sol.getDomainIterator();
while (zit.isNext())
{
auto p = zit.get();
sol.template getProp<0>(p) = 0.0;
++zit;
}
CG(OpA,sol,gr_ps);
///////////////////////////////////////////////////
// Calculate the residual
solError serr;
......@@ -724,6 +790,13 @@ void comp_vel(Box<3,float> & domain, grid_type & g_vort,grid_type & g_vel, petsc
// copy the solution to grid
fd.template copy<phi>(phi_s[i],gr_ps);
/////////////// DEBUG ///////////////
gr_ps.write("Solution_petsc_" + std::to_string(i));
sol.write("Solution_my_" + std::to_string(i));
/////////////////////////////////////
//! \cond [solve_poisson_comp] \endcond
//! \cond [copy_to_phi_v] \endcond
......@@ -743,6 +816,8 @@ void comp_vel(Box<3,float> & domain, grid_type & g_vort,grid_type & g_vel, petsc
//! \cond [copy_to_phi_v] \endcond
}
exit(0);
//! \cond [curl_phi_v] \endcond
phi_v.ghost_get<phi>();
......
......@@ -959,6 +959,25 @@ public:
return total;
}
/*! \brief Get the number of grid points in the local grids, ghost layer included
 *
 * Sums the volume of the extended box (GDbox — presumably the domain box
 * grown by the ghost; confirm against gdb_ext) of every local grid.
 * Unlike the plain local-domain size, this count includes the ghost points.
 *
 * \return the total number of local grid points, ghost included
 *
 */
size_t getLocalDomainWithGhostSize() const
{
#ifdef SE_CLASS2
check_valid(this,8);
#endif
size_t total = 0;
for (size_t i = 0 ; i < gdb_ext.size() ; i++)
{
total += gdb_ext.get(i).GDbox.getVolumeKey();
}
return total;
}
/*! \brief It return the informations about the local grids
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment