Commit 693a25c9 authored by Pietro Incardona

Finally understanding why Vortex in Cell does not scale: the problem is the Conjugate-Gradient solver

parent 914d6ba0
/*
 * CG.hpp
 *
 *  Created on: Jun 16, 2017
 *      Author: i-bird
 */

#ifndef EXAMPLE_NUMERICS_VORTEX_IN_CELL_CG_HPP_
#define EXAMPLE_NUMERICS_VORTEX_IN_CELL_CG_HPP_

// Distributed scalar grid on which the hand-rolled conjugate-gradient solver below operates
typedef grid_dist_id<3,float,aggregate<float>> grid_type_scal;
// Return the squared L2 norm of the scalar field, reduced over all processors.
// Callers take the square root themselves (see CG below).
float norm(grid_type_scal & scl)
{
	double nrm = 0.0;

	Vcluster & v_cl = create_vcluster();

	auto it = scl.getDomainIterator();

	while (it.isNext())
	{
		auto p = it.get();

		nrm += scl.template getProp<0>(p)*scl.template getProp<0>(p);

		++it;
	}

	// sum the partial results over all processors
	v_cl.sum(nrm);
	v_cl.execute();

	return nrm;
}
// Copy the scalar field src into dst (domain part only, ghosts are not touched)
void copy(grid_type_scal & src, grid_type_scal & dst)
{
	auto it = src.getDomainIterator();

	while (it.isNext())
	{
		auto key = it.get();

		dst.getProp<0>(key) = src.getProp<0>(key);

		++it;
	}
}
// In-place update: x <- x + alpha * p
void x_plus_alpha_p(grid_type_scal & x, grid_type_scal & p, float alpha)
{
	auto it = x.getDomainIterator();

	while (it.isNext())
	{
		auto key = it.get();

		x.getProp<0>(key) = x.getProp<0>(key) + alpha * p.getProp<0>(key);

		++it;
	}
}
// Out-of-place update: xn <- x + alpha * p
void x_plus_alpha_p(grid_type_scal & xn, grid_type_scal & x, grid_type_scal & p, float alpha)
{
	auto it = x.getDomainIterator();

	while (it.isNext())
	{
		auto key = it.get();

		xn.getProp<0>(key) = x.getProp<0>(key) + alpha * p.getProp<0>(key);

		++it;
	}
}
// Compute alpha_k = (r_k . r_k) / (p_k . A p_k); as a side effect Ap is overwritten with A p_k
float alpha(void (* A)(grid_type_scal & in, grid_type_scal & out),
            grid_type_scal & r,
            grid_type_scal & p,
            grid_type_scal & Ap)
{
	double scal_r = 0.0;
	double scal_pAp = 0.0;

	// Ap <- A p
	A(p,Ap);

	auto it = r.getDomainIterator();

	while (it.isNext())
	{
		auto key = it.get();

		scal_r += r.template getProp<0>(key)*r.template getProp<0>(key);
		scal_pAp += p.template getProp<0>(key)*Ap.template getProp<0>(key);

		++it;
	}

	// reduce the two scalar products over all processors
	Vcluster & v_cl = create_vcluster();
	v_cl.sum(scal_r);
	v_cl.sum(scal_pAp);
	v_cl.execute();

	return scal_r / scal_pAp;
}
// Compute beta_k = (r_{k+1} . r_{k+1}) / (r_k . r_k)
float beta(grid_type_scal & r,
           grid_type_scal & rn)
{
	double scal_r = 0.0;
	double scal_rn = 0.0;

	auto it = r.getDomainIterator();

	while (it.isNext())
	{
		auto key = it.get();

		scal_r += r.template getProp<0>(key)*r.template getProp<0>(key);
		scal_rn += rn.template getProp<0>(key)*rn.template getProp<0>(key);

		++it;
	}

	// reduce the two scalar products over all processors
	Vcluster & v_cl = create_vcluster();
	v_cl.sum(scal_r);
	v_cl.sum(scal_rn);
	v_cl.execute();

	return scal_rn / scal_r;
}
// Unpreconditioned conjugate gradient: solve A x = b for the scalar field x,
// where A is applied matrix-free through the function pointer A(in,out)
void CG(void (* A)(grid_type_scal & in, grid_type_scal & out), grid_type_scal & x, grid_type_scal & b)
{
	grid_type_scal tmp(b.getDecomposition(),b.getGridInfo().getSize(),b.getDecomposition().getGhost()); // scratch field (allocated but unused in this version)
	grid_type_scal r(b.getDecomposition(),b.getGridInfo().getSize(),b.getDecomposition().getGhost());
	grid_type_scal rn(b.getDecomposition(),b.getGridInfo().getSize(),b.getDecomposition().getGhost());
	grid_type_scal p(b.getDecomposition(),b.getGridInfo().getSize(),b.getDecomposition().getGhost());
	grid_type_scal Ap(b.getDecomposition(),b.getGridInfo().getSize(),b.getDecomposition().getGhost());

	// r_0 = b - A x_0
	copy(b,r);
	A(x,Ap);
	x_plus_alpha_p(r,Ap,-1);

	std::cout << "It init: " << sqrt(norm(r)) << std::endl;

	// p_0 = r_0
	copy(r,p);

	// at most 400 iterations, stop when the residual norm drops below 0.1
	for (size_t i = 0 ; i < 400 ; i++)
	{
		// alpha_k = (r_k . r_k) / (p_k . A p_k), Ap <- A p_k
		float alpha_c = alpha(A,r,p,Ap);

		// x_{k+1} = x_k + alpha_k p_k
		x_plus_alpha_p(x,p,alpha_c);

		// r_{k+1} = r_k - alpha_k A p_k
		x_plus_alpha_p(rn,r,Ap,-alpha_c);

		float r_norm = norm(rn);
		std::cout << "It: " << i << " " << sqrt(r_norm) << std::endl;

		if (sqrt(r_norm) < 0.1)
			return;

		// beta_k = (r_{k+1} . r_{k+1}) / (r_k . r_k)
		float beta_c = beta(r,rn);

		// p_{k+1} = r_{k+1} + beta_k p_k
		x_plus_alpha_p(p,rn,p,beta_c);

		copy(rn,r);
	}
}
#endif /* EXAMPLE_NUMERICS_VORTEX_IN_CELL_CG_HPP_ */
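
For orientation, CG() above hand-rolls the textbook unpreconditioned conjugate-gradient recurrences on distributed grids: r_0 = b - A x_0, p_0 = r_0, then alpha_k = (r_k . r_k)/(p_k . A p_k), x_{k+1} = x_k + alpha_k p_k, r_{k+1} = r_k - alpha_k A p_k, beta_k = (r_{k+1} . r_{k+1})/(r_k . r_k), p_{k+1} = r_{k+1} + beta_k p_k. Below is a minimal self-contained sketch of the same recurrences on a plain dense symmetric positive-definite matrix; the name cg_dense and the whole snippet are illustrative only and not part of the commit.

#include <vector>
#include <cmath>
#include <cstddef>

// Dot product of two vectors of equal length
static double dot(const std::vector<double> & a, const std::vector<double> & b)
{
	double s = 0.0;
	for (size_t i = 0 ; i < a.size() ; i++)
	{s += a[i]*b[i];}
	return s;
}

// Solve A x = b with unpreconditioned conjugate gradient, A dense and SPD (illustrative sketch)
void cg_dense(const std::vector<std::vector<double>> & A,
              std::vector<double> & x,
              const std::vector<double> & b,
              double tol, size_t max_it)
{
	size_t n = b.size();
	std::vector<double> r(n), p(n), Ap(n);

	// r_0 = b - A x_0 , p_0 = r_0
	for (size_t i = 0 ; i < n ; i++)
	{
		double Ax = 0.0;
		for (size_t j = 0 ; j < n ; j++)
		{Ax += A[i][j]*x[j];}
		r[i] = b[i] - Ax;
		p[i] = r[i];
	}

	double rr = dot(r,r);

	for (size_t k = 0 ; k < max_it && std::sqrt(rr) >= tol ; k++)
	{
		// Ap = A p_k
		for (size_t i = 0 ; i < n ; i++)
		{
			Ap[i] = 0.0;
			for (size_t j = 0 ; j < n ; j++)
			{Ap[i] += A[i][j]*p[j];}
		}

		double alpha = rr / dot(p,Ap);              // alpha_k = (r_k . r_k) / (p_k . A p_k)

		for (size_t i = 0 ; i < n ; i++)
		{
			x[i] += alpha*p[i];                     // x_{k+1} = x_k + alpha_k p_k
			r[i] -= alpha*Ap[i];                    // r_{k+1} = r_k - alpha_k A p_k
		}

		double rr_new = dot(r,r);
		double beta = rr_new / rr;                  // beta_k = (r_{k+1} . r_{k+1}) / (r_k . r_k)

		for (size_t i = 0 ; i < n ; i++)
		{p[i] = r[i] + beta*p[i];}                  // p_{k+1} = r_{k+1} + beta_k p_k

		rr = rr_new;
	}
}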
@@ -115,8 +115,6 @@ typedef grid_dist_id<3,float,aggregate<float[3]>> grid_type;
// The type of the particles
typedef vector_dist<3,float,aggregate<float[3],float[3],float[3],float[3],float[3]>> particles_type;
#include "CG.hpp"
// radius of the torus
float ringr1 = 5.0/4.0;
// radius of the core of the torus
@@ -132,11 +130,6 @@ float nu = 0.0001535;
// Time step
float dt = 0.006;
#ifdef SE_CLASS1
DIOCANE
#endif
// All the properties by index
constexpr unsigned int vorticity = 0;
constexpr unsigned int velocity = 0;
@@ -306,30 +299,6 @@ const bool poisson_nn_helm::boundary[] = {PERIODIC,PERIODIC,PERIODIC};
//! \cond [poisson_syseq] \endcond
void OpA(grid_type_scal & in, grid_type_scal & out)
{
	float fac1 = 0.25f/(in.spacing(0)*in.spacing(0));
	float fac2 = 0.25f/(in.spacing(1)*in.spacing(1));
	float fac3 = 0.25f/(in.spacing(2)*in.spacing(2));

	in.ghost_get<0>();

	auto it = in.getDomainIterator();

	while (it.isNext())
	{
		auto p = it.get();

		out.template getProp<0>(p) = fac1*(in.template get<0>(p.move(x,2))+in.template get<0>(p.move(x,-2)))+
		                             fac2*(in.template get<0>(p.move(y,2))+in.template get<0>(p.move(y,-2)))+
		                             fac3*(in.template get<0>(p.move(z,2))+in.template get<0>(p.move(z,-2)))-
		                             2.0f*(fac1+fac2+fac3)*in.template get<0>(p);

		++it;
	}
}
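
The stencil OpA applies is the standard second-order central-difference Laplacian, but evaluated on neighbours two grid cells away, so the effective spacing is 2*h per axis (the 0.25/h^2 factors are 1/(2h)^2). A minimal one-dimensional illustration, not part of the commit and with the hypothetical name laplacian_1d_wide:

// Minimal 1D sketch (illustrative only) of the per-axis update OpA performs,
// assuming a uniform spacing h: a second-order Laplacian on a stencil widened to 2*h.
float laplacian_1d_wide(const float * u, int i, float h)
{
	float fac = 0.25f/(h*h);                    // = 1/(2h)^2
	return fac*(u[i+2] - 2.0f*u[i] + u[i-2]);   // (u(x+2h) - 2 u(x) + u(x-2h)) / (2h)^2
}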
/*! \page Vortex_in_cell_petsc Vortex in Cell 3D
*
* # Step 2: Helmholtz-Hodge projection # {#vic_hlm_proj}
@@ -517,7 +486,7 @@ void helmotz_hodge_projection(grid_type & gr, const Box<3,float> & domain)
petsc_solver<double> solver;
// Set the conjugate-gradient as method to solve the linear solver
- solver.setSolver(KSPCG);
+ solver.setSolver(KSPBCGS);
// Set the absolute tolerance to determine that the method is converged
solver.setAbsTol(0.1);
@@ -535,34 +504,6 @@ void helmotz_hodge_projection(grid_type & gr, const Box<3,float> & domain)
tm_solve.stop();
//////////////// CG Call //////////////////////////

// Here we create a distributed grid to store the result of the Helmholtz projection
grid_dist_id<3,float,aggregate<float>> sol(gr.getDecomposition(),gr.getGridInfo().getSize(),g);

// start from the zero initial guess
auto zit = sol.getDomainIterator();
while (zit.isNext())
{
	auto p = zit.get();

	sol.template getProp<0>(p) = 0.0;

	++zit;
}

// time the hand-rolled CG and print it next to the PETSc solve time
timer tm_solve2;
tm_solve2.start();

CG(OpA,sol,psi);

tm_solve2.stop();

std::cout << "Time to solve: " << tm_solve2.getwct() << " " << tm_solve.getwct() << std::endl;

///////////////////////////////////////////////////
// copy the solution x to the grid psi
fd.template copy<phi>(x_,psi);
@@ -760,8 +701,8 @@ void comp_vel(Box<3,float> & domain, grid_type & g_vort,grid_type & g_vel, petsc
// Create an PETSC solver
petsc_solver<double> solver;
- solver.setSolver(KSPCG);
- solver.setAbsTol(0.1);
+ solver.setSolver(KSPBCGS);
+ solver.setAbsTol(0.01);
solver.setMaxIter(500);
solver.log_monitor();
@@ -782,34 +723,6 @@ void comp_vel(Box<3,float> & domain, grid_type & g_vort,grid_type & g_vel, petsc
tm_solve.stop();
//////////////// CG Call //////////////////////////

// Here we create a distributed grid to store the result of the Helmholtz projection
grid_dist_id<3,float,aggregate<float>> sol(gr_ps.getDecomposition(),gr_ps.getGridInfo().getSize(),g);

// start from the zero initial guess
auto zit = sol.getDomainIterator();
while (zit.isNext())
{
	auto p = zit.get();

	sol.template getProp<0>(p) = 0.0;

	++zit;
}

// time the hand-rolled CG and print it next to the PETSc solve time
timer tm_solve2;
tm_solve2.start();

CG(OpA,sol,gr_ps);

tm_solve2.stop();

std::cout << "Time to solve: " << tm_solve2.getwct() << " " << tm_solve.getwct() << std::endl;

///////////////////////////////////////////////////
// Calculate the residual
solError serr;
@@ -1246,7 +1159,7 @@ int main(int argc, char* argv[])
Ghost<3,long int> g(2);
// Grid points on x=128 y=64 z=64
- long int sz[] = {512,128,128};
+ long int sz[] = {512,64,64};
size_t szu[] = {(size_t)sz[0],(size_t)sz[1],(size_t)sz[2]};
periodicity<3> bc = {{PERIODIC,PERIODIC,PERIODIC}};
@@ -1326,7 +1239,7 @@ int main(int argc, char* argv[])
}
// Time Integration
- for ( ; i < 1 ; i++)
+ for ( ; i < 10 ; i++)
{
// do step 4-5-6-7
do_step(particles,g_vort,g_vel,g_dvort,domain,inte,phi_s);
@@ -1335,7 +1248,7 @@
rk_step1(particles);
// do step 9-10-11-12
- /* do_step(particles,g_vort,g_vel,g_dvort,domain,inte,phi_s);
+ do_step(particles,g_vort,g_vel,g_dvort,domain,inte,phi_s);
// do step 13
rk_step2(particles);
@@ -1353,7 +1266,7 @@
// every 100 steps write the output
if (i % 100 == 0) {check_point_and_save(particles,g_vort,g_vel,g_dvort,i);}
*/
}
openfpm_finalize();
@@ -201,13 +201,13 @@ if [ x"$CXX" != x"icpc" ]; then
cp -r include $1/MUMPS
cp -r lib $1/MUMPS
MUMPS_extra_lib="--with-mumps-lib=\"$1/MUMPS/lib/libdmumps.a $1/MUMPS/lib/libmumps_common.a $1/MUMPS/lib/libpord.a -pthread \""
MUMPS_extra_lib="$1/MUMPS/lib/libdmumps.a $1/MUMPS/lib/libmumps_common.a $1/MUMPS/lib/libpord.a -pthread "
configure_options="$configure_options --with-mumps=yes --with-mumps-include=$1/MUMPS/include"
fi
else
echo "MUMPS already installed"
MUMPS_extra_lib="--with-mumps-lib=\"$1/MUMPS/lib/libdmumps.a $1/MUMPS/lib/libmumps_common.a $1/MUMPS/lib/libpord.a -pthread \""
MUMPS_extra_lib="$1/MUMPS/lib/libdmumps.a $1/MUMPS/lib/libmumps_common.a $1/MUMPS/lib/libpord.a -pthread "
configure_options="$configure_options --with-mumps=yes --with-mumps-include=$1/MUMPS/include"
fi
fi
@@ -308,9 +308,9 @@ fi
tar -xf petsc-lite-3.6.4.tar.gz
cd petsc-3.6.4
echo "./configure --with-cxx-dialect=C++11 $petsc_openmp --with-mpi-dir=$mpi_dir $configure_options "$MUMPS_extra_lib" --prefix=$1/PETSC --with-debugging=0"
echo "./configure --with-cxx-dialect=C++11 $petsc_openmp --with-mpi-dir=$mpi_dir $configure_options --with-mumps-lib="$MUMPS_extra_lib" --prefix=$1/PETSC --with-debugging=0"
- ./configure --with-cxx-dialect=C++11 $petsc_openmp --with-mpi-dir=$mpi_dir $configure_options "$MUMPS_extra_lib" --prefix=$1/PETSC --with-debugging=0
+ ./configure --with-cxx-dialect=C++11 $petsc_openmp --with-mpi-dir=$mpi_dir $configure_options --with-mumps-lib="$MUMPS_extra_lib" --prefix=$1/PETSC --with-debugging=0
make all test
make install