Commit fa585d41 authored by incardon's avatar incardon

Update modules

parent f6522e41
# Change Log
All notable changes to this project will be documented in this file.
## [0.8.0] February
### Added
- Dynamic Load balancing
- Added SPH Dam break with Dynamic load balancing
- Added procedure for update ./install --update
(Starting from 0.8.0, released versions will be supported with bug fixes; each version 0.X.0 will be supported until
0.X+2.0 is released)
### Changed
- BOOST updated to 1.63
- Eigen updated to 3.3.7
## [0.7.0] 15 December 2016
### Added
......@@ -165,19 +178,18 @@ All notable changes to this project will be documented in this file.
- Algebraic Multigrid solver
- Parallel VTK, improved visualization
## [0.8.0] - Mid January 2017
## [0.10.0] - July 2017
### Added
- Dynamic Load Balancing examples and interface stabilization
- Check Point restart
- More examples and documentation
## [0.7.0] - December of October
## [0.9.0] - May 2017
### Added
- Asynchronous communication
- Support for Microsoft Windows with Cygwin
- Support for Docker/codenvy
- Defining a domain an invalid domain like Box<2,float> box({0.0,1.0},{0.0,1.0}) (the correct is {0.0,0.0},{1.0,1.0} )
produced a dead-lock or an unclear error message in SE_CLASS1 with no hint given; added a useful error message
......
......@@ -4,7 +4,7 @@
## Take all the options with the exception of --enable-install-req
AC_PREREQ(2.59)
AC_INIT(OpenFPM_pdata, 0.8.0, BUG-REPORT-ADDRESS)
AC_CANONICAL_SYSTEM
AC_CONFIG_SRCDIR([src/main.cpp])
AC_CONFIG_SUBDIRS([openfpm_data openfpm_devices openfpm_vcluster openfpm_io openfpm_numerics])
......
openfpm_devices @ a0b02db5
Subproject commit 90076f0c7ea6ac954d2b09fc8e84caa64024e8a6
Subproject commit a0b02db5938003755b85c86fded64b107ac4e55d
#!/bin/bash

# Install BOOST 1.63 into $1/BOOST
#   $1 = installation prefix
#   $2 = number of parallel build jobs
#   $3 = toolset to build with (e.g. gcc, clang, intel)

# check if the directory $1/BOOST exist
if [ -d "$1/BOOST" ]; then
  echo "BOOST already installed"
  exit 0
fi

wget http://ppmcore.mpi-cbg.de/upload/boost_1_63_0.tar.bz2
tar -xvf boost_1_63_0.tar.bz2
cd boost_1_63_0
./bootstrap.sh --with-toolset=$3
mkdir $1/BOOST
./b2 -j $2 install --prefix=$1/BOOST
cd ..
rm -rf boost_1_63_0
......@@ -17,14 +17,16 @@ if [ ! -d "$1/SUITESPARSE" ]; then
exit 1
fi
# Download and unpack Eigen 3.3.1 (unpacks into a hash-named directory)
wget http://ppmcore.mpi-cbg.de/upload/eigen-3.3.1.tar.bz2
rm -rf eigen-eigen-f562a193118d
tar -xf eigen-3.3.1.tar.bz2

cd eigen-eigen-f562a193118d
mkdir $1/EIGEN/
mv Eigen $1/EIGEN/Eigen
cd ..
rm -rf eigen-eigen-f562a193118d

# Mark the installation
echo 1 > $1/EIGEN/version
......@@ -202,13 +202,13 @@ if [ x"$CXX" != x"icpc" ]; then
cp -r lib $1/MUMPS

# Tell PETSc where MUMPS is; the libraries themselves are passed
# separately through $MUMPS_extra_lib at configure time
MUMPS_extra_lib="--with-mumps-lib=\"$1/MUMPS/lib/libdmumps.a $1/MUMPS/lib/libmumps_common.a $1/MUMPS/lib/libpord.a\""
configure_options="$configure_options --with-mumps=yes --with-mumps-include=$1/MUMPS/include"
fi
else
echo "MUMPS already installed"
MUMPS_extra_lib="--with-mumps-lib=\"$1/MUMPS/lib/libdmumps.a $1/MUMPS/lib/libmumps_common.a $1/MUMPS/lib/libpord.a\""
configure_options="$configure_options --with-mumps=yes --with-mumps-include=$1/MUMPS/include"
fi
fi
......@@ -308,9 +308,9 @@ fi
tar -xf petsc-lite-3.6.4.tar.gz
cd petsc-3.6.4

# Print the configure command before running it (helps debugging failed builds)
echo "./configure --with-cxx-dialect=C++11 $petsc_openmp --with-mpi-dir=$mpi_dir $configure_options "$MUMPS_extra_lib" --prefix=$1/PETSC --with-debugging=0"
./configure --with-cxx-dialect=C++11 $petsc_openmp --with-mpi-dir=$mpi_dir $configure_options "$MUMPS_extra_lib" --prefix=$1/PETSC --with-debugging=0
make all test
make install
......
......@@ -76,7 +76,7 @@ function remove_old()
## Check the installed version of the dependencies
if [ -d $1/BOOST ]; then
  # BOOST_VERSION 106300 == Boost 1.63.0; if the marker is absent the installed copy is older
  is_update=$(cat $1/BOOST/include/boost/version.hpp | grep "#define BOOST_VERSION 106300")
  if [ x"$is_update" == x"" ]; then
    echo -e "\033[1;34;5m --------------------------------------------------------------------------- \033[0m"
    echo -e "\033[1;34;5m Boost has been updated to 1.63, the component will be updated automatically \033[0m"
......@@ -143,6 +143,8 @@ function remove_old()
echo -e "\033[1;34;5m ---------------------------------------------------------------------- \033[0m"
sleep 5
rm -rf $1/EIGEN/Eigen
rm -rf $1/EIGEN
rm -rf $1/PETSC
fi
fi
......
......@@ -48,7 +48,7 @@ struct GBoxes
#define FREE 1
#define FIXED 2
#include "grid_dist_key.hpp"
#include "Grid/grid_dist_key.hpp"
#include "VCluster/VCluster.hpp"
......
......@@ -9,7 +9,8 @@
#define SRC_GRID_GRID_DIST_ID_ITERATOR_DEC_HPP_
#include "grid_dist_id_iterator.hpp"
#include "grid_dist_util.hpp"
#include "Grid/grid_dist_util.hpp"
#include "grid_dist_id_iterator_util.hpp"
/*! \brief Given the decomposition it create an iterator
*
......@@ -40,41 +41,6 @@ class grid_dist_id_iterator_dec
typename Decomposition::stype spacing[Decomposition::dims];
/*! \brief compute the subset where it has to iterate
 *
 * Intersect the iteration range [start,stop] with the domain box of
 * grid gc, returning the intersection in the local coordinates of
 * that grid
 *
 * \param gc Actual grid
 * \param start_c adjusted start point for the grid gc
 * \param stop_c adjusted stop point for the grid gc
 *
 * \return false if the sub-set does not contain points
 *
 */
bool compute_subset(size_t gc, grid_key_dx<Decomposition::dims> & start_c, grid_key_dx<Decomposition::dims> & stop_c)
{
	// NOTE(review): the original body read the member g_c instead of the
	// parameter gc; it only worked because every caller passed g_c. Use
	// the parameter so the function operates on the grid it was asked about.

	// Intersect the grid keys
	for (size_t i = 0 ; i < Decomposition::dims ; i++)
	{
		// Domain box of grid gc translated into global coordinates
		long int start_p = gdb_ext.get(gc).Dbox.getP1().get(i) + gdb_ext.get(gc).origin.get(i);
		long int stop_p = gdb_ext.get(gc).Dbox.getP2().get(i) + gdb_ext.get(gc).origin.get(i);

		// Clamp the requested start to the grid domain (result in local coordinates)
		if (start.get(i) <= start_p)
			start_c.set_d(i,gdb_ext.get(gc).Dbox.getP1().get(i));
		else if (start.get(i) <= stop_p)
			start_c.set_d(i,start.get(i) - gdb_ext.get(gc).origin.get(i));
		else
			return false;

		// Clamp the requested stop to the grid domain (result in local coordinates)
		if (stop.get(i) >= stop_p)
			stop_c.set_d(i,gdb_ext.get(gc).Dbox.getP2().get(i));
		else if (stop.get(i) >= start_p)
			stop_c.set_d(i,stop.get(i) - gdb_ext.get(gc).origin.get(i));
		else
			return false;
	}

	return true;
}
/*! \brief from g_c increment g_c until you find a valid grid
*
*/
......@@ -86,7 +52,7 @@ class grid_dist_id_iterator_dec
// When the grid has size 0 potentially all the other informations are garbage
while (g_c < gdb_ext.size() &&
(gdb_ext.get(g_c).Dbox.isValid() == false || compute_subset(g_c,start_c,stop_c) == false ))
(gdb_ext.get(g_c).Dbox.isValid() == false || compute_subset<Decomposition>(gdb_ext,g_c,start,stop,start_c,stop_c) == false ))
{g_c++;}
// get the next grid iterator
......@@ -118,6 +84,8 @@ class grid_dist_id_iterator_dec
*
* \param tmp iterator to copy
*
* \return itself
*
*/
grid_dist_id_iterator_dec<Decomposition> & operator=(const grid_dist_id_iterator_dec<Decomposition> & tmp)
{
......@@ -246,7 +214,7 @@ class grid_dist_id_iterator_dec
*/
inline grid_key_dx<Decomposition::dims> get()
{
const grid_dist_key_dx<Decomposition::dims> & k = get_int();
const grid_dist_key_dx<Decomposition::dims> k = get_int();
// Get the sub-domain id
size_t sub_id = k.getSub();
......@@ -258,6 +226,26 @@ class grid_dist_id_iterator_dec
return k_glob;
}
/*! \brief Get the starting point of the sub-grid we are iterating
 *
 * \return the start point of the iteration, in global coordinates
 *
 */
inline grid_key_dx<Decomposition::dims> getStart()
{
	return start;
}
/*! \brief Get the stop point of the sub-grid we are iterating
 *
 * (The original doc said "starting point" — copy-paste error; this
 * accessor returns the stop point)
 *
 * \return the stop point of the iteration, in global coordinates
 *
 */
inline grid_key_dx<Decomposition::dims> getStop()
{
	return stop;
}
};
......
/*
* grid_dist_id_iterator_dec_skin.hpp
*
* Created on: Jan 4, 2017
* Author: i-bird
*/
#ifndef SRC_GRID_ITERATORS_GRID_DIST_ID_ITERATOR_DEC_SKIN_HPP_
#define SRC_GRID_ITERATORS_GRID_DIST_ID_ITERATOR_DEC_SKIN_HPP_
#include "grid_dist_id_iterator.hpp"
#include "Grid/grid_dist_util.hpp"
#include "grid_dist_id_iterator_util.hpp"
/*! \brief Given the decomposition it create an iterator
 *
 * Iterator across the local elements of the distributed grid that lie
 * on the "skin" between box A and box B (A must be contained into B)
 *
 * \tparam Decomposition Decomposition type
 *
 */
template<typename Decomposition>
class grid_dist_id_iterator_dec_skin : protected grid_skin_iterator_bc<Decomposition::dims>
{
	//! a_its element in this moment selected
	size_t a_its_p;

	//! grid list counter
	size_t g_c;

	//! Extension of each grid: domain and ghost + domain
	openfpm::vector<GBoxes<Decomposition::dims>> gdb_ext;

	//! Currently active sub-grid iterator
	grid_key_dx_iterator_sub<Decomposition::dims> a_it;

	//! Pair of a local grid index and the sub-iterator running on it
	struct gp_sub
	{
		//! from which grid this iterator come from
		size_t gc;

		//! Iterator
		grid_key_dx_iterator_sub<Decomposition::dims> it;

		gp_sub(size_t gc, grid_key_dx_iterator_sub<Decomposition::dims> && it)
		:gc(gc),it(it)
		{}
	};

	//! Actual sub-iterators
	openfpm::vector<gp_sub> a_its;

	//! Spacing
	typename Decomposition::stype spacing[Decomposition::dims];

	/*! \brief from g_c increment g_c until you find a valid grid
	 *
	 */
	void selectValidGrid()
	{
		if (a_its_p < a_its.size())
		{
			g_c = a_its.get(a_its_p).gc;
			a_it.reinitialize(a_its.get(a_its_p).it);
		}
		else
			g_c = gdb_ext.size();
	}

	/*! \brief construct sub-iterators
	 *
	 * For every skin box and every local grid, compute the non-empty
	 * intersection and store a sub-iterator for it in a_its
	 *
	 */
	void construct_sub_it()
	{
		// Construct the sub iterators
		for (size_t i = 0 ; i < 2*Decomposition::dims; i++)
		{
			for (size_t gc = 0 ; gc < gdb_ext.size() ; gc++)
			{
				grid_key_dx<Decomposition::dims> start = this->sub_it[i].getStart();
				grid_key_dx<Decomposition::dims> stop = this->sub_it[i].getStop();

				grid_key_dx<Decomposition::dims> start_c;
				grid_key_dx<Decomposition::dims> stop_c;

				if (compute_subset<Decomposition>(gdb_ext,gc,start,stop,start_c,stop_c) == true)
				{
					// Convert global coordinate start_c stop_c into local
					// and calculate the grid sizes
					size_t sz[Decomposition::dims];

					for (size_t j = 0 ; j < Decomposition::dims ; j++)
						sz[j] = gdb_ext.get(gc).GDbox.getHigh(j) + 1;

					grid_sm<Decomposition::dims,void> g_sm(sz);

					// Non empty sub-set
					a_its.add(gp_sub(gc,grid_key_dx_iterator_sub<Decomposition::dims>(g_sm,start_c,stop_c)));
				}
			}
		}
	}

	/*! \brief Get the actual key
	 *
	 * \return the actual key
	 *
	 */
	inline grid_dist_key_dx<Decomposition::dims> get_int()
	{
		return grid_dist_key_dx<Decomposition::dims>(g_c,a_it.get());
	}

public:

	/*! \brief Copy constructor
	 *
	 * \param tmp iterator to copy
	 *
	 */
	grid_dist_id_iterator_dec_skin(const grid_dist_id_iterator_dec_skin<Decomposition> & tmp)
	:grid_skin_iterator_bc<Decomposition::dims>(tmp),a_its_p(0)
	{
		this->operator=(tmp);
	}

	/*! \brief Move constructor
	 *
	 * (delegates to the copy assignment, same as the copy constructor)
	 *
	 * \param tmp iterator to copy
	 *
	 */
	grid_dist_id_iterator_dec_skin(grid_dist_id_iterator_dec_skin<Decomposition> && tmp)
	:grid_skin_iterator_bc<Decomposition::dims>(tmp),a_its_p(0)
	{
		this->operator=(tmp);
	}

	/*! \brief Constructor of the distributed grid iterator
	 *
	 * \param dec Decomposition
	 * \param g_sm grid size information of the global grid
	 * \param A box A (must be contained into box B)
	 * \param B box B
	 * \param bc boundary conditions
	 *
	 */
	grid_dist_id_iterator_dec_skin(Decomposition & dec,
			                       const grid_sm<Decomposition::dims,void> & g_sm,
								   const Box<Decomposition::dims,size_t> & A,
								   const Box<Decomposition::dims,size_t> & B,
								   const size_t (& bc)[Decomposition::dims])
	:grid_skin_iterator_bc<Decomposition::dims>(g_sm,A,B,bc),a_its_p(0),g_c(0)
	{
		// From the decomposition construct gdb_ext
		create_gdb_ext<Decomposition::dims,Decomposition>(gdb_ext,dec,g_sm.getSize(),dec.getDomain(),spacing);

		// This iterator only works if A is contained into B
		if (A.isContained(B) == false)
			std::cout << __FILE__ << ":" << __LINE__ << ", Error Box A must be contained into box B" << std::endl;

		// construct sub_iterators
		construct_sub_it();

		// Initialize the current iterator
		// with the first grid
		selectValidGrid();
	}

	//! Destructor
	~grid_dist_id_iterator_dec_skin()
	{
	}

	/*! \brief Get the next element
	 *
	 * \return the next grid_key
	 *
	 */
	inline grid_dist_id_iterator_dec_skin<Decomposition> & operator++()
	{
		++a_it;

		// check if a_it is at the end
		if (a_it.isNext() == true)
			return *this;
		else
		{
			// switch to the new grid
			a_its_p++;
			selectValidGrid();
		}

		return *this;
	}

	/*! \brief Check if there is the next element
	 *
	 * \return true if there is the next, false otherwise
	 *
	 */
	inline bool isNext()
	{
		// If there are no other grid stop
		if (g_c >= gdb_ext.size())
			return false;

		return true;
	}

	/*! \brief Get the spacing of the grid
	 *
	 * \param i dimension
	 *
	 * \return the spacing along dimension i
	 *
	 */
	inline typename Decomposition::stype getSpacing(size_t i)
	{
		return spacing[i];
	}

	/*! \brief Get the actual global key of the grid
	 *
	 * \return the global position in the grid
	 *
	 */
	inline grid_key_dx<Decomposition::dims> get()
	{
		const grid_dist_key_dx<Decomposition::dims> k = get_int();

		// Get the sub-domain id
		size_t sub_id = k.getSub();

		grid_key_dx<Decomposition::dims> k_glob = k.getKey();

		// shift the local key into global coordinates
		// (removed a leftover debugging trap on k_glob.get(0) > 11 — dead
		// code with no observable effect)
		k_glob = k_glob + gdb_ext.get(sub_id).origin;

		return k_glob;
	}

	/*! \brief Copy operator=
	 *
	 * \param tmp iterator to copy
	 *
	 * \return itself
	 *
	 */
	grid_dist_id_iterator_dec_skin<Decomposition> & operator=(const grid_dist_id_iterator_dec_skin<Decomposition> & tmp)
	{
		a_its_p = tmp.a_its_p;
		g_c = tmp.g_c;
		gdb_ext = tmp.gdb_ext;
		a_its = tmp.a_its;

		for (size_t i = 0 ; i < Decomposition::dims ; i++)
			spacing[i] = tmp.spacing[i];

		a_it.reinitialize(tmp.a_it);

		return *this;
	}

	/*! \brief Move operator=
	 *
	 * (members are copied, not moved, preserving the original behavior)
	 *
	 * \param tmp iterator to copy
	 *
	 * \return itself
	 *
	 */
	grid_dist_id_iterator_dec_skin<Decomposition> & operator=(grid_dist_id_iterator_dec_skin<Decomposition> && tmp)
	{
		a_its_p = tmp.a_its_p;
		g_c = tmp.g_c;
		gdb_ext = tmp.gdb_ext;
		a_its = tmp.a_its;

		for (size_t i = 0 ; i < Decomposition::dims ; i++)
			spacing[i] = tmp.spacing[i];

		a_it.reinitialize(tmp.a_it);

		return *this;
	}
};
#endif /* SRC_GRID_ITERATORS_GRID_DIST_ID_ITERATOR_DEC_SKIN_HPP_ */
/*
* grid_dist_id_iterator_util.hpp
*
* Created on: Jan 6, 2017
* Author: i-bird
*/
#ifndef SRC_GRID_ITERATORS_GRID_DIST_ID_ITERATOR_UTIL_HPP_
#define SRC_GRID_ITERATORS_GRID_DIST_ID_ITERATOR_UTIL_HPP_
/*! \brief compute the subset where it has to iterate
 *
 * Intersect the requested iteration range [start,stop] (global
 * coordinates) with the domain box of grid g_c, returning the
 * intersection in the local coordinates of that grid
 *
 * \param gdb_ext domain/ghost boxes and origin of each local grid
 * \param g_c Actual grid
 * \param start iterator start in global coordinate
 * \param stop iterator stop in global coordinate
 * \param start_c adjusted start point for the grid g_c
 * \param stop_c adjusted stop point for the grid g_c
 *
 * \return false if the sub-set does not contain points
 *
 */
template<typename Decomposition> static inline bool compute_subset(const openfpm::vector<GBoxes<Decomposition::dims>> & gdb_ext, size_t g_c, const grid_key_dx<Decomposition::dims> & start, const grid_key_dx<Decomposition::dims> & stop, grid_key_dx<Decomposition::dims> & start_c, grid_key_dx<Decomposition::dims> & stop_c)
{
	// Intersect the grid keys
	for (size_t i = 0 ; i < Decomposition::dims ; i++)
	{
		// Domain box of grid g_c translated into global coordinates
		long int start_p = gdb_ext.get(g_c).Dbox.getP1().get(i) + gdb_ext.get(g_c).origin.get(i);
		long int stop_p = gdb_ext.get(g_c).Dbox.getP2().get(i) + gdb_ext.get(g_c).origin.get(i);

		// Clamp the requested start to the grid domain (result in local coordinates)
		if (start.get(i) <= start_p)
			start_c.set_d(i,gdb_ext.get(g_c).Dbox.getP1().get(i));
		else if (start.get(i) <= stop_p)
			start_c.set_d(i,start.get(i) - gdb_ext.get(g_c).origin.get(i));
		else
			return false;

		// Clamp the requested stop to the grid domain (result in local coordinates)
		if (stop.get(i) >= stop_p)
			stop_c.set_d(i,gdb_ext.get(g_c).Dbox.getP2().get(i));
		else if (stop.get(i) >= start_p)
			stop_c.set_d(i,stop.get(i) - gdb_ext.get(g_c).origin.get(i));
		else
			return false;
	}

	return true;
}
#endif /* SRC_GRID_ITERATORS_GRID_DIST_ID_ITERATOR_UTIL_HPP_ */
/*
* grid_dist_id_iterators_unit_tests.hpp
*
* Created on: Jan 4, 2017
* Author: i-bird
*/
#ifndef SRC_GRID_ITERATORS_GRID_DIST_ID_ITERATORS_UNIT_TESTS_HPP_
#define SRC_GRID_ITERATORS_GRID_DIST_ID_ITERATORS_UNIT_TESTS_HPP_
#include "grid_dist_id_iterator_dec_skin.hpp"
BOOST_AUTO_TEST_SUITE( grid_dist_id_iterators_test )
//! Print a test banner ("<test> <sz>") on the master process only
void print_test(std::string test, size_t sz)
{
	// Non-master processes stay silent
	if (create_vcluster().getProcessUnitID() != 0)
		return;

	std::cout << test << " " << sz << "\n";
}
void Test2D_sub(const Box<2,float> & domain, long int k)
{
long int big_step = k / 30;
big_step = (big_step == 0)?1:big_step;
long int small_step = 21;
// this test is only performed when the number of processor is <= 32
if (create_vcluster().getProcessingUnits() > 32)
return;
print_test( "Testing 2D grid sub iterator k<=",k);
// 2D test
for ( ; k >= 2 ; k-= (k > 2*big_step)?big_step:small_step )
{
BOOST_TEST_CHECKPOINT( "Testing 2D grid k=" << k );
// grid size
size_t sz[2];
sz[0] = k;
sz[1] = k;
float factor = pow(create_vcluster().getProcessingUnits()/2.0f,1.0f/2.0f);
// Ghost
Ghost<2,float> g(0.01 / factor);
// Distributed grid with id decomposition
grid_dist_id<2, float, scalar<float>> g_dist(sz,domain,g);
// check the consistency of the decomposition
bool val = g_dist.getDecomposition().check_consistency();
BOOST_REQUIRE_EQUAL(val,true);
size_t count;
// Grid sm
grid_sm<2,void> info(sz);
{
//! [Usage of a sub_grid iterator]
grid_key_dx<2> one(1,1);
grid_key_dx<2> one_end(k-2,k-2);
bool check = true;
count = 0;
// get the sub-domain iterator
auto dom = g_dist.getSubDomainIterator(one,one_end);
while (dom.isNext())
{
auto key = dom.get();
auto key_g = g_dist.getGKey(key);
// key_g should never be 1 or k-1
check &= (key_g.get(0) == 0 || key_g.get(0) == k-1)?false:true;
check &= (key_g.get(1) == 0 || key_g.get(1) == k-1)?false:true;
g_dist.template get<0>(key) = info.LinId(key_g);
// Count the point
count++;
++dom;
}
BOOST_REQUIRE_EQUAL(check,true);
//! [Usage of a sub_grid iterator]
}
// Get the virtual cluster machine
Vcluster & vcl = g_dist.getVC();
// reduce
vcl.sum(count);
vcl.execute();
// Check
BOOST_REQUIRE_EQUAL(count,(size_t)(k-2)*(k-2));
// check with a 1x1 square
{