diff --git a/CHANGELOG.md b/CHANGELOG.md index e5b701ba6067648d3c9ee47d92b74ab20c729880..a7c64dd8d614791a26373e5639d545886e1a184c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,8 +9,9 @@ All notable changes to this project will be documented in this file. ### Fixed - Installation PETSC installation fail in case of preinstalled MPI +- Mis-compilation of SUITESPARSE on gcc-6.2 - vector_dist with negative domain (Now supported) -- Grid 1D fixing +- Grid 1D has been fixed ### Changed diff --git a/script/install_SUITESPARSE.sh b/script/install_SUITESPARSE.sh index 8ad698013d80686f6527944d92bb7c2bd6412815..73cba06e438587546c9544426e4deea0f1ad95d5 100755 --- a/script/install_SUITESPARSE.sh +++ b/script/install_SUITESPARSE.sh @@ -43,7 +43,7 @@ else sed -i "/INSTALL_LIB\s=\s\/usr\/local\/lib/c\INSTALL_LIB = $1\/SUITESPARSE\/lib" SuiteSparse_config/SuiteSparse_config.mk sed -i "/INSTALL_INCLUDE\s=\s\/usr\/local\/include/c\INSTALL_INCLUDE = $1\/SUITESPARSE\/include" SuiteSparse_config/SuiteSparse_config.mk sed -i "/\sLAPACK\s=\s-llapack/c\LAPACK = " SuiteSparse_config/SuiteSparse_config.mk - sed -i "/\sBLAS\s=\s\-lopenblas/c\BLAS = -L$1/OPENBLAS/lib -lopenblas" SuiteSparse_config/SuiteSparse_config.mk + sed -i "/\sBLAS\s=\s\-lopenblas/c\BLAS = -L$1/OPENBLAS/lib -lopenblas -lpthread" SuiteSparse_config/SuiteSparse_config.mk fi diff --git a/src/Decomposition/CartDecomposition.hpp b/src/Decomposition/CartDecomposition.hpp index 144150979487aaa9c21ceb3bcc828a86fc6f4dc6..daf0682dab1d53a22af31494418a37a616a55d2e 100755 --- a/src/Decomposition/CartDecomposition.hpp +++ b/src/Decomposition/CartDecomposition.hpp @@ -75,6 +75,7 @@ * \see calculateGhostBoxes() for a visualization of internal and external ghost boxes * * ### Create a Cartesian decomposition object on a Box space, distribute, calculate internal and external ghost boxes + * * \snippet CartDecomposition_unit_test.hpp Create CartDecomposition * */ @@ -203,6 +204,7 @@ public: /*! 
\brief Constructor, it decompose and distribute the sub-domains across the processors * * \param v_cl Virtual cluster, used internally for communications + * \param bc boundary conditions * */ void createSubdomains(Vcluster & v_cl, const size_t (& bc)[dim]) @@ -405,30 +407,30 @@ public: * \verbatim -+----------------------------------------------------+ -| | -| Processor 8 | -| Sub+domain 0 +-----------------------------------+ -| | | -| | | -++--------------+---+---------------------------+----+ Processor 9 | - | | | B8_0 | | Subdomain 0 | - | +------------------------------------+ | - | | | | | | - | | | |B9_0| | - | | B | Local processor | | | - | Processor 5 | 5 | Subdomain 0 | | | - | Subdomain 0 | _ | +----------------------------------------+ - | | 0 | | | | - | | | | | | - | | | | | Processor 9 | - | | | |B9_1| Subdomain 1 | - | | | | | | - | | | | | | - | | | | | | - +--------------+---+---------------------------+----+ | - | | - +-----------------------------------+ + +----------------------------------------------------+ + | | + | Processor 8 | + | Sub+domain 0 +-----------------------------------+ + | | | + | | | + ++--------------+---+---------------------------+----+ Processor 9 | + | | | B8_0 | | Subdomain 0 | + | +------------------------------------+ | + | | | | | | + | | | |B9_0| | + | | B | Local processor | | | + | Processor 5 | 5 | Subdomain 0 | | | + | Subdomain 0 | _ | +----------------------------------------+ + | | 0 | | | | + | | | | | | + | | | | | Processor 9 | + | | | |B9_1| Subdomain 1 | + | | | | | | + | | | | | | + | | | | | | + +--------------+---+---------------------------+----+ | + | | + +-----------------------------------+ \endverbatim @@ -436,30 +438,32 @@ public: and also G8_0 G9_0 G9_1 G5_0 (External ghost boxes) - +----------------------------------------------------+ - | Processor 8 | - | Subdomain 0 +-----------------------------------+ - | | | - | +---------------------------------------------+ | - | | G8_0 | | | -+-----+---------------+------------------------------------+ | Processor 9 | -| | | | | Subdomain 0 | -| | | |G9_0| | -| | | | | | -| | | | | | -| | | Local processor | | | -| Processor 5 | | Sub+domain 0 | | | -| Subdomain 0 | | +-----------------------------------+ -| | | | | | -| | G | | | | -| | 5 | | | Processor 9 | -| | | | | | Subdomain 1 | -| | 0 | |G9_1| | -| | | | | | -| | | | | | -+---------------------+------------------------------------+ | | - | | | | - +----------------------------------------+----+------------------------------+ +\verbatim + + +----------------------------------------------------+ + | Processor 8 | + | Subdomain 0 +-----------------------------------+ + | | | + | +---------------------------------------------+ | + | | G8_0 | | | + +-----+---------------+------------------------------------+ | Processor 9 | + | | | | | Subdomain 0 | + | | | |G9_0| | + | | | | | | + | | | | | | + | | | Local processor | | | + | Processor 5 | | Sub+domain 0 | | | + | Subdomain 0 | | +-----------------------------------+ + | | | | | | + | | G | | | | + | | 5 | | | Processor 9 | + | | | | | | Subdomain 1 | + | | 0 | |G9_1| | + | | | | | | + | | | | | | + +---------------------+------------------------------------+ | | + | | | | + +----------------------------------------+----+------------------------------+ \endverbatim @@ -502,8 +506,10 @@ public: public: + //! Space dimensions static constexpr int dims = dim; + //! Space type typedef T stype; //! Increment the reference counter @@ -641,7 +647,11 @@ public: /*! 
\brief Apply boundary condition to the point * - * \param p Point to apply the boundary condition + * If the particle goes out on the right side, it is brought back on the left side + * in case of periodic boundary conditions, and left unchanged in case of non-periodic ones + * + * \param pt Point to apply the boundary condition to (its coordinates are changed according to + * the explanation above) * */ void applyPointBC(float (& pt)[dim]) const @@ -655,7 +665,11 @@ public: /*! \brief Apply boundary condition to the point * - * \param p Point to apply the boundary condition + * If the particle goes out on the right side, it is brought back on the left side + * in case of periodic boundary conditions, and left unchanged in case of non-periodic ones + * + * \param pt Point to apply the boundary conditions to (its coordinates are changed according to + * the explanation above) * */ void applyPointBC(Point<dim,T> & pt) const @@ -669,7 +683,11 @@ public: /*! \brief Apply boundary condition to the point * - * \param encapsulated object + * If the particle goes out on the right side, it is brought back on the left side + * in case of periodic boundary conditions, and left unchanged in case of non-periodic ones + * + * \param pt encapsulated point object (its coordinates are changed according to + * the explanation above) * */ template<typename Mem> void applyPointBC(encapc<1,Point<dim,T>,Mem> && pt) const @@ -721,7 +739,7 @@ public: /*! \brief It create another object that contain the same information and act in the same way * - * \return a duplicated decomposition + * \return a duplicated CartDecomposition object * */ CartDecomposition<dim,T,Memory> duplicate() const @@ -755,6 +773,8 @@ public: * * \param cart element to copy * + * \return itself + * */ CartDecomposition<dim,T,Memory> & operator=(const CartDecomposition & cart) { @@ -785,6 +805,8 @@ public: * * \param cart element to copy * + * \return itself + * */ CartDecomposition<dim,T,Memory> & operator=(CartDecomposition && cart) { @@ -819,6 +841,10 @@ public: * it define in how many cell it will be divided the space for a particular required minimum * number of sub-domain * + * \param n_sub number of subdomains per processor + * + * \return grid dimension (a single number, because it is the same on every dimension) + * */ static size_t getDefaultGrid(size_t n_sub) { @@ -828,6 +854,8 @@ public: } /*! \brief Given a point return in which processor the particle should go + * + * \param p point * * \return processorID * */ @@ -838,6 +866,8 @@ public: } /*! \brief Given a point return in which processor the particle should go + * + * \param p point * * \return processorID * */ @@ -848,6 +878,8 @@ public: } /*! \brief Given a point return in which processor the particle should go + * + * \param p point * * \return processorID * */ @@ -857,10 +889,12 @@ public: return fine_s.get(cd.getCell(p)); } - /*! \brief Given a point return in which processor the particle should go + /*! \brief Given a point return in which processor the point/particle should go * * Boundary conditions are considered * + * \param p point + * * \return processorID * */ @@ -876,6 +910,8 @@ public: * * Boundary conditions are considered * + * \param p point + * * \return processorID * */ @@ -891,6 +927,8 @@ public: * * Boundary consition are considered * + * \param p point position + * * \return processorID * */ @@ -939,7 +977,7 @@ public: * * \param div_ storing into how many sub-sub-domains to decompose on each dimension * \param domain_ domain to decompose - * \param bc_ boundary conditions + * \param bc boundary conditions * \param ghost Ghost size * */ @@ -962,6 +1000,10 @@ public: } + /*! 
\brief Delete the decomposition and reset the data-structure + * + * + */ void reset() { sub_domains.clear(); @@ -989,6 +1031,8 @@ } /*! \brief Refine the decomposition, available only for ParMetis distribution, for Metis it is a null call + * + * \param ts number of time steps since the previous load balancing * */ void rebalance(size_t ts) @@ -1005,6 +1049,8 @@ } /*! \brief Refine the decomposition, available only for ParMetis distribution, for Metis it is a null call + * + * \param dlb Dynamic load balancing object * * \return true if the re-balance has been executed, false otherwise */ @@ -1075,9 +1121,10 @@ public: return dist.getNSubSubDomains(); } - /*! \brief function that set the weight of the vertex + /*! \brief Function that sets the computational cost of a sub-sub-domain * * \param id vertex id + * \param weight computational cost * */ inline void setSubSubDomainComputationCost(size_t id, size_t weight) @@ -1085,9 +1132,11 @@ public: dist.setComputationCost(id, weight); } - /*! \brief function that set the weight of the vertex + /*! \brief function that returns the computation cost of the sub-sub-domain id * - * \param id vertex id + * \param id sub-sub-domain id + * + * \return the computational cost * */ inline size_t getSubSubDomainComputationCost(size_t id) @@ -1116,7 +1165,8 @@ public: /*! \brief Get the local sub-domain * - * \param i (each local processor can have more than one sub-domain) + * \param lc (each local processor can have more than one sub-domain) + * + * \return the sub-domain * */ @@ -1137,10 +1187,11 @@ public: return sp; } - /*! \brief Get the local sub-domain with ghost extension + /*! \brief Get the local sub-domain enlarged with ghost extension * - * \param i (each local processor can have more than one sub-domain) - * \return the sub-domain + * \param lc (each processor can have more than one sub-domain) + * + * \return the extended sub-domain + * */ SpaceBox<dim, T> getSubDomainWithGhost(size_t lc) @@ -1182,7 +1233,7 @@ public: * * \warning if the particle id outside the domain the result is unreliable * - * \param p object position + * \param pos object position * * \return true if it is local * */ @@ -1194,11 +1245,12 @@ public: /*! \brief Check if the particle is local considering boundary conditions * - * \warning if the particle id outside the domain and non periodic the result + * \warning if the particle is outside the domain and the boundary is non-periodic the result * is unreliable * * * \param p object position + * \param bc boundary conditions * * \return true if it is local * */ @@ -1217,8 +1269,12 @@ public: } /*! \brief Check if the particle is local considering boundary conditions + * + * \warning if the particle is outside the domain and the boundary is non-periodic the result + * is unreliable * * \param p object position + * \param bc boundary conditions * * \return true if it is local * */ @@ -1251,6 +1307,8 @@ public: /*! \brief Return the ghost * * + * \return the ghost extension + * */ const Ghost<dim,T> & getGhost() const { @@ -1283,6 +1341,8 @@ public: * * \param output directory where to write the files * + * \return true if the write succeeds + * */ bool write(std::string output) const { @@ -1358,7 +1418,9 @@ public: /*! \brief Check if the CartDecomposition contain the same information * - * \param ele Element to check + * \param cart Element to check with + * + * \return true if they are equal + * */ bool is_equal(CartDecomposition<dim,T,Memory> & cart) @@ -1402,7 +1464,9 @@ public: /*! 
\brief Check if the CartDecomposition contain the same information with the exception of the ghost part * It is anyway required that the ghost come from the same sub-domains decomposition * - * \param ele Element to check + * \param cart Element to check with + * + * \return true if the two CartDecomposition are equal * */ bool is_equal_ng(CartDecomposition<dim,T,Memory> & cart) @@ -1463,8 +1527,7 @@ public: dist.setComputationCost(gid, c + i); } - // friend classes - + //! friend classes friend extended_type; }; diff --git a/src/Decomposition/CartDecomposition_unit_test.hpp b/src/Decomposition/CartDecomposition_unit_test.hpp index 5871b7ee822f3540276d96090fbf0cc6cda6aefe..034188054df229d1f2cd9ab33d4baf3aa0a85b7f 100755 --- a/src/Decomposition/CartDecomposition_unit_test.hpp +++ b/src/Decomposition/CartDecomposition_unit_test.hpp @@ -57,7 +57,6 @@ BOOST_AUTO_TEST_CASE( CartDecomposition_non_periodic_test) // Vcluster Vcluster & vcl = create_vcluster(); - //! [Create CartDecomposition] CartDecomposition<3, float> dec(vcl); // Physical domain @@ -83,8 +82,6 @@ BOOST_AUTO_TEST_CASE( CartDecomposition_non_periodic_test) dec.setParameters(div,box,bc,g); dec.decompose(); - //! [Create CartDecomposition] - // For each calculated ghost box for (size_t i = 0; i < dec.getNIGhostBox(); i++) { @@ -244,7 +241,6 @@ BOOST_AUTO_TEST_CASE( CartDecomposition_ext_non_periodic_test) // Vcluster Vcluster & vcl = create_vcluster(); - //! [Create CartDecomposition] CartDecomposition<3,float> dec(vcl); // Physical domain diff --git a/src/Decomposition/nn_processor_unit_test.hpp b/src/Decomposition/nn_processor_unit_test.hpp index bc904d7b952b5d963e5ab2baaafce703e4ccfd21..71297a3288d93a2e3f4e1ed1bf47410427522c42 100644 --- a/src/Decomposition/nn_processor_unit_test.hpp +++ b/src/Decomposition/nn_processor_unit_test.hpp @@ -93,9 +93,9 @@ BOOST_AUTO_TEST_CASE( nn_processor_np_test) if (v_cl.getProcessUnitID() == 0) { - BOOST_REQUIRE_EQUAL(nnp.getNRealSubdomains(1),1); - BOOST_REQUIRE_EQUAL(nnp.getNRealSubdomains(2),1); - BOOST_REQUIRE_EQUAL(nnp.getNRealSubdomains(3),1); + BOOST_REQUIRE_EQUAL(nnp.getNRealSubdomains(1),1ul); + BOOST_REQUIRE_EQUAL(nnp.getNRealSubdomains(2),1ul); + BOOST_REQUIRE_EQUAL(nnp.getNRealSubdomains(3),1ul); const openfpm::vector< ::Box<2,float> > & nsubs1 = nnp.getNearSubdomains(1); const openfpm::vector< ::Box<2,float> > & nsubs2 = nnp.getNearSubdomains(2); @@ -120,9 +120,9 @@ BOOST_AUTO_TEST_CASE( nn_processor_np_test) } else if (v_cl.getProcessUnitID() == 1) { - BOOST_REQUIRE_EQUAL(nnp.getNRealSubdomains(0),1); - BOOST_REQUIRE_EQUAL(nnp.getNRealSubdomains(2),1); - BOOST_REQUIRE_EQUAL(nnp.getNRealSubdomains(3),1); + BOOST_REQUIRE_EQUAL(nnp.getNRealSubdomains(0),1ul); + BOOST_REQUIRE_EQUAL(nnp.getNRealSubdomains(2),1ul); + BOOST_REQUIRE_EQUAL(nnp.getNRealSubdomains(3),1ul); const openfpm::vector< ::Box<2,float> > & nsubs1 = nnp.getNearSubdomains(0); const openfpm::vector< ::Box<2,float> > & nsubs2 = nnp.getNearSubdomains(2); @@ -147,9 +147,9 @@ BOOST_AUTO_TEST_CASE( nn_processor_np_test) } else if (v_cl.getProcessUnitID() == 2) { - BOOST_REQUIRE_EQUAL(nnp.getNRealSubdomains(1),1); - BOOST_REQUIRE_EQUAL(nnp.getNRealSubdomains(0),1); - BOOST_REQUIRE_EQUAL(nnp.getNRealSubdomains(3),1); + BOOST_REQUIRE_EQUAL(nnp.getNRealSubdomains(1),1ul); + BOOST_REQUIRE_EQUAL(nnp.getNRealSubdomains(0),1ul); + BOOST_REQUIRE_EQUAL(nnp.getNRealSubdomains(3),1ul); const openfpm::vector< ::Box<2,float> > & nsubs1 = nnp.getNearSubdomains(1); const openfpm::vector< ::Box<2,float> > & nsubs2 = nnp.getNearSubdomains(0); 
@@ -173,9 +173,9 @@ BOOST_AUTO_TEST_CASE( nn_processor_np_test) } else if (v_cl.getProcessUnitID() == 3) { - BOOST_REQUIRE_EQUAL(nnp.getNRealSubdomains(0),1); - BOOST_REQUIRE_EQUAL(nnp.getNRealSubdomains(1),1); - BOOST_REQUIRE_EQUAL(nnp.getNRealSubdomains(2),1); + BOOST_REQUIRE_EQUAL(nnp.getNRealSubdomains(0),1ul); + BOOST_REQUIRE_EQUAL(nnp.getNRealSubdomains(1),1ul); + BOOST_REQUIRE_EQUAL(nnp.getNRealSubdomains(2),1ul); const openfpm::vector< ::Box<2,float> > & nsubs1 = nnp.getNearSubdomains(0); const openfpm::vector< ::Box<2,float> > & nsubs2 = nnp.getNearSubdomains(1); @@ -250,9 +250,9 @@ BOOST_AUTO_TEST_CASE( nn_processor_box_periodic_test) if (v_cl.getProcessUnitID() == 0) { - BOOST_REQUIRE_EQUAL(nnp.getNearSubdomains(1).size(),4); - BOOST_REQUIRE_EQUAL(nnp.getNearSubdomains(2).size(),4); - BOOST_REQUIRE_EQUAL(nnp.getNearSubdomains(3).size(),4); + BOOST_REQUIRE_EQUAL(nnp.getNearSubdomains(1).size(),4ul); + BOOST_REQUIRE_EQUAL(nnp.getNearSubdomains(2).size(),4ul); + BOOST_REQUIRE_EQUAL(nnp.getNearSubdomains(3).size(),4ul); openfpm::vector<Box<2,float>> bv; @@ -286,9 +286,9 @@ BOOST_AUTO_TEST_CASE( nn_processor_box_periodic_test) } else if (v_cl.getProcessUnitID() == 1) { - BOOST_REQUIRE_EQUAL(nnp.getNearSubdomains(0).size(),4); - BOOST_REQUIRE_EQUAL(nnp.getNearSubdomains(2).size(),4); - BOOST_REQUIRE_EQUAL(nnp.getNearSubdomains(3).size(),4); + BOOST_REQUIRE_EQUAL(nnp.getNearSubdomains(0).size(),4ul); + BOOST_REQUIRE_EQUAL(nnp.getNearSubdomains(2).size(),4ul); + BOOST_REQUIRE_EQUAL(nnp.getNearSubdomains(3).size(),4ul); openfpm::vector<Box<2,float>> bv; @@ -322,9 +322,9 @@ BOOST_AUTO_TEST_CASE( nn_processor_box_periodic_test) } else if (v_cl.getProcessUnitID() == 2) { - BOOST_REQUIRE_EQUAL(nnp.getNearSubdomains(0).size(),4); - BOOST_REQUIRE_EQUAL(nnp.getNearSubdomains(1).size(),4); - BOOST_REQUIRE_EQUAL(nnp.getNearSubdomains(3).size(),4); + BOOST_REQUIRE_EQUAL(nnp.getNearSubdomains(0).size(),4ul); + BOOST_REQUIRE_EQUAL(nnp.getNearSubdomains(1).size(),4ul); + BOOST_REQUIRE_EQUAL(nnp.getNearSubdomains(3).size(),4ul); openfpm::vector<Box<2,float>> bv; @@ -358,9 +358,9 @@ BOOST_AUTO_TEST_CASE( nn_processor_box_periodic_test) } else if (v_cl.getProcessUnitID() == 3) { - BOOST_REQUIRE_EQUAL(nnp.getNearSubdomains(0).size(),4); - BOOST_REQUIRE_EQUAL(nnp.getNearSubdomains(1).size(),4); - BOOST_REQUIRE_EQUAL(nnp.getNearSubdomains(2).size(),4); + BOOST_REQUIRE_EQUAL(nnp.getNearSubdomains(0).size(),4ul); + BOOST_REQUIRE_EQUAL(nnp.getNearSubdomains(1).size(),4ul); + BOOST_REQUIRE_EQUAL(nnp.getNearSubdomains(2).size(),4ul); openfpm::vector<Box<2,float>> bv; diff --git a/src/Grid/grid_dist_id_iterator.hpp b/src/Grid/grid_dist_id_iterator.hpp index 84b2cd8dbf2ee01cec32d5039277e12721278153..47b909debc5fcb75c86b8acba595eba39c97555f 100644 --- a/src/Grid/grid_dist_id_iterator.hpp +++ b/src/Grid/grid_dist_id_iterator.hpp @@ -116,9 +116,11 @@ class grid_dist_iterator<dim,device_grid,FREE> /*! 
\brief Constructor of the distributed grid iterator * * \param gk std::vector of the local grid + * \param gdb_ext set of local subdomains + * \param stop end point * */ - grid_dist_iterator(const openfpm::vector<device_grid> & gk, const openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext, grid_key_dx<dim> stop) + grid_dist_iterator(const openfpm::vector<device_grid> & gk, const openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext, const grid_key_dx<dim> & stop) :g_c(0),gList(gk),gdb_ext(gdb_ext),stop(stop) { // Initialize the current iterator diff --git a/src/Grid/grid_dist_id_unit_test.cpp b/src/Grid/grid_dist_id_unit_test.cpp index d26644b153e796db8abd2f0c007abc0db0f20532..89f04a8d0a9654c87b3459e30553de3dba4a8cfa 100644 --- a/src/Grid/grid_dist_id_unit_test.cpp +++ b/src/Grid/grid_dist_id_unit_test.cpp @@ -390,11 +390,131 @@ void Test2D(const Box<2,float> & domain, long int k) // In this case the boundary condition are non periodic if (g_dist.isInside(key_g)) { - match &= (g_dist.template get<0>(key),info.LinId(key_g)); + match &= (g_dist.template get<0>(key) == info.LinId(key_g))?true:false; } ++domg; } + + BOOST_REQUIRE_EQUAL(match,true); + } +} + + +void Test1D(const Box<1,float> & domain, long int k) +{ + Vcluster & v_cl = create_vcluster(); + long int big_step = k / 30; + big_step = (big_step == 0)?1:big_step; + long int small_step = 21; + + if (v_cl.getProcessingUnits() > 48) + return; + + print_test( "Testing 1D grid k<=",k); + + // 1D test + for ( ; k >= 2 ; k-= (k > 2*big_step)?big_step:small_step ) + { + BOOST_TEST_CHECKPOINT( "Testing 1D grid k=" << k ); + + //! [Create and access a distributed grid] + + // grid size + size_t sz[1]; + sz[0] = k; + + float factor = pow(create_vcluster().getProcessingUnits()/2.0f,1.0f); + + // Ghost + Ghost<1,float> g(0.01 / factor); + + // Distributed grid with id decomposition + grid_dist_id<1, float, scalar<float>> g_dist(sz,domain,g); + + // check the consistency of the decomposition + bool val = g_dist.getDecomposition().check_consistency(); + BOOST_REQUIRE_EQUAL(val,true); + + // Grid sm + grid_sm<1,void> info(sz); + + // get the domain iterator + size_t count = 0; + + auto dom = g_dist.getDomainIterator(); + + while (dom.isNext()) + { + auto key = dom.get(); + auto key_g = g_dist.getGKey(key); + + g_dist.template get<0>(key) = info.LinId(key_g); + + // Count the point + count++; + + ++dom; + } + + //! 
[Create and access a distributed grid] + + // Get the virtual cluster machine + Vcluster & vcl = g_dist.getVC(); + + // reduce + vcl.sum(count); + vcl.execute(); + + // Check + BOOST_REQUIRE_EQUAL(count,(size_t)k); + + auto dom2 = g_dist.getDomainIterator(); + + grid_key_dx<1> start = dom2.getStart(); + grid_key_dx<1> stop = dom2.getStop(); + + BOOST_REQUIRE_EQUAL((long int)stop.get(0),(long int)g_dist.size(0)-1); + + BOOST_REQUIRE_EQUAL(start.get(0),0); + + bool match = true; + + // check that the grid store the correct information + while (dom2.isNext()) + { + auto key = dom2.get(); + auto key_g = g_dist.getGKey(key); + + match &= (g_dist.template get<0>(key) == info.LinId(key_g))?true:false; + + ++dom2; + } + + BOOST_REQUIRE_EQUAL(match,true); + + g_dist.template ghost_get<0>(); + + // check that the communication is correctly completed + + auto domg = g_dist.getDomainGhostIterator(); + + // check that the grid with the ghost past store the correct information + while (domg.isNext()) + { + auto key = domg.get(); + auto key_g = g_dist.getGKey(key); + + // In this case the boundary condition are non periodic + if (g_dist.isInside(key_g)) + { + match &= (g_dist.template get<0>(key) == info.LinId(key_g))?true:false; + } + + ++domg; + } + + BOOST_REQUIRE_EQUAL(match,true); } } @@ -1770,6 +1890,16 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_copy ) Test_grid_copy(domain3,k); } +BOOST_AUTO_TEST_CASE( grid_1d_test ) +{ + // Domain + Box<1,float> domain1({-1.0},{1.0}); + + long int k = 32*32*32*create_vcluster().getProcessingUnits(); + + Test1D(domain1,k); +} + BOOST_AUTO_TEST_SUITE_END() #endif diff --git a/src/Vector/vector_dist_unit_test.hpp b/src/Vector/vector_dist_unit_test.hpp index a59e33784e240eace172656b5fd601c65704d086..47ee46c08b63a737eebab9dc67c9c4c371b80003 100644 --- a/src/Vector/vector_dist_unit_test.hpp +++ b/src/Vector/vector_dist_unit_test.hpp @@ -1191,8 +1191,13 @@ BOOST_AUTO_TEST_CASE( vector_dist_cell_verlet_test ) // Boundary conditions size_t bc[3]={PERIODIC,PERIODIC,PERIODIC}; + float spacing = 1.0/Ng; + float first_dist = spacing; + float second_dist = sqrt(2.0*spacing*spacing); + float third_dist = sqrt(3.0 * spacing*spacing); + // ghost - Ghost<3,float> ghost(1.0/(Ng-2)); + Ghost<3,float> ghost(third_dist*1.1); // Distributed vector vector_dist<3,float, Point_test<float>, CartDecomposition<3,float> > vd(0,box,bc,ghost); @@ -1229,14 +1234,11 @@ BOOST_AUTO_TEST_CASE( vector_dist_cell_verlet_test ) vd.ghost_get<0>(); + vd.write("Debug_output"); + // calculate the distance of the first, second and third neighborhood particle // Consider that they are on a regular grid - float spacing = it.getSpacing(0); - float first_dist = spacing; - float second_dist = sqrt(2.0*spacing*spacing); - float third_dist = sqrt(3.0 * spacing*spacing); - // add a 5% to dist first_dist += first_dist * 0.05; @@ -1249,6 +1251,8 @@ BOOST_AUTO_TEST_CASE( vector_dist_cell_verlet_test ) bool correct = true; + BOOST_REQUIRE_EQUAL(vd.size_local(),verlet.size()); + // for each particle for (size_t i = 0 ; i < verlet.size() ; i++) { diff --git a/src/dec_optimizer.hpp b/src/dec_optimizer.hpp index 737377a0d3d78f55f1f1dd180fcef1b7595bae97..d7f4f18f9110314cec1b80f1fb5f5d7d6c16f24a 100644 --- a/src/dec_optimizer.hpp +++ b/src/dec_optimizer.hpp @@ -6,86 +6,24 @@ /*! 
\brief this class represent a wavefront of dimension dim * - * \dim Dimensionality of the wavefront (dimensionality of the space + * \tparam dim Dimensionality of the wavefront (dimensionality of the space * where it live so the wavefront * is dim-1) * + * Each wavefront is identified by one starting point and one stop point. + * More or less a wavefront is just a box defined in the integer space + * */ - template <unsigned int dim> -class wavefront +class wavefront : public Box<dim,size_t> { public: - typedef boost::fusion::vector<size_t[dim],size_t[dim]> type; - - type data; - + //! start point is the property with id 0 (first property) static const int start = 0; - static const int stop = 1; - static const int max_prop = 2; - - /* \brief Get the key to the point 1 - * - * \return the key to the point 1 - * - */ - - grid_key_dx<dim> getKP1() - { - // grid key to return - grid_key_dx<dim> ret(boost::fusion::at_c<start>(data)); - - return ret; - } - - /* \brief Get the key to point 2 - * - * \return the key to the point 2 - * - */ - - grid_key_dx<dim> getKP2() - { - // grid key to return - grid_key_dx<dim> ret(boost::fusion::at_c<stop>(data)); - - return ret; - } - - /* \brief produce a box from an encapsulated object - * - * \param encap encapsulated object - * - */ - - template<typename encap> static Box<dim,size_t> getBox(const encap && enc) - { - Box<dim,size_t> bx; - - // Create the object from the encapsulation - getBox(enc,bx); - - return bx; - } - - /* \brief produce a box from an encapsulated object - * - * \param encap encapsulated object - * - */ - - template<typename encap> static void getBox(const encap & enc, Box<dim,size_t> & bx) - { - // Create the object from the encapsulation - - for (int i = 0 ; i < dim ; i++) - { - bx.setLow(i,enc.template get<wavefront::start>()[i]); - bx.setHigh(i,enc.template get<wavefront::stop>()[i]); - } - } + //! stop point is the property with id 1 (second property) + static const int stop = 1; }; /*! \brief This class take a graph representing the space decomposition and produce a @@ -100,8 +38,7 @@ public: template <unsigned int dim, typename Graph> class dec_optimizer { - // create a grid header for helping - + //! Contain information about the grid size grid_sm<dim,void> gh; private: @@ -111,7 +48,6 @@ private: * \param v_w wavefronts * \param w_comb wavefront expansion combinations * \param d direction of expansion - * \param bc boundary condition * */ void expand_one_wf(openfpm::vector<wavefront<dim>> & v_w, std::vector<comb<dim>> & w_comb , size_t d) @@ -126,6 +62,9 @@ private: /*! \brief Adjust the other wavefronts * + * \param v_w array of wavefronts + * \param hyp Hyper cube used to adjust the wavefront + * \param w_comb for each wavefront indicate their position (normal to the face of the wavefront) * \param d direction * */ @@ -154,7 +93,6 @@ private: size_t id = hyp.LinId(q_comb[j]); // get the combination of the direction d - bool is_pos = hyp.isPositive(d); // is positive, modify the stop point or the starting point @@ -169,16 +107,14 @@ private: } } - /* \brief Fill the wavefront position + /*! \brief Fill the wavefront position * * \tparam prp property to set * * \param graph we are processing - * \param Box to fill - * \param id value to fill with + * \param v_w array of wavefronts * */ - template<unsigned int prp> void write_wavefront(Graph & graph,openfpm::vector<wavefront<dim>> & v_w) { // fill the wall domain with 0 @@ -195,12 +131,12 @@ private: } } - /* \brief Fill the domain + /*! 
\brief Fill the domain * * \tparam p_sub property to set with the sub-domain id * * \param graph we are processing - * \param Box to fill + * \param box Box to fill * \param ids value to fill with * */ @@ -226,16 +162,17 @@ private: } } - /* \brief Add the boundary domain of id p_id to the queue + /*! \brief Add the boundary domain of id p_id to the queue * - * \tparam i-property where is stored the decomposition + * \tparam p_sub property id where to store the sub-domain decomposition + * \tparam p_id property id where is stored the decomposition * - * \param domains vector with domains to process + * \param domains vector with sub-sub-domains still to process + * \param v_w array of wave-fronts * \param graph we are processing - * \param w_comb hyper-cube combinations - * \param p_id processor id - * \param box_nn_processor list of neighborhood processors for the box - * \param bc Boundary conditions + * \param w_comb wavefront combination, it is the normal vector to the wavefront + * \param pr_id processor id for which we are optimizing the decomposition + * \param bc boundary conditions * */ template<unsigned int p_sub, unsigned int p_id> void add_to_queue(openfpm::vector<size_t> & domains, openfpm::vector<wavefront<dim>> & v_w, Graph & graph, std::vector<comb<dim>> & w_comb, long int pr_id, const size_t(& bc)[dim]) @@ -308,7 +245,7 @@ private: domains.swap(domains_new); } - /* \brief Find the biggest hyper-cube + /*! \brief Find the biggest hyper-cube * * starting from one initial sub-domain find the biggest hyper-cube * output the box, and fill a list of neighborhood processor @@ -402,38 +339,41 @@ private: // expand the intersection of the wavefronts - std::vector<comb<dim>> q_comb = SubHyperCube<dim,dim-1>::getCombinations_R(w_comb[d],dim-2); + if (dim >= 2) + { + std::vector<comb<dim>> q_comb = SubHyperCube<dim,dim-1>::getCombinations_R(w_comb[d],dim-2); - // Eliminate the w_comb[d] direction + // Eliminate the w_comb[d] direction - for (size_t k = 0 ; k < q_comb.size() ; k++) - { - for (size_t j = 0 ; j < dim ; j++) + for (size_t k = 0 ; k < q_comb.size() ; k++) { - if (w_comb[d].c[j] != 0) + for (size_t j = 0 ; j < dim ; j++) { - q_comb[k].c[j] = 0; + if (w_comb[d].c[j] != 0) + { + q_comb[k].c[j] = 0; + } } } - } - // for all the combinations - for (size_t j = 0 ; j < q_comb.size() ; j++) - { - size_t id = hyp.LinId(q_comb[j]); + // for all the combinations + for (size_t j = 0 ; j < q_comb.size() ; j++) + { + size_t id = hyp.LinId(q_comb[j]); - // get the combination of the direction d + // get the combination of the direction d - bool is_pos = hyp.isPositive(d); + bool is_pos = hyp.isPositive(d); - // is positive, modify the stop point or the starting point + // is positive, modify the stop point or the starting point - for (size_t s = 0 ; s < dim ; s++) - { - if (is_pos == true) - {v_w.template get<wavefront<dim>::stop>(id)[s] = v_w.template get<wavefront<dim>::stop>(id)[s] + w_comb[d].c[s];} - else - {v_w.template get<wavefront<dim>::start>(id)[s] = v_w.template get<wavefront<dim>::start>(id)[s] + w_comb[d].c[s];} + for (size_t s = 0 ; s < dim ; s++) + { + if (is_pos == true) + {v_w.template get<wavefront<dim>::stop>(id)[s] = v_w.template get<wavefront<dim>::stop>(id)[s] + w_comb[d].c[s];} + else + {v_w.template get<wavefront<dim>::start>(id)[s] = v_w.template get<wavefront<dim>::start>(id)[s] + w_comb[d].c[s];} + } } } } @@ -456,8 +396,8 @@ private: /*! 
\brief Initialize the wavefronts * - * \param starting point of the wavefront set - * \param v_w Wavefront to initialize + * \param start_p starting point for the wavefront set + * \param v_w Wavefront array * */ void InitializeWavefront(grid_key_dx<dim> & start_p, openfpm::vector<wavefront<dim>> & v_w) @@ -479,13 +419,15 @@ private: * search in the graph for one sub-domain labelled with processor id * to use as seed * - * \tparam p_id property in the graph storing the sub-domain id + * \tparam p_id property id containing the decomposition + * \tparam p_sub property id that will contain the sub-domain decomposition * - * \param Graph graph + * \param graph Graph * \param id processor id * + * \return a valid seed key + * */ - template<unsigned int p_id, unsigned int p_sub> grid_key_dx<dim> search_seed(Graph & graph, long int id) { // if no processor is selected return the first point @@ -531,18 +473,18 @@ private: * To the domains inside the hyper-cube one sub-id is assigned. This procedure continue until * all the domain of one p_id has a sub-id * - * \tparam j property containing the decomposition - * \tparam i property to fill with the sub-decomposition + * \tparam p_id property containing the decomposition + * \tparam p_sub property to fill with the sub-domain decomposition * * \param start_p seed point * \param graph we are processing - * \param p_id Processor id (if p_id == -1 the optimization is done for all the processors) - * \param list of sub-domain boxes produced by the algorithm - * \param box_nn_processor for each box it list all the neighborhood processor + * \param pr_id Processor id (if p_id == -1 the optimization is done for all the processors) + * \param lb list of sub-domain boxes produced by the algorithm + * \param box_nn_processor for each sub-domain it list all the neighborhood processors * \param ghe Ghost extension in sub-sub-domain units in each direction - * \param bc Boundary condition - * \param init_sub_id when true p_sub property is initial set to -1 [default true] - * \param sub_id starting sub_id enumeration [default 0] + * \param init_sub_id when true p_sub property is initially set to -1 [default true] + * \param sub_id starting sub_id to enumerate them [default 0] + * \param bc boundary conditions * * \return last assigned sub-id * @@ -606,10 +548,17 @@ private: } /*! \brief Construct the sub-domain processor list + * + * \tparam p_id property that contain the decomposition * * Each entry is a sub-domain, the list of numbers indicate the neighborhood processors * - * \brief box_nn_processor + * \param graph graph to process + * \param box_nn_processor for each sub-domain it list all the neighborhood processors + * \param subs vector of sub-domains + * \param ghe ghost extensions + * \param bc boundary conditions + * \param pr_id processor that we are processing * */ template<unsigned int p_id> void construct_box_nn_processor(Graph & graph, openfpm::vector< openfpm::vector<size_t> > & box_nn_processor, const openfpm::vector<Box<dim,size_t>> & subs, const Ghost<dim,long int> & ghe, const size_t (& bc)[dim], long int pr_id) @@ -621,6 +570,7 @@ private: map.clear(); Box<dim,size_t> sub = subs.get(i); sub.enlarge(ghe); + grid_skin_iterator_bc<dim> gsi(gh,subs.get(i),sub,bc); while (gsi.isNext()) @@ -666,11 +616,13 @@ public: * the boundary until the wavefronts cannot expand any more, creating a sub-domain covering more sub-sub-domain. 
* This procedure continue until all the domain is covered by a sub-domains * - * \tparam j property containing the processor decomposition - * \tparam i property to fill with the sub-domain-decomposition id + * \tparam p_id property containing the processor decomposition + * \tparam p_sub property to fill with the sub-domain decomposition * * \param start_p seed point * \param graph we are processing + * \param ghe ghost size + * \param bc boundary conditions * */ template <unsigned int p_sub, unsigned int p_id> void optimize(grid_key_dx<dim> & start_p, Graph & graph, const Ghost<dim,long int> & ghe , const size_t (& bc)[dim]) @@ -691,12 +643,14 @@ public: * the boundary until the wavefronts cannot expand any more, creating a sub-domain covering more sub-sub-domain. * This procedure continue until all the sub-domain of the processor p_id are covered by a sub-domains * - * \tparam j property containing the decomposition - * \tparam i property to fill with the sub-domain-decomposition id + * \tparam p_id property containing the decomposition + * \tparam p_sub property to fill with the sub-domain decomposition * * \param graph we are processing - * \param p_id Processor id (if p_id == -1 the optimization is done for all the processors) - * \param list of sub-domain boxes + * \param pr_id Processor id (if p_id == -1 the optimization is done for all the processors) + * \param lb list of sub-domain boxes + * \param box_nn_processor for each sub-domain it list all the neighborhood processors + * \param ghe ghost size * */ template <unsigned int p_sub, unsigned int p_id> void optimize(Graph & graph, long int pr_id, openfpm::vector<Box<dim,size_t>> & lb, openfpm::vector< openfpm::vector<size_t> > & box_nn_processor, const Ghost<dim,long int> & ghe, const size_t (& bc)[dim])
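The applyPointBC documentation added in the CartDecomposition.hpp hunks above describes the periodic wrapping rule in words. Below is a minimal standalone sketch of that rule, assuming a domain normalized to [0,1) in every dimension; it is an illustration only, not the OpenFPM implementation, and the function name wrap_periodic and the PERIODIC_FLAG constant are invented for the example.

#include <cstddef>

static const size_t PERIODIC_FLAG = 1;   // stand-in for the library's PERIODIC boundary flag

template<unsigned int dim>
void wrap_periodic(float (& pt)[dim], const size_t (& bc)[dim])
{
	for (size_t i = 0 ; i < dim ; i++)
	{
		if (bc[i] != PERIODIC_FLAG)
			continue;              // non-periodic direction: the coordinate is left unchanged

		if (pt[i] >= 1.0f)             // the particle went out on the right side ...
			pt[i] -= 1.0f;         // ... bring it back on the left side
		else if (pt[i] < 0.0f)         // and symmetrically on the left side
			pt[i] += 1.0f;
	}
}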
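The vector_dist_cell_verlet_test hunk replaces the ghost size 1.0/(Ng-2) with one derived from the third-neighbor distance of the regular particle lattice, presumably so that the Verlet list can also reach edge- and corner-diagonal neighbors that sit across a processor boundary. The short program below only re-derives those distances; the value of Ng is invented for the example and the code is not part of the test.

#include <cmath>
#include <cstdio>

int main()
{
	const float Ng      = 128.0f;                     // hypothetical number of grid points per dimension
	const float spacing = 1.0f / Ng;                  // lattice spacing in a unit box
	const float first   = spacing;                    // face neighbor
	const float second  = std::sqrt(2.0f) * spacing;  // edge-diagonal neighbor
	const float third   = std::sqrt(3.0f) * spacing;  // corner-diagonal neighbor

	// The ghost layer must be at least as wide as the farthest neighbor we want to find,
	// hence the test now uses third * 1.1, which leaves some slack.
	const float ghost = third * 1.1f;

	std::printf("first=%g second=%g third=%g ghost=%g\n", first, second, third, ghost);
	return 0;
}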