From d1b1180735404632bf329943046f2ca86867d2fb Mon Sep 17 00:00:00 2001
From: Pietro Incardona <incardon@mpi-cbg.de>
Date: Thu, 30 Jul 2015 19:28:58 +0200
Subject: [PATCH] Creating local ghost sync

---
 src/.deps/pdata-VCluster.Po             |  12 +-
 src/.deps/pdata-main.Po                 |  18 +-
 src/Decomposition/CartDecomposition.hpp | 261 +++++++++++---
 src/Grid/grid_dist_id.hpp               | 437 +++++++++++++++++-------
 src/Grid/grid_dist_id_iterator.hpp      | 138 +++++++-
 src/Grid/grid_dist_id_unit_test.hpp     | 166 +++++++--
 src/Grid/grid_dist_key.hpp              |   6 +-
 7 files changed, 795 insertions(+), 243 deletions(-)
diff --git a/src/Decomposition/CartDecomposition.hpp b/src/Decomposition/CartDecomposition.hpp
index 9fe0ed672..2a3df1105 100644
--- a/src/Decomposition/CartDecomposition.hpp
+++ b/src/Decomposition/CartDecomposition.hpp
@@ -37,7 +37,7 @@
  *
  * Given an N-dimensional space, this class decomposes the space into a Cartesian grid of small
  * sub-sub-domains. Each sub-sub-domain is assigned an id that identifies which processor is
- * going to take care of that part of space (in general the space assigned to a processor is a
+ * going to take care of that part of space (in general the space assigned to a processor is
  * simply connected); a second step merges several sub-sub-domains with the same id into a bigger
  * sub-domain region with that id. Each sub-domain has an extended space called ghost part
  *
@@ -57,6 +57,7 @@
  * * Near processor sub-domain: a sub-domain that lives in a near (or contiguous) processor
  * * Near processor list: the list of all the near processors of the local processor (each processor
  *   has a list of its near processors)
+ * * Local ghosts, internal or external, are all the ghosts that do not involve inter-processor communication
  *
  * \see calculateGhostBoxes() for a visualization of internal and external ghost boxes
  *
@@ -90,23 +91,28 @@ class CartDecomposition
 	};

	//! It contains a box definition and the sub-domain it comes from
-	struct Box_sub
+	struct Box_sub : Box<dim,T>
 	{
-		::Box<dim,T> box;
-
 		// Domain id
 		size_t sub;
+
+		Box_sub & operator=(const Box<dim,T> & box)
+		{
+			::Box<dim,T>::operator=(box);
+
+			return *this;
+		}
 	};

	struct Box_dom
 	{
 		// Intersection between the local sub-domain enlarged by the ghost and the contiguous processor
 		// sub-domains (external ghost)
-		openfpm::vector< Box_sub > ebx;
+		openfpm::vector_std< Box_sub > ebx;

 		// Intersection between the contiguous processor sub-domain enlarged by the ghost and the
 		// local sub-domain (internal ghost)
-		openfpm::vector< Box_sub > ibx;
+		openfpm::vector_std< Box_sub > ibx;
 	};

 public:
@@ -123,10 +129,6 @@ private:
 	//! acc_key is size_t
 	typedef typename data_s<SpaceBox<dim,T>,device_l<SpaceBox<dim,T>>,Memory,openfpm::vector_grow_policy_default,openfpm::vect_isel<SpaceBox<dim,T>>::value >::access_key acc_key;

-	//! Subspace selected
-	//! access_key in case of grid is just the set of the index to access the grid
-	std::vector<acc_key> id_sub;
-
 	//! the margin of the sub-domain selected
 	SpaceBox<dim,T> sub_domain;

@@ -151,6 +153,9 @@ private:
 	// for each near-processor store the sub-domains of the near processor
 	std::unordered_map<size_t, N_box> nn_processor_subdomains;

+	//! it contains the internal ghosts of the local processor
+	openfpm::vector<Box_dom> loc_ghost_box;
+
 	//! Structure that contains for each sub-domain box the processor id
 	//! (it exists for efficient global communication)
 	openfpm::vector<size_t> fine_s;
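The heart of the patch is the pair of routines added in the next hunk: local ghost boxes are obtained by enlarging a sub-domain by the ghost margin and intersecting it with the other local sub-domains. Before reading them, a standalone sketch of that geometric rule may help; the AABB type and helpers below are simplified stand-ins, not OpenFPM's Box/SpaceBox API:

```cpp
#include <algorithm>
#include <array>
#include <cstdio>

constexpr int dim = 2;

struct AABB
{
	std::array<double,dim> p1; // lower corner
	std::array<double,dim> p2; // upper corner
};

// enlarge the box by a symmetric ghost margin g
AABB enlarge(AABB b, double g)
{
	for (int i = 0 ; i < dim ; i++) { b.p1[i] -= g; b.p2[i] += g; }
	return b;
}

// intersect a and b, return false if the intersection is empty
bool intersect(const AABB & a, const AABB & b, AABB & out)
{
	for (int i = 0 ; i < dim ; i++)
	{
		out.p1[i] = std::max(a.p1[i],b.p1[i]);
		out.p2[i] = std::min(a.p2[i],b.p2[i]);
		if (out.p1[i] >= out.p2[i]) return false;
	}
	return true;
}

int main()
{
	// two sub-domains of the same processor sharing the x = 0.5 face
	AABB A{{0.0,0.0},{0.5,1.0}};
	AABB B{{0.5,0.0},{1.0,1.0}};
	double g = 0.1;

	AABB ig, eg;

	// internal ghost of A w.r.t. B: B enlarged, clipped against A (data A owns that B reads)
	if (intersect(enlarge(B,g),A,ig))
		std::printf("internal: [%g,%g] x [%g,%g]\n",ig.p1[0],ig.p2[0],ig.p1[1],ig.p2[1]);

	// external ghost of A w.r.t. B: A enlarged, clipped against B (ghost area A reads from B)
	if (intersect(enlarge(A,g),B,eg))
		std::printf("external: [%g,%g] x [%g,%g]\n",eg.p1[0],eg.p2[0],eg.p1[1],eg.p2[1]);
}
```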
@@ -329,6 +334,89 @@ private:
 		geo_cell.Initialize(domain,div,orig);
 	}

+	/*! \brief Create the external local ghost boxes
+	 *
+	 * \param ghost margin to enlarge
+	 *
+	 */
+	void create_loc_ghost_ebox(Ghost<dim,T> & ghost)
+	{
+		loc_ghost_box.resize(sub_domains.size());
+
+		// For each sub-domain
+		for (size_t i = 0 ; i < sub_domains.size() ; i++)
+		{
+			SpaceBox<dim,T> sub_with_ghost = sub_domains.get(i);
+
+			// enlarge the sub-domain with the ghost
+			sub_with_ghost.enlarge(ghost);
+
+			// intersect with the other local sub-domains
+			for (size_t j = 0 ; j < sub_domains.size() ; j++)
+			{
+				if (i == j)
+					continue;
+
+				::Box<dim,T> bi;
+
+				bool intersect = sub_with_ghost.Intersect(::SpaceBox<dim,T>(sub_domains.get(j)),bi);
+
+				if (intersect == true)
+				{
+					Box_sub b;
+					b.sub = j;
+					b = bi;
+
+					loc_ghost_box.get(i).ebx.add(b);
+				}
+			}
+		}
+	}
+
+	/*! \brief Create the internal local ghost boxes
+	 *
+	 * \param ghost margin to enlarge
+	 *
+	 */
+	void create_loc_ghost_ibox(Ghost<dim,T> & ghost)
+	{
+		loc_ghost_box.resize(sub_domains.size());
+
+		// For each sub-domain
+		for (size_t i = 0 ; i < sub_domains.size() ; i++)
+		{
+			// intersect with the other local sub-domains enlarged by the ghost
+			for (size_t j = 0 ; j < sub_domains.size() ; j++)
+			{
+				if (i == j)
+					continue;
+
+				SpaceBox<dim,T> sub_with_ghost = sub_domains.get(j);
+
+				// enlarge the adjacent sub-domain with the ghost
+				sub_with_ghost.enlarge(ghost);
+
+				::Box<dim,T> bi;
+
+				bool intersect = sub_with_ghost.Intersect(::SpaceBox<dim,T>(sub_domains.get(i)),bi);
+
+				if (intersect == true)
+				{
+					Box_sub b;
+					b.sub = j;
+					b = bi;
+
+					loc_ghost_box.get(i).ibx.add(b);
+				}
+			}
+		}
+	}
+
 	/*! \brief Create the subspaces that decompose your domain
 	 *
 	 * Create the subspaces that decompose your domain
@@ -391,10 +479,10 @@ private:
 			// enlarge the sub-domain with the ghost
 			sub_with_ghost.enlarge(ghost);

-			// resize based on the number of contiguous processors
+			// resize based on the number of adjacent processors
 			box_nn_processor_int.get(i).resize(box_nn_processor.get(i).size());

-			// For each processor contiguous to this sub-domain
+			// For each processor adjacent to this sub-domain
 			for (size_t j = 0 ; j < box_nn_processor.get(i).size() ; j++)
 			{
 				// Contiguous processor
@@ -403,7 +491,7 @@ private:
 				// store the box in proc_int_box storing from which sub-domain they come from
 				Box_dom & proc_int_box_g = proc_int_box.get(ProctoID(p_id));

-				// get the set of sub-domains of the contiguous processor p_id
+				// get the set of sub-domains of the adjacent processor p_id
 				openfpm::vector< ::Box<dim,T> > & nn_processor_subdomains_g = nn_processor_subdomains[p_id].bx;

 				// near processor sub-domain intersections
@@ -437,8 +525,8 @@ private:
 						vb_ext.add(pb);
 						box_nn_processor_int_gg.add(bi);
 						proc_int_box_g.ebx.add();
-						proc_int_box_g.ebx.last().box = bi;
-						proc_int_box_g.ebx.last().sub = 0;
+						proc_int_box_g.ebx.last() = bi;
+						proc_int_box_g.ebx.last().sub = i;
 					}
 				}
 			}
@@ -522,7 +610,7 @@ private:
 					// store the box in proc_int_box storing from which sub-domain they come from
 					Box_dom & pr_box_int = proc_int_box.get(ProctoID(p_id));
 					Box_sub sb;
-					sb.box = b_int.box;
+					sb = b_int.box;
 					sb.sub = i;
 					pr_box_int.ibx.add(sb);
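The Box_sub idiom used in these hunks (first b.sub = j, then b = bi) works because Box_sub now inherits from the box type, so assigning an intersection result overwrites only the geometric part. A minimal standalone illustration of the pattern, with hypothetical types in place of Box/Box_sub:

```cpp
#include <cstddef>
#include <cstdio>

struct Box2 { double p1[2], p2[2]; };

struct BoxSub : Box2
{
	std::size_t sub; // id of the sub-domain that produced the intersection

	BoxSub & operator=(const Box2 & b)
	{
		Box2::operator=(b); // copy only the geometric part
		return *this;       // returning *this allows chained assignment
	}
};

int main()
{
	Box2 bi{{0.4,0.0},{0.5,1.0}}; // some intersection result
	BoxSub b;
	b.sub = 7;
	b = bi; // geometry copied, sub untouched
	std::printf("sub=%zu x1=%g\n", b.sub, b.p1[0]);
}
```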
@@ -596,10 +684,6 @@ public:
 		// Reset the box to zero
 		bbox.zero();

-		//! Subspace selected
-		//! access_key in case of grid is just the set of the index to access the grid
-		id_sub.swap(cd.id_sub);
-
 		//! the set of all local sub-domains as a vector
 		sub_domains.swap(cd.sub_domains);

@@ -616,7 +700,7 @@ public:
 	 *
 	 */
 	CartDecomposition(Vcluster & v_cl)
-	:id_sub(0),v_cl(v_cl)
+	:v_cl(v_cl)
 	{
 		// Reset the box to zero
 		bbox.zero();
@@ -630,7 +714,7 @@ public:
 	 *
 	 */
 	CartDecomposition(std::vector<size_t> dec, Domain<dim,T> domain, Vcluster & v_cl)
-	:id_sub(0),gr(dec),cd(domain,dec,0),domain(domain),v_cl(v_cl)
+	:gr(dec),cd(domain,dec,0),domain(domain),v_cl(v_cl)
 	{
 		// Reset the box to zero
 		bbox.zero();
@@ -949,9 +1033,11 @@ p1[0]<-----+                    +----> p2[0]
 		// Get the sub-domains of the near processors
 		v_cl.sendrecvMultipleMessagesNBX(nn_processors,boxes,CartDecomposition<dim,T,device_l,Memory,Domain,data_s>::message_alloc, this ,NEED_ALL_SIZE);

+		// create the internal structures that store ghost information
 		create_box_nn_processor_ext(ghost);
 		create_box_nn_processor_int(ghost);
+		create_loc_ghost_ebox(ghost);
+		create_loc_ghost_ibox(ghost);
 	}

 	/*! \brief processorID return in which processor the particle should go
@@ -1101,18 +1187,6 @@ p1[0]<-----+                    +----> p2[0]
 	{
 	}

-	/*! \brief Select the local space
-	 *
-	 * Select the local space
-	 *
-	 * \param sub select the sub-space
-	 *
-	 */
-	void setSpace(size_t sub)
-	{
-		id_sub.push_back(sub);
-	}
-
 	/*! \brief Get the local grids
 	 *
@@ -1240,6 +1314,30 @@ p1[0]<-----+                    +----> p2[0]
 		return proc_int_box.get(id).ebx.size();
 	}

+	/*! \brief Get the number of external local ghost boxes for one sub-domain
+	 *
+	 * \param id sub-domain id
+	 *
+	 * \return the number of external local ghost boxes
+	 *
+	 */
+	inline size_t getLocalNEGhost(size_t id)
+	{
+		return loc_ghost_box.get(id).ebx.size();
+	}
+
+	/*! \brief Get the number of internal local ghost boxes for one sub-domain
+	 *
+	 * \param id sub-domain id
+	 *
+	 * \return the number of internal local ghost boxes
+	 *
+	 */
+	inline size_t getLocalNIGhost(size_t id)
+	{
+		return loc_ghost_box.get(id).ibx.size();
+	}
+
 	/*! \brief Get the j internal ghost box for one processor
 	 *
 	 * \param id near processor list id (the id goes from 0 to getNNProcessor())
@@ -1249,7 +1347,7 @@ p1[0]<-----+                    +----> p2[0]
 	 */
 	inline const ::Box<dim,T> & getProcessorIGhostBox(size_t id, size_t j) const
 	{
-		return proc_int_box.get(id).ibx.get(j).box;
+		return proc_int_box.get(id).ibx.get(j);
 	}

 	/*! \brief Get the j external ghost box for one processor
@@ -1261,12 +1359,66 @@ p1[0]<-----+                    +----> p2[0]
 	 */
 	inline const ::Box<dim,T> & getProcessorEGhostBox(size_t id, size_t j) const
 	{
-		return proc_int_box.get(id).ibx.get(j).box;
+		return proc_int_box.get(id).ebx.get(j);
 	}

+	/*! \brief Get the j internal local ghost box for the i sub-domain of the local processor
+	 *
+	 * \param i sub-domain
+	 * \param j box
+	 * \return the box
+	 *
+	 */
+	inline const ::Box<dim,T> & getLocalIGhostBox(size_t i, size_t j) const
+	{
+		return loc_ghost_box.get(i).ibx.get(j);
+	}
+
+	/*! \brief Get the j external local ghost box for the i sub-domain of the local processor
+	 *
+	 * \param i sub-domain
+	 * \param j box
+	 * \return the box
+	 *
+	 */
+	inline const ::Box<dim,T> & getLocalEGhostBox(size_t i, size_t j) const
+	{
+		return loc_ghost_box.get(i).ebx.get(j);
+	}
+	/*! \brief Each sub-domain i has N internal local ghost boxes, identified by an id
+	 * 0 <= k < N; each comes from the intersection of sub-domain i with another
+	 * sub-domain j enlarged by the ghost. Given i and k, it returns the id of the
+	 * sub-domain j that produced the intersection
+	 *
+	 * \param i sub-domain
+	 * \param k id
+	 * \return the id of the other sub-domain
+	 *
+	 */
+	inline size_t getLocalIGhostSub(size_t i, size_t k) const
+	{
+		return loc_ghost_box.get(i).ibx.get(k).sub;
+	}
+
+	/*! \brief Each sub-domain i has N external local ghost boxes, identified by an id
+	 * 0 <= k < N; each comes from the intersection of sub-domain i enlarged by the
+	 * ghost with another sub-domain j. Given i and k, it returns the id of the
+	 * sub-domain j that produced the intersection
+	 *
+	 * \param i sub-domain
+	 * \param k id
+	 * \return the id of the other sub-domain
+	 *
+	 */
+	inline size_t getLocalEGhostSub(size_t i, size_t k) const
+	{
+		return loc_ghost_box.get(i).ebx.get(k).sub;
+	}

 	/*! \brief Get the local sub-domain to which the internal ghost box belongs
 	 *
-	 * \param id near processor list id (the id go from 0 to getNNProcessor())
+	 * \param id adjacent processor list id (the id goes from 0 to getNNProcessor())
 	 * \param j box (each near processor can produce more than one internal ghost box)
 	 * \return the sub-domain the internal ghost box belongs to
 	 *
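A usage sketch for the accessors introduced above; the helper is hypothetical and assumes a decomposition object dec whose ghost boxes have already been calculated, with n_sub equal to dec.getNLocalHyperCube():

```cpp
// Count (and in a real caller, consume) all local ghost boxes of a decomposition.
template<typename Dec> size_t count_local_ghosts(Dec & dec, size_t n_sub)
{
	size_t n = 0;

	for (size_t i = 0 ; i < n_sub ; i++)
	{
		// internal: pieces of sub-domain i lying in the ghost margin of another
		for (size_t k = 0 ; k < dec.getLocalNIGhost(i) ; k++)
		{
			auto box  = dec.getLocalIGhostBox(i,k); // geometry
			size_t j  = dec.getLocalIGhostSub(i,k); // producing sub-domain
			(void)box; (void)j;
			n++;
		}

		// external: pieces of i's ghost margin covered by another sub-domain
		for (size_t k = 0 ; k < dec.getLocalNEGhost(i) ; k++)
			n++;
	}

	return n;
}
```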
@@ -1388,24 +1540,24 @@ p1[0]<-----+                    +----> p2[0]
 	 *
 	 * The function generates several files
 	 *
-	 * 1) p_sub_X.vtk domain for the processor X as union of sub-domain
-	 * 2) sub_np_c_X.vtk sub-domain of the near processors contiguous to the processor X (Color encoded)
-	 * 3) sub_X_inte_g_np.vtk Intersection between the ghosts of the near processors and the processors X sub-domains (Color encoded)
-	 * 4) sub_X_ghost.vtk ghost for the processor X (Color encoded)
+	 * 1) p_sub_X.vtk domain of the local processor as a union of sub-domains (Boxes)
+	 * 2) sub_np_c_X.vtk sub-domains of the processors adjacent to the local processor (color encoded)
+	 * 3) sub_X_inte_g_np.vtk intersections between the ghosts of the near processors and the local processor sub-domains (color encoded)
+	 * 4) sub_X_ghost.vtk ghost of the local processor (color encoded)
 	 *
-	 * where X is the processor number
+	 * where X is the local processor rank
 	 *
 	 * \param output directory where to write the files
 	 *
 	 */
 	bool write(std::string output) const
 	{
-		//! p_sub_X.vtk domain for the processor X as union of sub-domain
+		//! p_sub_X.vtk domain of the local processor as a union of sub-domains (Boxes)
 		VTKWriter<openfpm::vector<::SpaceBox<dim,T>>,VECTOR_BOX> vtk_box1;
 		vtk_box1.add(sub_domains);
 		vtk_box1.write(output + std::string("p_sub_") + std::to_string(v_cl.getProcessUnitID()) + std::string(".vtk"));

-		//! sub_np_c_X.vtk sub-domain of the near processors contiguous to the processor X (Color encoded)
+		//! sub_np_c_X.vtk sub-domains of the processors adjacent to the local processor (color encoded)
 		VTKWriter<openfpm::vector<::Box<dim,T>>,VECTOR_BOX> vtk_box2;
 		for (size_t p = 0 ; p < nn_processors.size() ; p++)
 		{
 			...
 		}
 		vtk_box2.write(output + std::string("sub_np_c_") + std::to_string(v_cl.getProcessUnitID()) + std::string(".vtk"));

-		//! sub_X_inte_g_np.vtk Intersection between the ghosts of the near processors and the processors X sub-domains (Color encoded)
+		//! sub_X_inte_g_np.vtk intersections between the ghosts of the near processors and the local processor sub-domains (color encoded)
 		VTKWriter<openfpm::vector<::Box<dim,T>>,VECTOR_BOX> vtk_box3;
 		for (size_t p = 0 ; p < box_nn_processor_int.size() ; p++)
 		{
 			...
 		}
 		vtk_box3.write(output + std::string("sub_") + std::to_string(v_cl.getProcessUnitID()) + std::string("_inte_g_np") + std::string(".vtk"));

-
-		//! sub_X_ghost.vtk ghost for the processor X (Color encoded)
+		//! sub_X_ghost.vtk ghost of the local processor (color encoded)
 		VTKWriter<openfpm::vector<::Box<dim,T>>,VECTOR_BOX> vtk_box4;
 		for (size_t p = 0 ; p < box_nn_processor_int.size() ; p++)
 		{
 			...
 		}
 		vtk_box4.write(output + std::string("sub_") + std::to_string(v_cl.getProcessUnitID()) + std::string("_ghost") + std::string(".vtk"));

+		//! local external ghost of the local processor (color encoded per sub-domain)
+/*		VTKWriter<openfpm::vector<::Box<dim,T>>,VECTOR_BOX> vtk_box5;
+		for (size_t p = 0 ; p < loc_ghost_box.size() ; p++)
+		{
+			vtk_box5.add(loc_ghost_box.get(p).ebx);
+		}
+
+		//! local internal ghost of the local processor (color encoded per sub-domain)
+		VTKWriter<openfpm::vector<::Box<dim,T>>,VECTOR_BOX> vtk_box6;
+		for (size_t p = 0 ; p < loc_ghost_box.size() ; p++)
+		{
+
+		}*/
+
 		return true;
 	}
};
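The grid_dist_id changes below replace the old ad-hoc division by the cell size with calls to convertDomainSpaceIntoGridUnits on the CellDecomposer. A standalone sketch of what such a conversion does on one axis; the rounding policy here is an assumption for illustration, not OpenFPM's exact code:

```cpp
#include <cmath>
#include <cstddef>
#include <cstdio>

int main()
{
	const double L = 1.0;          // domain length on this axis
	const std::size_t n_cell = 4;  // cells on this axis (grid points - 1)
	const double spacing = L / n_cell;

	// a ghost box edge in domain coordinates
	double x1 = 0.4, x2 = 0.6;

	// snap to grid units; rounding absorbs floating point noise
	long g1 = std::lround(x1 / spacing);
	long g2 = std::lround(x2 / spacing);

	std::printf("grid units: [%ld, %ld]\n", g1, g2);
}
```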
diff --git a/src/Grid/grid_dist_id.hpp b/src/Grid/grid_dist_id.hpp
index a86bc0b05..636aa5559 100644
--- a/src/Grid/grid_dist_id.hpp
+++ b/src/Grid/grid_dist_id.hpp
@@ -34,7 +34,6 @@
 * \param device type of base structure is going to store the data
 *
 */
-
template<unsigned int dim, typename St, typename T, typename Decomposition,typename Memory=HeapMemory , typename device_grid=grid_cpu<dim,T> >
class grid_dist_id
{
@@ -109,98 +108,6 @@ class grid_dist_id
 		}
 	}

-//	bool link_ig_eg_init = false;
-
-	/*! \brief Link the internal ghost boxes with the external ghost boxes
-	 *
-	 * Each internal ghost box is linked with the external ghost box of the neighbor
-	 * processor; in this function the processors send the external ghost boxes to
-	 * the near processors and link the internal ghost boxes to the received external ones
-	 *
-	 */
-/*	void link_ig_eg()
-	{
-		if (link_ig_eg == true)	return;
-
-		openfpm::vector< openfpm::vector< ::Box<dim,T>> > e_box_link(eg_box.size());
-		openfpm::vector<size_t> prc_b;
-
-		// Create a vector with external ghost boxes to send for each processor
-		for (size_t i = 0; i < eg_box.size() ; i++)
-		{
-			for (size_t j = 0 ; j < eg_box.get(i).bid.size() ;j++)
-			{
-				e_box_link.add(eg_box.get(i).bid.get(j));
-			}
-
-			prc_b.add(eg_box.get(i).prc);
-		}
-
-		// Exchange the information
-
-		v_cl.sendrecvMultipleMessagesNBX(prc_b,e_box_link,msg_alloc_external_box,this);
-
-		// create a vector of boxes from the received messages
-		//! Note: eg_box.size() == ig_box.size() == dec.getNNProcessors()
-		for (size_t i = 0; i < dec.getNNProcessors() ; i++)
-		{
-			size_t n_ele = recv_sz.get(i) / sizeof(::Box<dim,T>);
-
-			// Pointer of the received positions for each near processor
-			void * ptr_boxes = recv_mem_gg.get(i).getPointer();
-
-			PtrMemory * ptr1 = new PtrMemory(ptr_boxes,n_ele * sizeof(point));
-
-			// received external ghost boxes in vector representation
-			openfpm::vector< ::Box<dim,T>,openfpm::device_cpu< ::Box<dim,T>> ,PtrMemory,openfpm::grow_policy_identity> r_eg_box;
-
-			// for each received external box
-			for (size_t j = 0 ; j < r_eg_box.size() ; j++)
-			{
-				// get the middle point
-				Point<dim,T> CM = r_eg_box.get(j).middle();
-
-				// internal ghost with maximum volume
-				long int max_vol_id = -1;
-				T max_vol = 0;
-
-				// Get the internal ghost boxes that fall into this point
-				auto b_it = dec.getInternalIDBoxes();
-
-				// Here we intersect each received external box with all
-				// our internal ghost boxes; in theory only one box should match
-				// the intersection, but we take the one with maximum volume intersection
-				while (b_it.isNext())
-				{
-					size_t b_id = b_it.get();
-
-					// Get the internal ghost box
-					const Box<dim,T> & b = dec.getIGhostBox(b_id);
-
-					// out intersection
-					Box<dim,T> b_out;
-					// intersect
-					bool intersect = b.Intersect(b,b_out);
-
-					// if intersect
-					if (intersect == true && b_out.getVolume() > max_vol)
-					{
-						max_vol = b_out.getVolume();
-						max_vol_id = b_id;
-					}
-
-					++b_it;
-				}
-
-				// Link
-
-				ig_box.get(i).bid.get(max_vol_id).r_id = j;
-			}
-		}
-
-		link_ig_eg_init = true;
-	}*/
-
 	// Receiving size
 	openfpm::vector<size_t> recv_sz;
@@ -257,11 +164,14 @@ class grid_dist_id
 			for (size_t j = 0 ; j < dec.getProcessorNIGhost(i) ; j++)
 			{
 				// Get the internal ghost boxes and transform into grid units
-				::Box<dim,St> ib = dec.getProcessorIGhostBox(i,j);
-				ib /= cd_sm.getCellBox().getP2();
+				::Box<dim,St> ib_dom = dec.getProcessorIGhostBox(i,j);
+				::Box<dim,size_t> ib = cd_sm.convertDomainSpaceIntoGridUnits(ib_dom);
+
+				// If ib is not valid the internal ghost box carries no information, so skip it
+				if (ib.isValid() == false)
+					continue;

 				// save the box and the sub-domain id (it is calculated as the linearization of P1)
-				// It is unique because it is ensured that boxes does not overlap
 				::Box<dim,size_t> cvt = ib;

 				i_box_id bid_t;
@@ -269,9 +179,6 @@ class grid_dist_id
 				bid_t.g_id = g.LinId(bid_t.box.getKP1());
 				bid_t.sub = dec.getProcessorIGhostSub(i,j);
 				pib.bid.add(bid_t);
-
-				// Add the element in the unordered map
-				g_id_to_external_ghost_box[bid_t.g_id] = bid_t.sub;
 			}
 		}
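create_ig_box keys every internal ghost box by linearizing its P1 corner on the global grid (bid_t.g_id = g.LinId(bid_t.box.getKP1())), and create_eg_box below registers the matching external boxes under the same id. A simplified 2D illustration of that keying, with row-major linearization standing in for grid_sm::LinId:

```cpp
#include <cstddef>
#include <cstdio>

int main()
{
	const std::size_t sz[2] = {8,8}; // global grid size

	// P1 corner of a ghost box in grid units
	std::size_t p1[2] = {4,6};

	// row-major linear id: unique per grid point, hence unique per
	// box as long as the boxes' P1 corners never coincide
	std::size_t g_id = p1[1] * sz[0] + p1[0];

	std::printf("g_id = %zu\n", g_id);
}
```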
@@ -297,12 +204,17 @@ class grid_dist_id
 			pib.prc = dec.IDtoProc(i);
 			for (size_t j = 0 ; j < dec.getProcessorNEGhost(i) ; j++)
 			{
-				// Get the internal ghost boxes and transform into grid units
-				::Box<dim,St> ib = dec.getProcessorEGhostBox(i,j);
-				ib /= cd_sm.getCellBox().getP2();
+				// Get the external ghost boxes and transform into grid units
+				::Box<dim,St> ib_dom = dec.getProcessorEGhostBox(i,j);
+				::Box<dim,size_t> ib = cd_sm.convertDomainSpaceIntoGridUnits(ib_dom);
+
+				// If ib is not valid the external ghost box carries no information, so skip it
+				if (ib.isValid() == false)
+					continue;

 				// save the box and the unique external ghost box id (linearization of P1)
 				// It is (locally) unique because it is ensured that external ghost boxes do not overlap
+				// Careful: this numbering does not coincide with the internal ghost box one
 				::Box<dim,size_t> cvt = ib;

 				// sub-domain id to which the external ghost box belongs
@@ -313,21 +225,160 @@ class grid_dist_id
 				e_box_id bid_t;
 				bid_t.sub = sub_id;
 				bid_t.g_e_box = cvt;
-				bid_t.l_e_box = cvt;
 				// Translate in local coordinates
-				bid_t.l_e_box -= gdb_ext.get(sub_id).origin.template convertPoint<size_t>();
+				Box<dim,long int> tb = cvt;
+				tb -= gdb_ext.get(sub_id).origin;
+				bid_t.l_e_box = tb;
 				pib.bid.add(bid_t);
+
+				// Add the map between the global ghost box id and the id of the external box in the vector
+				g_id_to_external_ghost_box[g.LinId(cvt.getKP1())] = pib.bid.size()-1;
 			}
 		}

 		init_e_g_box = true;
 	}

+	bool init_local_i_g_box = false;
+
+	/*! \brief Create the local internal ghost box list in grid units
+	 *
+	 */
+	void create_local_ig_box()
+	{
+		// Get the grid info
+		auto g = cd_sm.getGrid();
+
+		if (init_local_i_g_box == true)	return;
+
+		// For each local sub-domain
+		for (size_t i = 0 ; i < dec.getNLocalHyperCube() ; i++)
+		{
+			loc_ig_box.add();
+			auto&& pib = loc_ig_box.last();
+
+			for (size_t j = 0 ; j < dec.getLocalNIGhost(i) ; j++)
+			{
+				// Get the internal ghost box and transform into grid units
+				::Box<dim,St> ib_dom = dec.getLocalIGhostBox(i,j);
+				::Box<dim,size_t> ib = cd_sm.convertDomainSpaceIntoGridUnits(ib_dom);
+
+				// If ib is not valid the internal ghost box carries no information, so skip it
+				if (ib.isValid() == false)
+					continue;
+
+				pib.bid.add();
+				pib.bid.last().box = ib;
+				pib.bid.last().sub = dec.getLocalIGhostSub(i,j);
+			}
+		}
+
+		init_local_i_g_box = true;
+	}
+
+	bool init_local_e_g_box = false;
+
+	/*! \brief Create the local external ghost box list in grid units
+	 *
+	 */
+	void create_local_eg_box()
+	{
+		// Get the grid info
+		auto g = cd_sm.getGrid();
+
+		if (init_local_e_g_box == true)	return;
+
+		// For each local sub-domain
+		for (size_t i = 0 ; i < dec.getNLocalHyperCube() ; i++)
+		{
+			loc_eg_box.add();
+			auto&& pib = loc_eg_box.last();
+
+			for (size_t j = 0 ; j < dec.getLocalNEGhost(i) ; j++)
+			{
+				// Get the external ghost box and transform into grid units
+				::Box<dim,St> ib_dom = dec.getLocalEGhostBox(i,j);
+				::Box<dim,size_t> ib = cd_sm.convertDomainSpaceIntoGridUnits(ib_dom);
+
+				// If ib is not valid the external ghost box carries no information, so skip it
+				if (ib.isValid() == false)
+					continue;
+
+				pib.bid.add();
+				pib.bid.last().box = ib;
+				pib.bid.last().sub = dec.getLocalEGhostSub(i,j);
+			}
+		}
+
+		init_local_e_g_box = true;
+	}
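ghost_get_local below reads an e_b field that links each local internal ghost box to the matching external ghost box on the destination sub-domain, but the patch does not show where e_b gets filled. One way the link could be established, as a sketch only and under the assumption that each ordered pair of sub-domains produces at most one local ghost box (otherwise the geometric boxes would have to be compared as well):

```cpp
// Hypothetical linking pass, to run after create_local_ig_box/create_local_eg_box.
// For every internal box of sub-domain i, find the external box of the
// destination sub-domain that was produced by i, and record its index.
for (size_t i = 0 ; i < loc_ig_box.size() ; i++)
{
	for (size_t k = 0 ; k < loc_ig_box.get(i).bid.size() ; k++)
	{
		size_t dst = loc_ig_box.get(i).bid.get(k).sub;

		for (size_t e = 0 ; e < loc_eg_box.get(dst).bid.size() ; e++)
		{
			if (loc_eg_box.get(dst).bid.get(e).sub == i)
			{
				loc_ig_box.get(i).bid.get(k).e_b = e;
				break;
			}
		}
	}
}
```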
+	/*! \brief Sync the local ghost part
+	 *
+	 * \tparam prp... properties to sync
+	 *
+	 */
+	template<int... prp> void ghost_get_local()
+	{
+		//! For all the sub-domains
+		for (size_t i = 0 ; i < loc_ig_box.size() ; i++)
+		{
+			//! For all the internal ghost boxes of each sub-domain
+			for (size_t j = 0 ; j < loc_ig_box.get(i).bid.size() ; j++)
+			{
+				Box<dim,size_t> & bx_src = loc_ig_box.get(i).bid.get(j).box;
+
+				// destination sub-domain
+				size_t sub_id = loc_ig_box.get(i).bid.get(j).sub;
+
+				// linked local external ghost box
+				size_t e_box_sub = loc_ig_box.get(i).bid.get(j).e_b;
+
+				Box<dim,size_t> & bx_dst = loc_eg_box.get(sub_id).bid.get(e_box_sub).box;
+
+				// create the 2 sub-grid iterators
+				grid_key_dx_iterator_sub<dim> sub_src(loc_grid.get(i).getGrid(),bx_src.getKP1(),bx_src.getKP2());
+				grid_key_dx_iterator_sub<dim> sub_dst(loc_grid.get(sub_id).getGrid(),bx_dst.getKP1(),bx_dst.getKP2());
+
+#ifdef DEBUG
+
+				if (sub_src.getVolume() != sub_dst.getVolume())
+					std::cerr << "Error " << __FILE__ << ":" << __LINE__ << " source and destination do not match in size" << "\n";
+
+#endif
+
+				const auto & gs = loc_grid.get(i);
+				auto & gd = loc_grid.get(sub_id);
+
+				while (sub_src.isNext())
+				{
+					// copy the source point into the linked ghost point
+					gd.set(sub_dst.get(),gs,sub_src.get());
+
+					++sub_src;
+					++sub_dst;
+				}
+			}
+		}
+	}
+
 public:

 	//! constructor
 	grid_dist_id(Vcluster v_cl, Decomposition & dec, const size_t (& g_sz)[dim], const Box<dim,St> & domain, const Ghost<dim,T> & ghost)
-	:domain(domain),ghost(ghost),loc_grid(NULL),cd_sm(domain,g_sz,0),v_cl(v_cl),dec(dec)
+	:domain(domain),ghost(ghost),loc_grid(NULL),v_cl(v_cl),dec(dec)
 	{
+		// For a 5x5 grid you have a 4x4 cell grid
+		size_t c_g[dim];
+		for (size_t i = 0 ; i < dim ; i++)	{c_g[i] = g_sz[i]-1;}
+
+		// Initialize the cell decomposer
+		cd_sm.setDimensions(domain,c_g,0);
+
 		// fill the global size of the grid
 		for (int i = 0 ; i < dim ; i++)	{this->g_sz[i] = g_sz[i];}

@@ -359,8 +410,15 @@ public:
 	 *
 	 */
 	grid_dist_id(const size_t (& g_sz)[dim],const Box<dim,St> & domain, const Ghost<dim,St> & g)
-	:domain(domain),ghost(g),dec(Decomposition(*global_v_cluster)),cd_sm(domain,g_sz,0),v_cl(*global_v_cluster)
+	:domain(domain),ghost(g),dec(Decomposition(*global_v_cluster)),v_cl(*global_v_cluster)
 	{
+		// For a 5x5 grid you have a 4x4 cell grid
+		size_t c_g[dim];
+		for (size_t i = 0 ; i < dim ; i++)	{c_g[i] = g_sz[i]-1;}
+
+		// Initialize the cell decomposer
+		cd_sm.setDimensions(domain,c_g,0);
+
 		// fill the global size of the grid
 		for (size_t i = 0 ; i < dim ; i++)	{this->g_sz[i] = g_sz[i];}

@@ -396,15 +454,30 @@ public:
 		return dec;
 	}

+	/*! \brief Return the cell decomposer
+	 *
+	 * \return the cell decomposer
+	 *
+	 */
+	const CellDecomposer_sm<dim,St> & getCellDecomposer()
+	{
+		return cd_sm;
+	}
+
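Both constructors above now size the CellDecomposer with g_sz[i]-1 cells: a grid with N points per dimension has N-1 cells between them. A tiny standalone check of the resulting point spacing:

```cpp
#include <cstddef>
#include <cstdio>

int main()
{
	const std::size_t N = 5;              // grid points per dimension
	const double L = 1.0;                 // domain length
	const double spacing = L / (N - 1);   // 4 cells for 5 points

	for (std::size_t i = 0 ; i < N ; i++)
		std::printf("point %zu -> x = %g\n", i, i * spacing);
}
```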
	/*! \brief Create the grids on memory
	 *
	 */
	void Create()
	{
-		// Box used for rounding error
-		Box<dim,St> rnd_box;
-		for (size_t i = 0 ; i < dim ; i++)	{rnd_box.setHigh(i,0.5); rnd_box.setLow(i,0.5);}
 		// Box used for rounding in case of ghost
 		Box<dim,St> g_rnd_box;
 		for (size_t i = 0 ; i < dim ; i++)	{g_rnd_box.setHigh(i,0.5); g_rnd_box.setLow(i,-0.5);}

 		...

 		// Get the local hyper-cube
 		SpaceBox<dim,St> sp = dec.getLocalHyperCube(i);

-		// Convert sp into grid units
-		sp /= cd_sm.getCellBox().getP2();
-
-		// enlarge by 0.5 for rounding
-		sp.enlarge(rnd_box);
-
 		// Convert from SpaceBox<dim,float> to SpaceBox<dim,long int>
-		SpaceBox<dim,long int> sp_t = sp;
+		SpaceBox<dim,long int> sp_t = cd_sm.convertDomainSpaceIntoGridUnits(sp);

 		// convert the ghost from space coordinates to grid units
 		Ghost<dim,St> g_int = ghost;

 		...

 		gdb_ext.last().Dbox = sp_t;
 		gdb_ext.last().Dbox -= g_int_t.getP1();
-		// needed because the last key coordinate is size - 1 on each direction
-		gdb_ext.last().Dbox.shrinkP2(1);

 		// The origin is the Domain box + ghost, so shift
 		gdb_ext.last().origin += g_int_t.getP1();

 		...

 		sp_t.enlarge_fix_P1(g_int_t);

 		// Get the size of the local grid
-		for (size_t i = 0 ; i < dim ; i++) {l_res[i] = sp_t.getHigh(i);}
+		for (size_t i = 0 ; i < dim ; i++) {l_res[i] = (sp_t.getHigh(i) >= 0)?(sp_t.getHigh(i)+1):0;}

 		// Set the dimensions of the local grid
 		loc_grid.get(i).template resize<Memory>(l_res);
 	}
 }

+	/*! \brief Check that the global grid key is inside the grid domain
+	 *
+	 * \return true if it is inside
+	 *
+	 */
+	bool isInside(const grid_key_dx<dim> & gk) const
+	{
+		for (size_t i = 0 ; i < dim ; i++)
+		{
+			if (gk.get(i) < 0 || gk.get(i) >= (long int)g_sz[i])
+				return false;
+		}
+
+		return true;
+	}
+
-	/*! \brief It return an iterator of the bulk part of the grid with a specified margin
+	/*! \brief It returns an iterator that spans the full grid domain (each processor spans its local part)
 	 *
-	 * For margin we mean that every point is at least m points far from the border
-	 *
-	 * \param m margin
-	 *
-	 * \return An iterator to a grid with specified margins
+	 * \return the iterator
 	 *
 	 */
-	grid_dist_iterator<dim,device_grid> getDomainIterator()
+	grid_dist_iterator<dim,device_grid,FREE> getDomainIterator()
 	{
-		grid_dist_iterator<dim,device_grid> it(loc_grid,gdb_ext);
+		grid_dist_iterator<dim,device_grid,FREE> it(loc_grid,gdb_ext);

 		return it;
 	}

+	/*! \brief It returns an iterator that spans the grid domain + ghost part
+	 *
+	 * \return the iterator
+	 *
+	 */
+	grid_dist_iterator<dim,device_grid,FIXED> getDomainGhostIterator()
+	{
+		grid_dist_iterator<dim,device_grid,FIXED> it(loc_grid,gdb_ext);
+
+		return it;
+	}
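A usage sketch contrasting the two iterators just introduced; the helper is hypothetical and assumes a constructed distributed grid g_dist:

```cpp
// FREE spans only the domain points of each local grid; FIXED
// (getDomainGhostIterator) walks the ghost points as well.
template<typename grid> void fill_and_inspect(grid & g_dist)
{
	auto dom = g_dist.getDomainIterator();        // FREE: domain only
	while (dom.isNext())
	{
		auto key = dom.get();
		g_dist.template get<0>(key) = 1.0;        // always a real domain point
		++dom;
	}

	auto domg = g_dist.getDomainGhostIterator();  // FIXED: domain + ghost
	while (domg.isNext())
	{
		auto key = domg.get();
		if (g_dist.isInside(g_dist.getGKey(key)) == false)
		{
			// a pure ghost point: readable after ghost_get, not owned here
		}
		++domg;
	}
}
```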
	/*! \brief It stores the information about an internal ghost box
	 *
	 */
	struct i_box_id
	{
		//! Box
		::Box<dim,size_t> box;

		//! unique global id
		size_t g_id;

		//! sub-domain id from which it comes as internal ghost box
		size_t sub;
	};

+	/*! \brief It stores a local internal ghost box, the linked local external ghost box
+	 * and the sub-domain it comes from as internal ghost box
+	 *
+	 */
+	struct i_lbox_id
+	{
+		//! Box
+		::Box<dim,size_t> box;
+
+		//! sub-domain id
+		size_t sub;
+
+		//! id of the linked external box
+		size_t e_b;
+	};
+
	/*! \brief It stores the information about an external ghost box
	 *
	 */
	struct e_box_id
	{
		//! Box defining the external ghost box in global coordinates
		::Box<dim,size_t> g_e_box;

		//! Box defining the external ghost box in local coordinates
		::Box<dim,size_t> l_e_box;

		//! sub_id in which sub-domain this box lives
		size_t sub;
	};

+	/*! \brief It stores the information about a local external ghost box
+	 *
+	 */
+	struct e_lbox_id
+	{
+		//! Box defining the external ghost box in local coordinates
+		::Box<dim,size_t> box;
+
+		//! sub_id in which sub-domain this box lives
+		size_t sub;
+	};
+
	/*! \brief Per-processor internal ghost box
	 *
	 */
	struct ip_box_grid
	{
		// ghost in grid units
		openfpm::vector<i_box_id> bid;

		//! processor id
		size_t prc;
	};

+	/*! \brief Local internal ghost box
+	 *
+	 */
+	struct i_lbox_grid
+	{
+		// ghost in grid units
+		openfpm::vector<i_lbox_id> bid;
+	};
+
	/*! \brief Per-processor external ghost box
	 *
	 */
	struct ep_box_grid
	{
		// ghost in grid units
		openfpm::vector<e_box_id> bid;

		//! processor id
		size_t prc;
	};

+	/*! \brief Local external ghost box
+	 *
+	 */
+	struct e_lbox_grid
+	{
+		// ghost in grid units
+		openfpm::vector<e_lbox_id> bid;
+	};
+
	//! Memory for the ghost sending buffer
	Memory g_send_prp_mem;

	...

	//! Internal ghost boxes in grid units
	openfpm::vector<ip_box_grid> ig_box;

	//! External ghost boxes in grid units
	openfpm::vector<ep_box_grid> eg_box;

+	//! Local internal ghost boxes in grid units
+	openfpm::vector<i_lbox_grid> loc_ig_box;
+
+	//! Local external ghost boxes in grid units
+	openfpm::vector<e_lbox_grid> loc_eg_box;
+
	/*! \brief It synchronizes getting the ghost part of the grid
	 *
	 * \tparam prp Properties to get (sequence of properties ids)
	 *
	 */
@@ -673,6 +815,8 @@ public:
 		// Calculate the total information to receive from each processor
 		std::vector<size_t> prp_recv;

+		// Calculate the unpacking sequence
+		std::vector<size_t> prp_unpack;

 		//! Receive the information from each processor
 		for ( size_t i = 0 ; i < eg_box.size() ; i++ )
 		{
 			...
 			{
 				// External ghost box
 				Box<dim,size_t> g_eg_box = eg_box.get(i).bid.get(j).g_e_box;
-
 				prp_recv[prp_recv.size()-1] += g_eg_box.getVolumeKey() * sizeof(prp_object) + sizeof(size_t);
+
+				// unpack sequence
+				prp_unpack.push_back(sizeof(size_t));
+				prp_unpack.push_back(g_eg_box.getVolumeKey() * sizeof(prp_object));
 			}
 		}

 		g_recv_prp_mem.resize(ExtPreAlloc<Memory>::calculateMem(prp_recv));

 		// Create an object of preallocated memory for properties
-		ExtPreAlloc<Memory> & prRecv_prp = *(new ExtPreAlloc<Memory>(prp_recv,g_recv_prp_mem));
+		ExtPreAlloc<Memory> & prRecv_prp = *(new ExtPreAlloc<Memory>(prp_unpack,g_recv_prp_mem));
 		prRecv_prp.incRef();

-		// queue the receive
-
+		// queue the receives
+		size_t offset = 0;
 		for ( size_t i = 0 ; i < eg_box.size() ; i++ )
 		{
-			v_cl.recv(eg_box.get(i).prc,0,prRecv_prp.getPointer(i),prp_recv[i]);
+			v_cl.recv(eg_box.get(i).prc,0,prRecv_prp.getPointerOffset(offset),prp_recv[i]);
+			offset += prp_recv[i];
 		}

+		// Before waiting for the communication to complete, sync the local ghost,
+		// so that the local copies overlap with the network transfer
+
+		ghost_get_local<prp...>();
+
 		// wait to receive the communication
 		v_cl.execute();

-		Pack_stat ps;
+		Unpack_stat ps;

 		// Unpack the objects
 		for ( size_t i = 0 ; i < eg_box.size() ; i++ )
 		{
 			...
 		}
 	}

+	/*! \brief Convert a grid_dist_key_dx into a global key
+	 *
+	 * \see grid_dist_key_dx
+	 * \see grid_dist_key_dx_iterator
+	 *
+	 * \return the global position in the grid
+	 *
+	 */
+	inline grid_key_dx<dim> getGKey(const grid_dist_key_dx<dim> & k)
+	{
+		// Get the sub-domain id
+		size_t sub_id = k.getSub();
+
+		grid_key_dx<dim> k_glob = k.getKey();
+
+		// shift
+		k_glob = k_glob + gdb_ext.get(sub_id).origin;
+
+		return k_glob;
+	}
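The receive path above sizes the buffer with prp_recv but addresses it through prp_unpack, because each external ghost box arrives as a size_t header followed by its packed payload. A standalone sketch of that per-box layout, with illustrative sizes:

```cpp
#include <cstddef>
#include <cstdio>

int main()
{
	const std::size_t box_volume = 16;              // grid points in one ghost box
	const std::size_t prp_size   = sizeof(double);  // one packed property per point

	// per-box slice of the receive buffer: header + payload,
	// which is why prp_unpack holds two entries per box
	std::size_t slice = sizeof(std::size_t)
	                  + box_volume * prp_size;

	std::printf("bytes per ghost box: %zu\n", slice);
}
```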
	/*! \brief Write the grid_dist_id information as VTK file
	 *
	 * The function generates several files
	 *
	 ...
@@ -929,9 +1102,9 @@ public:
 	 * \return An iterator to a grid with specified margins
 	 *
 	 */
-	grid_dist_iterator<1,device_grid> getDomainIterator()
+	grid_dist_iterator<1,device_grid,FREE> getDomainIterator()
 	{
-		grid_dist_iterator<1,device_grid> it(loc_grid,gdb_ext);
+		grid_dist_iterator<1,device_grid,FREE> it(loc_grid,gdb_ext);

 		return it;
 	}
diff --git a/src/Grid/grid_dist_id_iterator.hpp b/src/Grid/grid_dist_id_iterator.hpp
index 682b51879..7a50cebc7 100644
--- a/src/Grid/grid_dist_id_iterator.hpp
+++ b/src/Grid/grid_dist_id_iterator.hpp
@@ -45,17 +45,41 @@ struct GBoxes
 	Point<dim,long int> origin;
 };

+#define FREE 1
+#define FIXED 2
+
 #include "grid_dist_key.hpp"
 #include "VCluster.hpp"

+/*! \brief Distributed grid iterator
+ *
+ * Iterator across the local elements of the distributed grid
+ *
+ * \tparam dim dimensionality of the grid
+ * \tparam device_grid type of basic grid
+ * \tparam impl implementation
+ *
+ */
+template<unsigned int dim, typename device_grid, int impl >
+class grid_dist_iterator
+{
+
+};
+
 /*! \brief Distributed grid iterator
 *
 * Iterator across the local elements of the distributed grid
 *
+ * \tparam dim dimensionality of the grid
+ * \tparam device_grid type of basic grid
+ * \tparam impl implementation
+ *
 */
 template<unsigned int dim, typename device_grid>
-class grid_dist_iterator
+class grid_dist_iterator<dim,device_grid,FREE>
 {
 	//! grid list counter
 	size_t g_c;

 	//! List of the grids we are going to iterate
 	Vcluster_object_array<device_grid> & gList;

 	//! Extension of each grid: domain and ghost + domain
 	const openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext;

@@ -92,21 +116,108 @@ class grid_dist_iterator
 	{
 	}

-	/*! \brief operator=
-	 *
-	 * assign
-	 *
-	 */
-/*	grid_dist_iterator<dim,device_grid> & operator=(const grid_dist_iterator<dim,device_grid> & gdi)
-	{
-		g_c = gdi.g_c;
-		gList = gdi.gList;
-		a_it = gdi.a_it;
-		m = gdi.m;
-		gdb_ext = gdi.gdb_ext;
-
-		return *this;
-	}*/
+	/*! \brief Get the next element
+	 *
+	 * \return the next grid_key
+	 *
+	 */
+	grid_dist_iterator<dim,device_grid,FREE> operator++()
+	{
+		++a_it;
+
+		// check if a_it is at the end
+
+		if (a_it.isNext() == true)
+			return *this;
+		else
+		{
+			// switch to the next grid
+			g_c++;
+
+			// when a grid has size 0 all its other information is garbage, skip it
+			while (g_c < gList.size() && gList[g_c].size() == 0) g_c++;
+
+			// get the next grid iterator
+			if (g_c < gList.size())
+			{
+				a_it.reinitialize(gList[g_c].getIterator(gdb_ext.get(g_c).Dbox.getKP1(),gdb_ext.get(g_c).Dbox.getKP2()));
+			}
+		}
+
+		return *this;
+	}
+
+	/*! \brief Check if there is the next element
+	 *
+	 * \return true if there is the next, false otherwise
+	 *
+	 */
+	bool isNext()
+	{
+		// If there are no other grids stop
+
+		if (g_c >= gList.size())
+			return false;
+
+		return true;
+	}
+
+	/*! \brief Get the actual key
+	 *
+	 * \return the actual key
+	 *
+	 */
+	grid_dist_key_dx<dim> get()
+	{
+		return grid_dist_key_dx<dim>(g_c,a_it.get());
+	}
+};
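The header now dispatches on an int non-type template parameter (FREE/FIXED) through full class specializations, so the two iteration strategies get separate types with the same interface. A minimal standalone version of the pattern:

```cpp
#include <cstdio>

#define FREE 1
#define FIXED 2

template<int impl> class iter_impl;  // primary template, never instantiated

template<> class iter_impl<FREE>
{
public:
	const char * what() const { return "domain only"; }
};

template<> class iter_impl<FIXED>
{
public:
	const char * what() const { return "domain + ghost"; }
};

int main()
{
	std::printf("%s / %s\n", iter_impl<FREE>().what(), iter_impl<FIXED>().what());
}
```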
+/*! \brief Distributed grid iterator
+ *
+ * Iterator across the local elements of the distributed grid, ghost part included
+ *
+ * \tparam dim dimensionality of the grid
+ * \tparam device_grid type of basic grid
+ * \tparam impl implementation
+ *
+ */
+template<unsigned int dim, typename device_grid>
+class grid_dist_iterator<dim,device_grid,FIXED>
+{
+	//! grid list counter
+	size_t g_c;
+
+	//! List of the grids we are going to iterate
+	Vcluster_object_array<device_grid> & gList;
+
+	//! Extension of each grid: domain and ghost + domain
+	const openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext;
+
+	//! Actual iterator
+	grid_key_dx_iterator<dim> a_it;
+
+	public:
+
+	/*! \brief Constructor of the distributed grid iterator
+	 *
+	 * \param gk std::vector of the local grids
+	 *
+	 */
+	grid_dist_iterator(Vcluster_object_array<device_grid> & gk, const openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext)
+	:g_c(0),gList(gk),gdb_ext(gdb_ext)
+	{
+		// Initialize the current iterator with the first grid
+		a_it.reinitialize(gList[0].getIterator());
+	}
+
+	//! Destructor
+	~grid_dist_iterator()
+	{
+	}

 	/*! \brief Get the next element
 	 *
 	 * \return the next grid_key
 	 *
 	 */
-	grid_dist_iterator<dim,device_grid> operator++()
+	grid_dist_iterator<dim,device_grid,FIXED> operator++()
 	{
 		++a_it;

 		// check if a_it is at the end

 		if (a_it.isNext() == true)
 			return *this;
 		else
 		{
 			// switch to the next grid
 			g_c++;

+			// when a grid has size 0 all its other information is garbage, skip it
+			while (g_c < gList.size() && gList[g_c].size() == 0) g_c++;
+
 			// get the next grid iterator
 			if (g_c < gList.size())
 			{
 				a_it.reinitialize(gList[g_c].getIterator());
 			}
 		}

 		return *this;
 	}

 	/*! \brief Check if there is the next element
 	 *
 	 * \return true if there is the next, false otherwise
 	 *
 	 */
 	bool isNext()
 	{
 		// If there are no other grids stop

 		if (g_c >= gList.size())
 			return false;

 		return true;
 	}

 	/*! \brief Get the actual key
 	 *
 	 * \return the actual key
 	 *
 	 */
 	grid_dist_key_dx<dim> get()
 	{
 		return grid_dist_key_dx<dim>(g_c,a_it.get());
 	}
 };
-
 #endif /* GRID_DIST_ID_ITERATOR_SUB_HPP_ */
diff --git a/src/Grid/grid_dist_id_unit_test.hpp b/src/Grid/grid_dist_id_unit_test.hpp
index 8a9cbff96..9be882ec7 100644
--- a/src/Grid/grid_dist_id_unit_test.hpp
+++ b/src/Grid/grid_dist_id_unit_test.hpp
@@ -28,7 +28,7 @@ template<typename iterator> void jacobi_iteration(iterator g_it, grid_dist_id<2,
 	}
 }

-BOOST_AUTO_TEST_CASE( grid_dist_id_iterator_test_use)
+BOOST_AUTO_TEST_CASE( grid_dist_id_domain_grid_unit_converter_test)
 {
 	// Domain
 	Box<2,float> domain({0.0,0.0},{1.0,1.0});

 	// Initialize the global VCluster
 	init_global_v_cluster(&boost::unit_test::framework::master_test_suite().argc,&boost::unit_test::framework::master_test_suite().argv);

-	// grid size
-	size_t sz[2] = {1024,1024};
+	Vcluster & v_cl = *global_v_cluster;
+
+	// Test several grid dimensions
+	for (size_t k = 1024 ; k > 1 ; k--)
+	{
+		std::cout << "Testing: " << k << "\n";
+
+		// grid size
+		size_t sz[2];
+		sz[0] = k;
+		sz[1] = k;

-	// Ghost
-	Ghost<2,float> g(0.01);
+		// Ghost
+		Ghost<2,float> g(0.01);

-	// Distributed grid with id decomposition
-	grid_dist_id<2, float, scalar<float>, CartDecomposition<2,float>> g_dist(sz,domain,g);
+		// Distributed grid with id decomposition
+		grid_dist_id<2, float, scalar<float>, CartDecomposition<2,float>> g_dist(sz,domain,g);
+
+		// get the decomposition
+		auto & dec = g_dist.getDecomposition();
+
+		// get the number of local grids needed
+		size_t n_grid = dec.getNLocalHyperCube();
+
+		size_t vol = 0;
+
+		// count the grid points of each local grid
+		for (size_t i = 0 ; i < n_grid ; i++)
+		{
+			// Get the local hyper-cube
+			SpaceBox<2,float> sub = dec.getLocalHyperCube(i);
+
+			Box<2,size_t> g_box = g_dist.getCellDecomposer().convertDomainSpaceIntoGridUnits(sub);
+
+			vol += g_box.getVolumeKey();
+		}
+
+		v_cl.reduce(vol);
+		v_cl.execute();
+
+		BOOST_REQUIRE_EQUAL(vol,sz[0]*sz[1]);
+	}
+}
+
+BOOST_AUTO_TEST_CASE( grid_dist_id_iterator_test_use)
+{
+	// Domain
+	Box<2,float> domain({0.0,0.0},{1.0,1.0});
+
+	// Initialize the global VCluster
+	init_global_v_cluster(&boost::unit_test::framework::master_test_suite().argc,&boost::unit_test::framework::master_test_suite().argv);
+
+	for (long int k = 1026 ; k > 1 ; k -= 33)
 	{
+		// grid size
+		size_t sz[2];
+		sz[0] = k;
+		sz[1] = k;

+		// Ghost
+		Ghost<2,float> g(0.01);

+		// Distributed grid with id decomposition
+		grid_dist_id<2, float, scalar<float>, CartDecomposition<2,float>> g_dist(sz,domain,g);
+
+		// Grid sm
+		grid_sm<2,void> info(sz);
+
+		// get the domain iterator
+		size_t count = 0;
+
+		auto dom = g_dist.getDomainIterator();

-	while (dom.isNext())
-	{
-		auto key = dom.get();
+		while (dom.isNext())
+		{
+			auto key = dom.get();
+			auto key_g = g_dist.getGKey(key);

-		g_dist.template get<0>(key) = count;
+			g_dist.template get<0>(key) = info.LinId(key_g);

-		// Count the point
-		count++;
+			// Count the points
+			count++;

-		++dom;
-	}
+			++dom;
+		}

-	// Get the virtual cluster machine
-	Vcluster & vcl = g_dist.getVC();
+		// Get the virtual cluster machine
+		Vcluster & vcl = g_dist.getVC();

-	// reduce
-	vcl.reduce(count);
-	vcl.execute();
+		// reduce
+		vcl.reduce(count);
+		vcl.execute();

-	// Check
-	BOOST_REQUIRE_EQUAL(count,1024*1024);
+		// Check
+		BOOST_REQUIRE_EQUAL(count,(size_t)(k*k));

-	size_t count_check = 0;
-	auto dom2 = g_dist.getDomainIterator();
+		auto dom2 = g_dist.getDomainIterator();

-	while (dom2.isNext())
-	{
-		auto key = dom2.get();
+		// check that the grid stores the correct information
+		while (dom2.isNext())
+		{
+			auto key = dom2.get();
+			auto key_g = g_dist.getGKey(key);

-		BOOST_REQUIRE_EQUAL(g_dist.template get<0>(key),count_check);
+			BOOST_REQUIRE_EQUAL(g_dist.template get<0>(key),info.LinId(key_g));

-		count_check++;
-		++dom2;
-	}
+			++dom2;
+		}

-	g_dist.template ghost_get<0>();
+		g_dist.template ghost_get<0>();
+
+		// check that the communication completed correctly
+
+		auto domg = g_dist.getDomainGhostIterator();
+
+		// check that the grid with the ghost part stores the correct information
+		while (domg.isNext())
+		{
+			auto key = domg.get();
+			auto key_g = g_dist.getGKey(key);
+
+			// In this case the boundary conditions are non periodic
+			if (g_dist.isInside(key_g))
+			{
+				BOOST_REQUIRE_EQUAL(g_dist.template get<0>(key),info.LinId(key_g));
+			}
+
+			++domg;
+		}
+	}

-	g_dist.write("");
+//	g_dist.write("");

/*	auto g_it = g_dist.getIteratorBulk();
diff --git a/src/Grid/grid_dist_key.hpp b/src/Grid/grid_dist_key.hpp
index ab8ef5b20..8cc296c08 100644
--- a/src/Grid/grid_dist_key.hpp
+++ b/src/Grid/grid_dist_key.hpp
@@ -3,7 +3,7 @@

/*! \brief Grid key for a distributed grid
 *
- * Grid key for a distributed grid
+ * It contains the local sub-domain grid it comes from, and the local grid_key_dx
 *
 */

@@ -25,7 +25,7 @@ public:
 	 * \return the id of the local grid
 	 *
 	 */
-	size_t getSub()
+	size_t getSub() const
 	{
 		return g_c;
 	}

@@ -35,7 +35,7 @@ public:
 	 * \return the local key
 	 *
 	 */
-	grid_key_dx<dim> getKey()
+	grid_key_dx<dim> getKey() const
 	{
 		return key;
 	}
--
GitLab