Commit c3e6ff5f authored by Pietro Incardona

Adding ghost area, compiling and testing

parent 726c6d4c
@@ -1298,14 +1298,12 @@ pdata-main.o: main.cpp /usr/include/stdc-predef.h \
Grid/grid_dist_key.hpp \
../../OpenFPM_data/src/NN/CellList/CellDecomposer.hpp \
../../OpenFPM_data/src/Space/Matrix.hpp \
../../OpenFPM_data/src/Point_test.hpp \
../../OpenFPM_data/src/base_type.hpp \
../../OpenFPM_data/src/Point_orig.hpp \
../../OpenFPM_data/src/Grid/Encap.hpp \
Decomposition/CartDecomposition.hpp Decomposition/Decomposition.hpp \
../../OpenFPM_data/src/global_const.hpp SubdomainGraphNodes.hpp \
metis_util.hpp ../../metis_install/include/metis.h \
/usr/include/inttypes.h ../../OpenFPM_IO/src/VTKWriter.hpp \
../../OpenFPM_data/src/util/object_util.hpp \
../../OpenFPM_data/src/util/object_creator.hpp \
../../OpenFPM_data/src/util/object_s_di.hpp \
../../OpenFPM_data/src/util/object_si_d.hpp \
../../OpenFPM_devices/src/memory/ExtPreAlloc.hpp \
../../OpenFPM_IO/src/VTKWriter.hpp \
../../OpenFPM_IO/src/VTKWriter_graph.hpp \
../../OpenFPM_IO/src/VTKWriter_vector_box.hpp \
/usr/include/boost/math/special_functions/pow.hpp \
@@ -1370,7 +1368,15 @@ pdata-main.o: main.cpp /usr/include/stdc-predef.h \
/usr/include/boost/iostreams/detail/config/dyn_link.hpp \
/usr/include/boost/iostreams/detail/path.hpp \
/usr/include/boost/config/abi_prefix.hpp \
/usr/include/boost/config/abi_suffix.hpp dec_optimizer.hpp \
/usr/include/boost/config/abi_suffix.hpp \
../../OpenFPM_data/src/Point_test.hpp \
../../OpenFPM_data/src/base_type.hpp \
../../OpenFPM_data/src/Point_orig.hpp \
../../OpenFPM_data/src/Grid/Encap.hpp \
Decomposition/CartDecomposition.hpp Decomposition/Decomposition.hpp \
../../OpenFPM_data/src/global_const.hpp SubdomainGraphNodes.hpp \
metis_util.hpp ../../metis_install/include/metis.h \
/usr/include/inttypes.h dec_optimizer.hpp \
/usr/include/c++/4.8.3/unordered_map \
/usr/include/c++/4.8.3/bits/hashtable.h \
/usr/include/c++/4.8.3/bits/hashtable_policy.h \
@@ -1394,11 +1400,6 @@ pdata-main.o: main.cpp /usr/include/stdc-predef.h \
../../OpenFPM_devices/src/memory/PreAllocHeapMemory.hpp \
../../OpenFPM_devices/src/memory/HeapMemory.hpp \
../../OpenFPM_devices/src/memory/PtrMemory.hpp \
../../OpenFPM_data/src/util/object_util.hpp \
../../OpenFPM_data/src/util/object_creator.hpp \
../../OpenFPM_data/src/util/object_s_di.hpp \
../../OpenFPM_data/src/util/object_si_d.hpp \
../../OpenFPM_devices/src/memory/ExtPreAlloc.hpp \
../../OpenFPM_IO/src/CSVWriter.hpp \
../../OpenFPM_IO/src/csv_multiarray.hpp \
Decomposition/CartDecomposition_unit_test.hpp \
@@ -4266,27 +4267,15 @@ Grid/grid_dist_key.hpp:
../../OpenFPM_data/src/Space/Matrix.hpp:
../../OpenFPM_data/src/Point_test.hpp:
../../OpenFPM_data/src/base_type.hpp:
../../OpenFPM_data/src/Point_orig.hpp:
../../OpenFPM_data/src/Grid/Encap.hpp:
Decomposition/CartDecomposition.hpp:
Decomposition/Decomposition.hpp:
../../OpenFPM_data/src/global_const.hpp:
../../OpenFPM_data/src/util/object_util.hpp:
SubdomainGraphNodes.hpp:
../../OpenFPM_data/src/util/object_creator.hpp:
metis_util.hpp:
../../OpenFPM_data/src/util/object_s_di.hpp:
../../metis_install/include/metis.h:
../../OpenFPM_data/src/util/object_si_d.hpp:
/usr/include/inttypes.h:
../../OpenFPM_devices/src/memory/ExtPreAlloc.hpp:
../../OpenFPM_IO/src/VTKWriter.hpp:
@@ -4420,6 +4409,28 @@ metis_util.hpp:
/usr/include/boost/config/abi_suffix.hpp:
../../OpenFPM_data/src/Point_test.hpp:
../../OpenFPM_data/src/base_type.hpp:
../../OpenFPM_data/src/Point_orig.hpp:
../../OpenFPM_data/src/Grid/Encap.hpp:
Decomposition/CartDecomposition.hpp:
Decomposition/Decomposition.hpp:
../../OpenFPM_data/src/global_const.hpp:
SubdomainGraphNodes.hpp:
metis_util.hpp:
../../metis_install/include/metis.h:
/usr/include/inttypes.h:
dec_optimizer.hpp:
/usr/include/c++/4.8.3/unordered_map:
@@ -4486,16 +4497,6 @@ Vector/vector_dist_key.hpp:
../../OpenFPM_devices/src/memory/PtrMemory.hpp:
../../OpenFPM_data/src/util/object_util.hpp:
../../OpenFPM_data/src/util/object_creator.hpp:
../../OpenFPM_data/src/util/object_s_di.hpp:
../../OpenFPM_data/src/util/object_si_d.hpp:
../../OpenFPM_devices/src/memory/ExtPreAlloc.hpp:
../../OpenFPM_IO/src/CSVWriter.hpp:
../../OpenFPM_IO/src/csv_multiarray.hpp:
......
@@ -74,6 +74,20 @@ class CartDecomposition
size_t proc;
};
struct Box_dom
{
// Intersection between the local sub-domain enlarged by the ghost and the contiguous processor
// sub-domains (External ghost)
openfpm::vector< ::Box<dim,T> > ebx;
// Intersection between the contiguous processor sub-domain enlarged by the ghost with the
// local sub-domain (Internal ghost)
openfpm::vector< ::Box<dim,T> > ibx;
// Domain id
size_t dom;
};
public:
//! Type of the domain we are going to decompose
@@ -101,11 +115,15 @@ private:
//! List of near processors
openfpm::vector<size_t> nn_processors;
//! for each sub-domain, contains the list of the neighborhood processors
//! for each sub-domain (first vector), contains the list (nested vector) of the neighborhood processors,
//! and for each processor contains the boxes calculated from the intersection
//! of the sub-domain + ghost with the near-by processor sub-domain
//! of the sub-domains + ghost with the near-by processor sub-domains, and the other way around
//! \see calculateGhostBoxes
openfpm::vector< openfpm::vector< Box_proc > > box_nn_processor_int;
//! It stores the same information as box_nn_processor_int, organized by processor id
openfpm::vector< Box_dom > proc_int_box;
//! for each sub-domain, contains the list of the neighborhood processors
openfpm::vector<openfpm::vector<long unsigned int> > box_nn_processor;
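Since the nesting of these three members is easy to misread, here is a hedged, standalone sketch of the indexing (std::vector and minimal stand-in structs replace the openfpm types; the member names follow the declarations above):

#include <vector>
#include <cstddef>

struct BoxT { /* stand-in for ::Box<dim,T> */ };
struct Box_proc { std::vector<BoxT> bx, nbx; };              // external / internal ghost boxes
struct Box_dom  { std::vector<BoxT> ebx, ibx; std::size_t dom; }; // same info, grouped by processor

int main()
{
    // outer index: local sub-domain i; inner index: j-th adjacent processor
    std::vector<std::vector<std::size_t>> box_nn_processor;     // processor ids
    std::vector<std::vector<Box_proc>>    box_nn_processor_int; // the exchanged boxes

    // one entry per near processor (indexed by the local near-processor id)
    std::vector<Box_dom> proc_int_box;

    for (std::size_t i = 0; i < box_nn_processor.size(); i++)
        for (std::size_t j = 0; j < box_nn_processor[i].size(); j++)
        {
            std::size_t p_id = box_nn_processor[i][j];       // adjacent processor id
            Box_proc & bp = box_nn_processor_int[i][j];      // boxes shared with it
            (void)p_id; (void)bp;
        }
    (void)proc_int_box;
    return 0;
}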
@@ -328,6 +346,157 @@ private:
}
}
/*! \brief Create the box_nn_processor_int (bx part) structure
*
* This structure stores, for each sub-domain of this processor enlarged by the ghost size, the boxes
* that come from the intersection with the near processor sub-domains (external ghost boxes)
*
* \param ghost ghost margins
*
* \note These are the G8_0 G9_0 G9_1 G5_0 boxes in calculateGhostBoxes
* \see calculateGhostBoxes
*
*/
void create_box_nn_processor_ext(Ghost<dim,T> & ghost)
{
box_nn_processor_int.resize(sub_domains.size());
// For each sub-domain
for (size_t i = 0 ; i < sub_domains.size() ; i++)
{
SpaceBox<dim,T> sub_with_ghost = sub_domains.get(i);
// enlarge the sub-domain with the ghost
sub_with_ghost.enlarge(ghost);
// resize based on the number of contiguous processors
box_nn_processor_int.get(i).resize(box_nn_processor.get(i).size());
// For each processor contiguous to this sub-domain
for (size_t j = 0 ; j < box_nn_processor.get(i).size() ; j++)
{
// Contiguous processor
size_t p_id = box_nn_processor.get(i).get(j);
// get the set of sub-domains of the contiguous processor p_id
openfpm::vector< ::Box<dim,T> > & p_box = nn_processor_subdomains[p_id].bx;
// near processor sub-domain intersections
openfpm::vector< ::Box<dim,T> > & p_box_int = box_nn_processor_int.get(i).get(j).bx;
// for each near processor sub-domain intersect with the enlarged local sub-domain and store it
for (size_t b = 0 ; b < p_box.size() ; b++)
{
::Box<dim,T> bi;
bool intersect = sub_with_ghost.Intersect(::Box<dim,T>(p_box.get(b)),bi);
if (intersect == true)
{
struct p_box pb;
pb.box = bi;
pb.proc = p_id;
pb.lc_proc = ProctoID(p_id);
vb_ext.add(pb);
p_box_int.add(bi);
}
}
}
}
}
/*! \brief Create the box_nn_processor_int (nbx part) structure, the geo_cell list and proc_int_box
*
* This structure stores, for each sub-domain of this processor, the boxes that come from the
* intersection of the near processor sub-domains enlarged by the ghost size (internal ghost boxes).
* These boxes fill a geometrical cell list. proc_int_box stores the same information ordered
* by near processor
*
* \param ghost ghost margins
*
* \note These are the B8_0 B9_0 B9_1 B5_0 boxes in calculateGhostBoxes
* \see calculateGhostBoxes
*
*/
void create_box_nn_processor_int(Ghost<dim,T> & ghost)
{
box_nn_processor_int.resize(sub_domains.size());
proc_int_box.resize(getNNProcessors());
// For each sub-domain
for (size_t i = 0 ; i < sub_domains.size() ; i++)
{
// For each processor contiguous to this sub-domain
for (size_t j = 0 ; j < box_nn_processor.get(i).size() ; j++)
{
// Contiguous processor
size_t p_id = box_nn_processor.get(i).get(j);
// get the set of sub-domains of the contiguous processor p_id
openfpm::vector< ::Box<dim,T> > & nn_p_box = nn_processor_subdomains[p_id].bx;
// get the local processor id
size_t lc_proc = nn_processor_subdomains[p_id].id;
// For each near-processor sub-domain, enlarge it, intersect it with the local sub-domain, and store the result
for (size_t k = 0 ; k < nn_p_box.size() ; k++)
{
// enlarge the near-processor sub-domain
::Box<dim,T> n_sub = nn_p_box.get(k);
// local sub-domain
::SpaceBox<dim,T> l_sub = sub_domains.get(i);
// Create a margin of ghost size around the near processor sub-domain
n_sub.enlarge(ghost);
// Intersect with the local sub-domain
p_box b_int;
bool intersect = n_sub.Intersect(l_sub,b_int.box);
// store if it intersect
if (intersect == true)
{
// fill with the processor id
b_int.proc = p_id;
// fill the local processor id
b_int.lc_proc = lc_proc;
// near processor sub-domain intersections
openfpm::vector< ::Box<dim,T> > & p_box_int = box_nn_processor_int.get(i).get(j).nbx;
p_box_int.add(b_int.box);
vb_int.add(b_int);
// store the box in proc_int_box calculating the id
Box_dom & pr_box_int = proc_int_box.get(ProctoID(p_id));
pr_box_int.ibx.add(b_int.box);
// update the geo_cell list
// get the cells this box spans
const grid_key_dx<dim> p1 = geo_cell.getCellGrid(b_int.box.getP1());
const grid_key_dx<dim> p2 = geo_cell.getCellGrid(b_int.box.getP2());
// Get the grid and the sub-iterator
auto & gi = geo_cell.getGrid();
grid_key_dx_iterator_sub<dim> g_sub(gi,p1,p2);
// add the box-id to the cell list
while (g_sub.isNext())
{
auto key = g_sub.get();
geo_cell.addCell(gi.LinId(key),vb_int.size()-1);
++g_sub;
}
}
}
}
}
}
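The cell-list update at the end of create_box_nn_processor_int registers each internal ghost box in every cell it spans. A hedged, standalone sketch of that span iteration (a row-major 2D cell grid stands in for geo_cell; all numbers are illustrative):

#include <cstdio>
#include <cstddef>

int main()
{
    // suppose getCellGrid() mapped an internal ghost box to cells p1=(2,3) .. p2=(4,5)
    const std::size_t p1[2] = {2, 3}, p2[2] = {4, 5};
    const std::size_t box_id = 7;          // index of the box inside vb_int (vb_int.size()-1)
    const std::size_t cells_per_row = 16;  // illustrative cell-grid width for the linearization

    // the box id is added to every cell it touches: (4-2+1) * (5-3+1) = 9 cells
    for (std::size_t y = p1[1]; y <= p2[1]; y++)
        for (std::size_t x = p1[0]; x <= p2[0]; x++)
            printf("cell %zu <- box %zu\n", y * cells_per_row + x, box_id);
    return 0;
}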
// Heap memory receiver
HeapMemory hp_recv;
@@ -717,116 +886,9 @@ p1[0]<-----+ +----> p2[0]
// Get the sub-domains of the near processors
v_cl.sendrecvMultipleMessagesNBX(nn_processors,boxes,CartDecomposition<dim,T,device_l,Memory,Domain,data_s>::message_alloc, this ,NEED_ALL_SIZE);
box_nn_processor_int.resize(sub_domains.size());
// For each sub-domain
for (size_t i = 0 ; i < sub_domains.size() ; i++)
{
SpaceBox<dim,T> sub_with_ghost = sub_domains.get(i);
// enlarge the sub-domain with the ghost
sub_with_ghost.enlarge(ghost);
// resize based on the number of contiguous processors
box_nn_processor_int.get(i).resize(box_nn_processor.get(i).size());
create_box_nn_processor_ext(ghost);
// For each processor contiguous to this sub-domain
for (size_t j = 0 ; j < box_nn_processor.get(i).size() ; j++)
{
// Contiguous processor
size_t p_id = box_nn_processor.get(i).get(j);
// get the set of sub-domains of the contiguous processor p_id
openfpm::vector< ::Box<dim,T> > & p_box = nn_processor_subdomains[p_id].bx;
// near processor sub-domain intersections
openfpm::vector< ::Box<dim,T> > & p_box_int = box_nn_processor_int.get(i).get(j).bx;
// for each near processor sub-domain intersect with the enlarged local sub-domain and store it
for (size_t b = 0 ; b < p_box.size() ; b++)
{
::Box<dim,T> bi;
bool intersect = sub_with_ghost.Intersect(::Box<dim,T>(p_box.get(b)),bi);
if (intersect == true)
{
struct p_box pb;
pb.box = bi;
pb.proc = p_id;
pb.lc_proc = ProctoID(p_id);
vb_ext.add(pb);
p_box_int.add(bi);
}
}
}
// For each processor contiguous to this sub-domain
for (size_t j = 0 ; j < box_nn_processor.get(i).size() ; j++)
{
// Contiguous processor
size_t p_id = box_nn_processor.get(i).get(j);
// get the set of sub-domains of the contiguous processor p_id
openfpm::vector< ::Box<dim,T> > & nn_p_box = nn_processor_subdomains[p_id].bx;
// get the local processor id
size_t lc_proc = nn_processor_subdomains[p_id].id;
// near processor sub-domain intersections
openfpm::vector< ::Box<dim,T> > & p_box_int = box_nn_processor_int.get(i).get(j).nbx;
// For each near processor sub-domains enlarge and intersect with the local sub-domain and store the result
for (size_t k = 0 ; k < nn_p_box.size() ; k++)
{
// enlarge the near-processor sub-domain
::Box<dim,T> n_sub = nn_p_box.get(k);
// local sub-domain
::SpaceBox<dim,T> l_sub = sub_domains.get(i);
// Create a margin of ghost size around the near processor sub-domain
n_sub.enlarge(ghost);
// Intersect with the local sub-domain
p_box b_int;
bool intersect = n_sub.Intersect(l_sub,b_int.box);
// store if it intersect
if (intersect == true)
{
// fill with the processor id
b_int.proc = p_id;
// fill the local processor id
b_int.lc_proc = lc_proc;
p_box_int.add(b_int.box);
vb_int.add(b_int);
// update the geo_cell list
// get the boxes this box span
const grid_key_dx<dim> p1 = geo_cell.getCellGrid(b_int.box.getP1());
const grid_key_dx<dim> p2 = geo_cell.getCellGrid(b_int.box.getP2());
// Get the grid and the sub-iterator
auto & gi = geo_cell.getGrid();
grid_key_dx_iterator_sub<dim> g_sub(gi,p1,p2);
// add the box-id to the cell list
while (g_sub.isNext())
{
auto key = g_sub.get();
geo_cell.addCell(gi.LinId(key),vb_int.size()-1);
++g_sub;
}
}
}
}
}
create_box_nn_processor_int(ghost);
}
/*! \brief processorID returns in which processor the particle should go
@@ -1090,6 +1152,37 @@ p1[0]<-----+ +----> p2[0]
}
////////////// Functions to get decomposition information ///////////////
/*! \brief Get the number of internal ghost boxes for one processor id
*
* \param id near-processor id (careful: it is not the processor number)
* \return the number of internal ghost boxes
*
*/
inline size_t getProcessorNIGhost(size_t id) const
{
return proc_int_box.get(id).ibx.size();
}
/*! \brief Get the j-th internal ghost box for one processor id
*
* \param id near-processor id (careful: it is not the processor number)
* \param j box index
* \return the box
*
*/
inline const ::Box<dim,T> & getProcessorIGhostBox(size_t id, size_t j) const
{
return proc_int_box.get(id).ibx.get(j);
}
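A hedged usage sketch (not part of the commit) for the two accessors above; it uses only getNNProcessors, getProcessorNIGhost and getProcessorIGhostBox as declared here, on an assumed fully built decomposition:

#include <cstddef>

// Enumerate every internal ghost box, grouped per near processor.
template<typename Decomposition>
std::size_t count_internal_ghost_boxes(const Decomposition & dec)
{
    std::size_t n = 0;
    for (std::size_t p = 0 ; p < dec.getNNProcessors() ; p++)      // near-processor id, not rank
    {
        for (std::size_t j = 0 ; j < dec.getProcessorNIGhost(p) ; j++)
        {
            const auto & b = dec.getProcessorIGhostBox(p,j);       // j-th internal ghost box
            (void)b;  // inspect / pack the ghost region described by b
            n++;
        }
    }
    return n;
}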
/*! \brief Get the number of near processors
*
* \return the number of near processors
*
*/
inline size_t getNNProcessors() const
{
return nn_processors.size();
@@ -1198,7 +1291,7 @@ p1[0]<-----+ +----> p2[0]
//! p_sub_X.vtk domain for the processor X as union of sub-domains
VTKWriter<openfpm::vector<::SpaceBox<dim,T>>,VECTOR_BOX> vtk_box1;
vtk_box1.add(sub_domains);
vtk_box1.write(std::string("p_sub_") + std::to_string(v_cl.getProcessUnitID()) + std::string(".vtk"));
vtk_box1.write(output + std::string("p_sub_") + std::to_string(v_cl.getProcessUnitID()) + std::string(".vtk"));
//! sub_np_c_X.vtk sub-domains of the near processors contiguous to processor X (Color encoded)
VTKWriter<openfpm::vector<::Box<dim,T>>,VECTOR_BOX> vtk_box2;
@@ -1209,7 +1302,7 @@ p1[0]<-----+ +----> p2[0]
if (it != nn_processor_subdomains.end())
vtk_box2.add(nn_processor_subdomains.at(prc).bx);
}
vtk_box2.write(std::string("sub_np_c_") + std::to_string(v_cl.getProcessUnitID()) + std::string(".vtk"));
vtk_box2.write(output + std::string("sub_np_c_") + std::to_string(v_cl.getProcessUnitID()) + std::string(".vtk"));
//! sub_X_inte_g_np.vtk Intersection between the ghosts of the near processors and the processor X sub-domains (Color encoded)
VTKWriter<openfpm::vector<::Box<dim,T>>,VECTOR_BOX> vtk_box3;
@@ -1220,7 +1313,7 @@ p1[0]<-----+ +----> p2[0]
vtk_box3.add(box_nn_processor_int.get(p).get(s).nbx);
}
}
vtk_box3.write(std::string("sub_") + std::to_string(v_cl.getProcessUnitID()) + std::string("_inte_g_np") + std::string(".vtk"));
vtk_box3.write(output + std::string("sub_") + std::to_string(v_cl.getProcessUnitID()) + std::string("_inte_g_np") + std::string(".vtk"));
//! sub_X_ghost.vtk ghost for the processor X (Color encoded)
@@ -1232,7 +1325,9 @@ p1[0]<-----+ +----> p2[0]
vtk_box4.add(box_nn_processor_int.get(p).get(s).bx);
}
}
vtk_box4.write(std::string("sub_") + std::to_string(v_cl.getProcessUnitID()) + std::string("_ghost") + std::string(".vtk"));
vtk_box4.write(output + std::string("sub_") + std::to_string(v_cl.getProcessUnitID()) + std::string("_ghost") + std::string(".vtk"));
return true;
}
};
......
@@ -9,6 +9,9 @@
#include "grid_dist_id_iterator.hpp"
#include "grid_dist_key.hpp"
#include "NN/CellList/CellDecomposer.hpp"
#include "util/object_util.hpp"
#include "memory/ExtPreAlloc.hpp"
#include "VTKWriter.hpp"
#define SUB_UNIT_FACTOR 64
@@ -100,11 +103,47 @@ class grid_dist_id
}
}
/*! \brief Create per-processor internal ghost box list in grid units
*
*/
void create_ig_box()
{
// Get the grid info
auto g = cd_sm.getGrid();
if (init_i_g_box == true) return;
// Get the number of near processors
for (size_t i = 0 ; i < dec.getNNProcessors() ; i++)
{
ig_box.add();
auto&& pib = ig_box.last();
pib.prc = dec.IDtoProc(i);
for (size_t j = 0 ; j < dec.getProcessorNIGhost(i) ; j++)
{
// Get the internal ghost boxes and transform into grid units
::Box<dim,St> ib = dec.getProcessorIGhostBox(i,j);
ib /= cd_sm.getCellBox().getP2();
// save the box and the sub-domain id (it is calculated as the linearization of P1)
// It is unique because the boxes are guaranteed not to overlap
::Box<dim,size_t> cvt = ib;
Box_id bid_t(cvt);
bid_t.id = 0/*g.LinId(bid_t.box.getKP1())*/;
pib.bid.add(bid_t);
}
}
init_i_g_box = true;
}
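A hedged worked example of the space-to-grid-unit conversion performed by ib /= cd_sm.getCellBox().getP2() above (all numbers are illustrative, not taken from the commit):

#include <cstdio>

int main()
{
    // 1D illustration: domain [0,1] discretized with 1024 grid points gives a
    // cell size of 1/1024 (assumed to be what getCellBox().getP2() holds per axis)
    const float cell_size = 1.0f / 1024.0f;

    // an internal ghost box [0.99, 1.0] in space coordinates ...
    const float lo = 0.99f, hi = 1.0f;

    // ... divided by the cell size lands in grid units, roughly [1013.76, 1024];
    // the commit then stores it in an integer box (::Box<dim,size_t> cvt = ib),
    // which presumably truncates the fractional part
    printf("grid units: [%f, %f]\n", lo / cell_size, hi / cell_size);
    return 0;
}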
public:
//! constructor
grid_dist_id(Vcluster v_cl, Decomposition & dec, const size_t (& g_sz)[dim], const Box<dim,St> & domain, const Ghost<dim,T> & ghost)
:domain(domain),cd_sm(domain,g_sz,0),ghost(ghost),loc_grid(NULL),v_cl(v_cl),dec(dec)
:domain(domain),ghost(ghost),loc_grid(NULL),cd_sm(domain,g_sz,0),v_cl(v_cl),dec(dec)
{
// fill the global size of the grid
for (int i = 0 ; i < dim ; i++) {this->g_sz[i] = g_sz[i];}
@@ -125,6 +164,9 @@ public:
// Create local grid
Create();
// Calculate ghost boxes
dec.calculateGhostBoxes(ghost);
}
/*! \brief Constructor
@@ -133,8 +175,8 @@ public:
* \param domain
*
*/
grid_dist_id(const size_t (& g_sz)[dim],const Box<dim,St> & domain)
:domain(domain),cd_sm(domain,g_sz,0),ghost(0),dec(Decomposition(*global_v_cluster)),v_cl(*global_v_cluster)
grid_dist_id(const size_t (& g_sz)[dim],const Box<dim,St> & domain, const Ghost<dim,St> & g)
:domain(domain),ghost(g),dec(Decomposition(*global_v_cluster)),cd_sm(domain,g_sz,0),v_cl(*global_v_cluster)
{
// fill the global size of the grid
for (int i = 0 ; i < dim ; i++) {this->g_sz[i] = g_sz[i];}
@@ -155,6 +197,9 @@ public:
// Create local grid
Create();
// Calculate ghost boxes
dec.calculateGhostBoxes(ghost);
}
/*! \brief Get the object that store the decomposition information
@@ -177,6 +222,9 @@ public:
// Box used for rounding error
Box<dim,St> rnd_box;
for (size_t i = 0 ; i < dim ; i++) {rnd_box.setHigh(i,0.5); rnd_box.setLow(i,0.5);}
// Box used for rounding in case of ghost
Box<dim,St> g_rnd_box;
for (size_t i = 0 ; i < dim ; i++) {g_rnd_box.setHigh(i,0.5); g_rnd_box.setLow(i,-0.5);}
// ! Create an hyper-cube approximation.
// ! In order to work on grid_dist the decomposition
@@ -206,30 +254,32 @@ public:
// enlarge by 0.5 for rounding
sp.enlarge(rnd_box);
// Convert from SpaceBox<dim,float> to SpaceBox<dim,size_t>
SpaceBox<dim,size_t> sp_t = sp;
// Convert from SpaceBox<dim,float> to SpaceBox<dim,long int>
SpaceBox<dim,long int> sp_t = sp;
// convert the ghost from space coordinate to grid units
Ghost<dim,St> g_int = ghost;
g_int /= cd_sm.getCellBox().getP2();
// enlarge by 0.5 for rounding
g_int.enlarge(rnd_box);
g_int.enlarge(g_rnd_box);
// convert from Ghost<dim,St> to Ghost<dim,long int>
Ghost<dim,long int> g_int_t = g_int;
// convert from Ghost<dim,St> to Ghost<dim,size_t>
Ghost<dim,size_t> g_int_t = g_int;
// Center the local grid to zero
sp_t -= sp_t.getP1();
// for each local grid save the extension of ghost + domain part
// save the domain box seen inside the domain + ghost box (see GDBoxes for a visual meaning)
gdb_ext.last().Dbox = sp_t;
gdb_ext.last().Dbox -= g_int_t.getP1();
gdb_ext.last().Dbox.shrinkP2(1);
// Enlarge sp with the Ghost size
sp_t.enlarge_fix_P1(g_int_t);
// Translate the domain (see GDBoxes for a visual meaning)
gdb_ext.last().Dbox.translate(g_int_t.getP1());
// Get the local size
for (size_t i = 0 ; i < dim ; i++) {l_res[i] = sp_t.getHigh(i) - sp_t.getLow(i);}
// Get the size of the local grid
for (size_t i = 0 ; i < dim ; i++) {l_res[i] = sp_t.getHigh(i);}
// Set the dimensions of the local grid
loc_grid.get(i).template resize<Memory>(l_res);
@@ -279,6 +329,204 @@ public:
{
return loc_grid.get(v1.getSub()).template get<p>(v1.getKey());
}
/*! \brief It stores a box and its unique id
*
*/
struct Box_id
{
//! Constructor
inline Box_id(const ::Box<dim,size_t> & box)
:box(box)
{}
//! Box
::Box<dim,size_t> box;
//! id
size_t id;
};
/*! \brief Per-processor internal ghost boxes
*
*/
struct p_box_grid
{
// Internal ghost in grid units
openfpm::vector<Box_id> bid;
//! processor id
size_t prc;
};
//! Flag that indicate if internal ghost box has been initialized
bool init_i_g_box = false;
//! Internal ghost boxes in grid units
openfpm::vector<p_box_grid> ig_box;
/*! \brief It synchronizes, getting the ghost part of the grid
*
* \tparam prp properties to get (sequence of property ids)
*
*/
template<int... prp> void ghost_get()
{
// Sending property object
typedef object<typename object_creator<typename T::type,prp...>::type> prp_object;
// send vector for each processor
typedef openfpm::vector<prp_object,openfpm::device_cpu<prp_object>,ExtPreAlloc<Memory>> send_vector;
// Send buffer size in bytes (one buffer for all processors)
size_t size_byte_prp = 0;
create_ig_box();
// Convert the ghost boxes into grid unit boxes
/* const openfpm::vector<p_box_grid> p_box_g;
// total number of sending vector
size_t n_send_vector = 0;
// Calculate the total size required for the sending buffer for each processor
for ( size_t i = 0 ; i < p_box_g.size() ; i++ )
{
// for each ghost box
for (size_t j = 0 ; j < p_box_g.get(i).box.size() ; j++)
{
size_t alloc_ele = openfpm::vector<prp_object>::calculateMem(p_box_g.get(i).box.get(j).volume(),0);
pap_prp.push_back(alloc_ele);
size_byte_prp += alloc_ele;
n_send_vector++;
}
}
// resize the property buffer memory
g_prp_mem.resize(size_byte_prp);
// Create an object of preallocated memory for properties
ExtPreAlloc<Memory> * prAlloc_prp = new ExtPreAlloc<Memory>(pap_prp,g_prp_mem);
// create a vector of send vector (ExtPreAlloc warrants that all the created vectors are contiguous)
openfpm::vector<send_vector> g_send_prp;
// create a number of send buffers equal to the near processors
g_send_prp.resize(n_send_vector);
// Create the vectors giving them memory
for ( size_t i = 0 ; i < p_box_g.size() ; i++ )
{
// for each ghost box
for (size_t j = 0 ; j < p_box_g.get(i).box.size() ; j++)
{
// set the preallocated memory to ensure contiguity
g_send_prp.get(i).setMemory(*prAlloc_prp);
// resize the sending vector (No allocation is produced)
g_send_prp.get(i).resize(ghost_prc_sz.get(i));
}
}
// Fill the sending buffers and produce a sending request
for ( size_t i = 0 ; i < p_box_g.size() ; i++ )
{
// for each ghost box
for (size_t j = 0 ; j < p_box_g.get(i).box.size() ; j++)
{
// Create a sub grid iterator of the ghost
grid_key_dx_iterator_sub<dim> g_it(,p_box_g.getKP1(),p_box_g.getKP2());
while (g_it.isNext())
{
// copy all the object in the send buffer
typedef encapc<1,prop,typename openfpm::vector<prop>::memory_t> encap_src;
// destination object type
typedef encapc<1,prp_object,typename openfpm::vector<prp_object>::memory_t> encap_dst;
// Copy only the selected properties
object_si_d<encap_src,encap_dst,ENCAP,prp...>(v_prp.get(INTERNAL).get(opart.get(i).get(j)),g_send_prp.get(i).get(j));
++g_it;
}
}
// Queue a send request
v_cl.send(p_box_g.get(i).proc,g_send_prp.get(i));
}
// Calculate the receive pattern and allocate the receive buffers
// For each processor get the ghost boxes
const openfpm::vector<p_box> & p_box_ext = dec.getGhostExternalBox();
// Convert the ghost boxes into grid unit boxes
const openfpm::vector<p_box_grid> p_box_g_ext;
size_byte_prp = 0;
// Calculate the receive pattern
for ( size_t i = 0 ; i < p_box_g_ext.size() ; i++ )
{
// for each ghost box
for (size_t j = 0 ; j < p_box_g_ext.get(i).box.size() ; j++)
{
size_t alloc_ele = openfpm::vector<prp_object>::calculateMem(p_box_g_ext.get(i).box.get(j).volume(),0);
pap_prp_recv.push_back(alloc_ele);
size_byte_prp += alloc_ele;
}
v_cl.recv();
}
// Fill the internal ghost
// wait to receive communication
v_cl.execute();
// move the received buffers into the ghost part
*/
}
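The prp_object typedef at the top of ghost_get builds a reduced aggregate holding only the selected properties. A hedged, standalone analogue of that selection using std::tuple (object_creator's real machinery lives in OpenFPM_data and differs):

#include <tuple>
#include <type_traits>

// Pick a subset of a property pack by index, analogous in spirit to
// object_creator<typename T::type, prp...>::type in the code above.
template<typename Tuple, int... prp>
using selected_t = std::tuple<typename std::tuple_element<prp, Tuple>::type...>;

int main()
{
    using props = std::tuple<float, double, int>;  // full property set of a grid point
    using sub   = selected_t<props, 0, 2>;         // ghost_get<0,2> would send only these

    static_assert(std::is_same<sub, std::tuple<float, int>>::value,
                  "only the selected properties are packed into the send buffer");
    return 0;
}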
/*! \brief Write the grid_dist_id information as VTK files
*
* The function generates the following file
*
* 1) internal_ghost_X.vtk: internal ghost boxes in grid units (color encoded)
*
* where X is the processor number
*
* \param output directory where to write the files
*
*/
bool write(std::string output) const
{
VTKWriter<openfpm::vector<::Box<dim,size_t>>,VECTOR_BOX> vtk_box1;
openfpm::vector< openfpm::vector< ::Box<dim,size_t> > > boxes;
//! Careful: boxes must not reallocate inside the for loop below, hence the reserve
boxes.reserve(ig_box.size());
//! Write internal ghost in grid units (Color encoded)
for (size_t p = 0 ; p < ig_box.size() ; p++)
{
boxes.add();
// Create a vector of boxes
for (size_t j = 0 ; j < ig_box.get(p).bid.size() ; j++)
{
boxes.last().add(ig_box.get(p).bid.get(j).box);
}
vtk_box1.add(boxes.last());
}
vtk_box1.write(output + std::string("internal_ghost_") + std::to_string(v_cl.getProcessUnitID()) + std::string(".vtk"));
return true;
}
};
/*! \brief This is a distributed grid
......
@@ -38,9 +38,9 @@ template<unsigned int dim>
struct GBoxes
{
//! Ghost + Domain ghost
Box<dim,size_t> GDbox;
Box<dim,long int> GDbox;
//! Domain box
Box<dim,size_t> Dbox;
Box<dim,long int> Dbox;
};
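A hedged 1D illustration of the relation between the two boxes (the numbers are assumptions chosen for the picture, not taken from the commit):

// 1D sketch: a local sub-domain of 10 grid points with a ghost of 2 points
// per side, expressed in local (ghost-included) coordinates:
//
//   index:   0  1  2  3 ... 11 12 13
//            g  g  d  d ...  d  g  g    (d = domain point, g = ghost point)
//
//   GDbox = [0, 13]   // ghost + domain: the full extent of the local grid
//   Dbox  = [2, 11]   // the domain part only, positioned inside GDbox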
#include "grid_dist_key.hpp"
@@ -78,7 +78,7 @@ class grid_dist_iterator
*
*/
grid_dist_iterator(Vcluster_object_array<device_grid> & gk, const openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext)
:g_c(0),gdb_ext(gdb_ext),gList(gk),m(m)
:g_c(0),gList(gk),gdb_ext(gdb_ext),m(m)
{
// Initialize the current iterator
// with the first grid
......
@@ -39,12 +39,13 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_iterator_test_use)
// grid size
size_t sz[2] = {1024,1024};
// Distributed grid with id decomposition
// Ghost
Ghost<2,float> g(0.01);
grid_dist_id<2, float, scalar<float>, CartDecomposition<2,float>> g_dist(sz,domain);
// Distributed grid with id decomposition
grid_dist_id<2, float, scalar<float>, CartDecomposition<2,float>> g_dist(sz,domain,g);
// get the domain iterator
size_t count = 0;
auto dom = g_dist.getDomainIterator();
@@ -84,6 +85,10 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_iterator_test_use)
++dom2;
}
g_dist.template ghost_get<0>();
g_dist.write("");
/* auto g_it = g_dist.getIteratorBulk();
auto g_it_halo = g_dist.getHalo();
......
@@ -393,8 +393,7 @@ public:
/*! \brief It synchronizes, getting the ghost particles
*
* \prp Properties to get
* \opt options
* NO_RELABEL: If the particles does not move avoid to relabel and send particle position
* \opt options; WITH_POSITION also sends the positional information of the particles
*
*/
template<int... prp> void ghost_get(size_t opt = WITH_POSITION)
......
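A hedged usage sketch of the vector_dist ghost_get documented above (vd is an assumed, already-filled vector_dist; p::s and p::v are the property ids used in the tests that follow):

// default opt = WITH_POSITION: ghost particles arrive together with their positions
vd.template ghost_get<p::s, p::v>();

// The ghost iterator in the tests below can then read the received data,
// e.g. vd.template getProp<p::s>(key).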
@@ -123,10 +123,6 @@ BOOST_AUTO_TEST_CASE( vector_dist_ghost )
{
auto key = g_it.get();
float x0 = vd.getPos<s::x>(key)[0];
float x1 = vd.getPos<s::x>(key)[1] * 16;
float scalar = vd.template getProp<p::s>(key);
// Check the received data
BOOST_REQUIRE_EQUAL(vd.getPos<s::x>(key)[0] + vd.getPos<s::x>(key)[1] * 16,vd.template getProp<p::s>(key));
@@ -151,12 +147,6 @@ BOOST_AUTO_TEST_CASE( vector_dist_ghost )
// Check that the particle come from the correct processor
BOOST_REQUIRE_EQUAL(vd.getProp<p::v>(key)[0],dec.getGhostBoxProcessor(lb));
if (b == 0)
{
int debug = 0;
debug++;
}
++g_it;
}
@@ -173,7 +163,6 @@ BOOST_AUTO_TEST_CASE( vector_dist_ghost )
BOOST_AUTO_TEST_CASE( vector_dist_iterator_test_use )
{
typedef Point_test<float> p;
typedef Point<2,float> s;
Vcluster & v_cl = *global_v_cluster;
......