Commit a17d925c authored by incardon

Map working for grid_dist_id

parent 52bf0490
@@ -665,6 +665,94 @@ void Test3D_amr_domain_ghost_it(grid & amr_g, Box<3,float> & domain, size_t coar
BOOST_REQUIRE_EQUAL(gtot_count,total_all_level);
}
template<typename grid_amr>
void Test3D_ghost_put(grid_amr & g_dist_amr, long int k)
{
size_t sz[3] = {k,k,k};
g_dist_amr.initLevels(4,sz);
// Grid sm
grid_sm<3,void> info(sz);
size_t count = 0;
for (size_t i = 0 ; i < g_dist_amr.getNLvl() ; i++)
{
auto dom = g_dist_amr.getGridIterator(i);
while (dom.isNext())
{
auto key = dom.get_dist();
g_dist_amr.template insert<0>(i,key) = -6.0;
// Count the points
count++;
++dom;
}
}
// Bring the full grid to zero: add +1 to each of the six face neighbors of every point (-6 + 6*1 = 0 once ghosts are merged)
{
auto dom = g_dist_amr.getDomainIterator();
while (dom.isNext())
{
auto key = dom.get();
g_dist_amr.template insert<0>(key.moveSpace(0,1)) += 1.0;
g_dist_amr.template insert<0>(key.moveSpace(0,-1)) += 1.0;
g_dist_amr.template insert<0>(key.moveSpace(1,1)) += 1.0;
g_dist_amr.template insert<0>(key.moveSpace(1,-1)) += 1.0;
g_dist_amr.template insert<0>(key.moveSpace(2,1)) += 1.0;
g_dist_amr.template insert<0>(key.moveSpace(2,-1)) += 1.0;
++dom;
}
}
bool correct = true;
// Domain iterator
auto dom_gi = g_dist_amr.getDomainIterator();
while (dom_gi.isNext())
{
auto key = dom_gi.get();
correct &= (g_dist_amr.template get<0>(key) == 0);
++dom_gi;
}
g_dist_amr.template ghost_put<add_,0>();
if (count != 0)
{BOOST_REQUIRE_EQUAL(correct, false);}
// sync the ghosts
g_dist_amr.template ghost_get<0>();
correct = true;
// Domain iterator
auto dom_gi2 = g_dist_amr.getDomainIterator();
while (dom_gi2.isNext())
{
auto key = dom_gi2.get();
correct &= (g_dist_amr.template get<0>(key) == 0);
++dom_gi2;
}
BOOST_REQUIRE_EQUAL(correct, true);
}
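The invariant checked here: every point starts at -6 and each of the six face-neighbor updates deposits +1 on it; near sub-domain boundaries some deposits land on ghost copies owned by other processors, and ghost_put<add_,0> merges them back, after which every point is exactly zero (hence the earlier requirement that correct is false before the merge on a non-empty grid). A minimal worked check of that arithmetic, as a sketch independent of any grid:

// Sketch of the cancellation Test3D_ghost_put relies on: in 3D each
// point has two face neighbors per dimension, so it collects 6
// increments of +1 on top of its initial value of -6.
#include <cassert>

int main()
{
    long int start = -6;
    long int contributions = 2 * 3;        // +/- direction per dimension
    assert(start + contributions == 0);    // matches get<0>(key) == 0
    return 0;
}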
template <typename> struct Debug;
BOOST_AUTO_TEST_CASE( grid_dist_amr_get_child_test_nop )
@@ -838,127 +926,7 @@ BOOST_AUTO_TEST_CASE( grid_dist_amr_test_background_value )
}
BOOST_AUTO_TEST_CASE( grid_dist_amr_get_domain_ghost_check )
{
// Domain
Box<3,float> domain3({0.0,0.0,0.0},{1.0,1.0,1.0});
Ghost<3,long int> g(1);
sgrid_dist_amr<3,float,aggregate<long int,long int,long int>> amr_g2(domain3,g);
size_t g_sz[3] = {4,4,4};
amr_g2.initLevels(4,g_sz);
// This point is on a ghost
grid_dist_key_dx<3> key(0,grid_key_dx<3>({0,0,0}));
amr_g2.template insert<0>(1,key) = 555;
auto dgit = amr_g2.getDomainGhostIterator();
int cnt = 0;
while (dgit.isNext())
{
cnt++;
++dgit;
}
BOOST_REQUIRE_EQUAL(cnt,1);
}
template<typename grid_amr>
void Test3D_ghost_put(grid_amr & g_dist_amr, long int k)
{
// check the consistency of the decomposition
bool val = g_dist_amr.getDecomposition().check_consistency();
BOOST_REQUIRE_EQUAL(val,true);
size_t sz[3] = {k,k,k};
// Grid sm
grid_sm<3,void> info(sz);
size_t count = 0;
auto dom = g_dist_amr.getGridIterator();
while (dom.isNext())
{
auto key = dom.get_dist();
g_dist_amr.template insert<0>(key) = -6.0;
// Count the points
count++;
++dom;
}
// Bring the full grid to zero: add +1 to each of the six face neighbors of every point (-6 + 6*1 = 0 once ghosts are merged)
{
auto dom = g_dist_amr.getDomainIterator();
while (dom.isNext())
{
auto key = dom.get();
g_dist_amr.template insert<0>(key.move(0,1)) += 1.0;
g_dist_amr.template insert<0>(key.move(0,-1)) += 1.0;
g_dist_amr.template insert<0>(key.move(1,1)) += 1.0;
g_dist_amr.template insert<0>(key.move(1,-1)) += 1.0;
g_dist_amr.template insert<0>(key.move(2,1)) += 1.0;
g_dist_amr.template insert<0>(key.move(2,-1)) += 1.0;
++dom;
}
}
bool correct = true;
// Domain iterator
auto dom_gi = g_dist_amr.getDomainIterator();
while (dom_gi.isNext())
{
auto key = dom_gi.get();
correct &= (g_dist_amr.template get<0>(key) == 0);
++dom_gi;
}
g_dist_amr.template ghost_put<add_,0>();
if (count != 0)
{BOOST_REQUIRE_EQUAL(correct, false);}
// sync the ghosts
g_dist_amr.template ghost_get<0>();
correct = true;
// Domain iterator
auto dom_gi2 = g_dist_amr.getDomainIterator();
while (dom_gi2.isNext())
{
auto key = dom_gi2.get();
correct &= (g_dist_amr.template get<0>(key) == 0);
++dom_gi2;
}
BOOST_REQUIRE_EQUAL(correct, true);
}
BOOST_AUTO_TEST_CASE( grid_dist_amr_get_domain_ghost_put_check )
{
// Test grid periodic
@@ -986,14 +954,16 @@ BOOST_AUTO_TEST_CASE( grid_dist_amr_get_domain_ghost_put_check )
periodicity<3> pr = {{PERIODIC,PERIODIC,PERIODIC}};
// Distributed grid with id decomposition
grid_dist_id<3, float, aggregate<long int>> g_dist(sz,domain,g,pr);
grid_dist_amr<3, float, aggregate<long int>> g_dist(domain,g,pr);
Test3D_ghost_put(g_dist,k);
// Sparse distributed grid with id decomposition
sgrid_dist_id<3, float, aggregate<long int>> sg_dist(sz,domain,g,pr);
sgrid_dist_amr<3, float, aggregate<long int>> sg_dist(domain,g,pr);
Test3D_ghost_put(sg_dist,k);
}
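Note the construction change this test now exercises: the fixed-size grid_dist_id/sgrid_dist_id constructors took the size array sz, whereas the AMR wrappers take only domain, ghost and periodicity, and receive their per-level sizes afterwards through initLevels (called inside Test3D_ghost_put). A hedged sketch of the new pattern, with an illustrative level count and coarsest size:

// Sketch of the grid_dist_amr construction pattern used above
// (values are illustrative; the sizes come from initLevels, not the
// constructor).
Box<3,float> domain({0.0,0.0,0.0},{1.0,1.0,1.0});
Ghost<3,long int> g(1);
periodicity<3> pr = {{PERIODIC,PERIODIC,PERIODIC}};

grid_dist_amr<3,float,aggregate<long int>> amr(domain,g,pr);

size_t sz[3] = {16,16,16};   // coarsest-level size (illustrative)
amr.initLevels(4,sz);        // 4 levels, as in Test3D_ghost_put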
BOOST_AUTO_TEST_SUITE_END()
@@ -762,7 +762,10 @@ public:
cart.cd = cd;
cart.domain = domain;
for (size_t i = 0 ; i < dim ; i++)
{cart.spacing[i] = spacing[i];};
{
cart.spacing[i] = spacing[i];
cart.magn[i] = magn[i];
};
cart.bbox = bbox;
cart.ghost = g;
@@ -800,8 +803,13 @@ public:
cart.gr = gr;
cart.cd = cd;
cart.domain = domain;
cart.gr_dist = gr_dist;
cart.dist = dist;
for (size_t i = 0 ; i < dim ; i++)
{cart.spacing[i] = spacing[i];};
{
cart.spacing[i] = spacing[i];
cart.magn[i] = magn[i];
};
cart.ghost = ghost;
@@ -832,9 +840,14 @@ public:
gr = cart.gr;
cd = cart.cd;
domain = cart.domain;
gr_dist = cart.gr_dist;
dist = cart.dist;
for (size_t i = 0 ; i < dim ; i++)
{spacing[i] = cart.spacing[i];};
{
spacing[i] = cart.spacing[i];
magn[i] = cart.magn[i];
};
ghost = cart.ghost;
@@ -864,9 +877,15 @@ public:
fine_s.swap(cart.fine_s);
gr = cart.gr;
cd = cart.cd;
gr_dist = cart.gr_dist;
dist = cart.dist;
domain = cart.domain;
for (size_t i = 0 ; i < dim ; i++)
{spacing[i] = cart.spacing[i];};
{
spacing[i] = cart.spacing[i];
magn[i] = cart.magn[i];
};
ghost = cart.ghost;
......
@@ -638,6 +638,7 @@ public:
verticesGotWeights = dist.verticesGotWeights;
sub_sub_owner = dist.sub_sub_owner;
m2g = dist.m2g;
parmetis_graph = dist.parmetis_graph;
return *this;
}
@@ -655,6 +656,7 @@ public:
verticesGotWeights = dist.verticesGotWeights;
sub_sub_owner.swap(dist.sub_sub_owner);
m2g.swap(dist.m2g);
parmetis_graph = dist.parmetis_graph;
return *this;
}
......
@@ -328,7 +328,7 @@ public:
}
if (is_openfpm_init() == true)
MPI_Comm_free(&comm);
{MPI_Comm_free(&comm);}
}
/*! \brief Set the Sub-graph
@@ -526,8 +526,7 @@ public:
*/
const Parmetis<Graph> & operator=(const Parmetis<Graph> & pm)
{
comm = pm.comm;
v_cl = pm.v_cl;
MPI_Comm_dup(pm.comm, &comm);
p_id = pm.p_id;
nc = pm.nc;
@@ -545,8 +544,7 @@ public:
*/
const Parmetis<Graph> & operator=(Parmetis<Graph> && pm)
{
comm = pm.comm;
v_cl = pm.v_cl;
MPI_Comm_dup(pm.comm, &comm);
p_id = pm.p_id;
nc = pm.nc;
......
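The two Parmetis changes above belong together: the destructor frees comm (the call is now brace-guarded by is_openfpm_init), so the assignment operators must give each object its own communicator via MPI_Comm_dup instead of copying the raw handle, which would make two destructors free the same MPI_Comm. Duplicating in the move-assignment as well is the conservative choice, since the moved-from object's destructor still runs. A minimal sketch of this ownership pattern outside OpenFPM (the CommOwner name is illustrative):

// Sketch: per-object MPI communicator ownership (illustrative, not
// OpenFPM code). Every copy duplicates the communicator, so every
// destructor frees exactly the communicator it owns.
#include <mpi.h>

struct CommOwner
{
    MPI_Comm comm;

    CommOwner() { MPI_Comm_dup(MPI_COMM_WORLD, &comm); }

    CommOwner(const CommOwner & o) { MPI_Comm_dup(o.comm, &comm); }

    CommOwner & operator=(const CommOwner & o)
    {
        if (this != &o)
        {
            MPI_Comm_free(&comm);          // release what we own
            MPI_Comm_dup(o.comm, &comm);   // own a fresh duplicate
        }
        return *this;
    }

    ~CommOwner() { MPI_Comm_free(&comm); }
};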
@@ -40,7 +40,7 @@ struct Box_fix
size_t r_sub;
};
#define GRID_SUB_UNIT_FACTOR 64
#define NO_GDB_EXT_SWITCH 0x1000
/*! \brief This is a distributed grid
*
@@ -241,6 +241,9 @@ class grid_dist_id : public grid_dist_id_comm<dim,St,T,Decomposition,Memory,devi
//! Local external ghost boxes in grid units
openfpm::vector<e_lbox_grid<dim>> loc_eg_box;
//! Number of sub-sub-domains for each processor
size_t v_sub_unit_factor = 64;
/*! \brief Call-back to allocate buffer to receive incoming objects (external ghost boxes)
*
* \param msg_i message size required to receive from i
@@ -920,7 +923,7 @@ class grid_dist_id : public grid_dist_id_comm<dim,St,T,Decomposition,Memory,devi
// Get the number of processors and calculate the number of sub-domains
// for the decomposition
size_t n_proc = v_cl.getProcessingUnits();
size_t n_sub = n_proc * GRID_SUB_UNIT_FACTOR;
size_t n_sub = n_proc * v_sub_unit_factor;
// Calculate the maximum number (before merging) of sub-domains on
// each dimension
@@ -968,6 +971,9 @@ class grid_dist_id : public grid_dist_id_comm<dim,St,T,Decomposition,Memory,devi
Create(bx,g,use_bx_def);
}
// Ghost as integer
Ghost<dim,long int> gint = Ghost<dim,long int>(0);
protected:
/*! \brief Get the point where the origin of the grid of sub-domain i starts
@@ -1022,6 +1028,16 @@ protected:
return gc;
}
/*! \brief Set the minimum number of sub-domains per processor
*
* \param n_sub minimum number of sub-domains per processor (decomposition granularity)
*
*/
void setDecompositionGranularity(size_t n_sub)
{
this->v_sub_unit_factor = n_sub;
}
public:
//! Which kind of grid the structure store
@@ -1203,7 +1219,10 @@ public:
dec.setParameters(g.getDecomposition(),ghost,this->domain);
InitializeStructures(g.getGridInfoVoid().getSize());
// an empty list of restriction boxes
openfpm::vector<Box<dim,long int>> empty;
InitializeStructures(g.getGridInfoVoid().getSize(),empty,gh,false);
}
/*! It constructs a grid of a specified size, defined on a specified Box space, forcing it to follow a specified decomposition, and with a specified ghost size
@@ -1269,8 +1288,11 @@ public:
ghost = convert_ghost(g,cd_sm);
this->dec = dec.duplicate(ghost);
// an empty list of restriction boxes
openfpm::vector<Box<dim,long int>> empty;
// Initialize structures
InitializeStructures(g_sz);
InitializeStructures(g_sz,empty,g,false);
}
/*! It constructs a grid of a specified size, defined on a specified Box space, forcing it to follow a specified decomposition, and having a specified ghost size
@@ -1293,8 +1315,11 @@ public:
ghost = convert_ghost(g,cd_sm);
this->dec = dec.duplicate(ghost);
// an empty list of restriction boxes
openfpm::vector<Box<dim,long int>> empty;
// Initialize structures
InitializeStructures(g_sz);
InitializeStructures(g_sz,empty,g,false);
}
/*! It constructs a grid of a specified size, defined on a specified Box space, and having a specified ghost size
@@ -1306,8 +1331,8 @@ public:
* \warning In very rare cases the ghost part can be one point bigger than the one specified
*
*/
grid_dist_id(const size_t (& g_sz)[dim],const Box<dim,St> & domain, const Ghost<dim,St> & g)
:grid_dist_id(g_sz,domain,g,create_non_periodic<dim>())
grid_dist_id(const size_t (& g_sz)[dim],const Box<dim,St> & domain, const Ghost<dim,St> & g, size_t opt = 0)
:grid_dist_id(g_sz,domain,g,create_non_periodic<dim>(),opt)
{
}
@@ -1320,8 +1345,8 @@ public:
* \warning In very rare cases the ghost part can be one point bigger than the one specified
*
*/
grid_dist_id(const size_t (& g_sz)[dim],const Box<dim,St> & domain, const Ghost<dim,long int> & g)
:grid_dist_id(g_sz,domain,g,create_non_periodic<dim>())
grid_dist_id(const size_t (& g_sz)[dim],const Box<dim,St> & domain, const Ghost<dim,long int> & g, size_t opt = 0)
:grid_dist_id(g_sz,domain,g,create_non_periodic<dim>(),opt)
{
}
@@ -1335,13 +1360,16 @@ public:
* \warning In very rare cases the ghost part can be one point bigger than the one specified
*
*/
grid_dist_id(const size_t (& g_sz)[dim],const Box<dim,St> & domain, const Ghost<dim,St> & g, const periodicity<dim> & p)
grid_dist_id(const size_t (& g_sz)[dim],const Box<dim,St> & domain, const Ghost<dim,St> & g, const periodicity<dim> & p, size_t opt = 0)
:domain(domain),ghost(g),dec(create_vcluster()),v_cl(create_vcluster()),ginfo(g_sz),ginfo_v(g_sz)
{
#ifdef SE_CLASS2
check_new(this,8,GRID_DIST_EVENT,4);
#endif
if (opt >> 32 != 0)
{this->setDecompositionGranularity(opt >> 32);}
InitializeCellDecomposer(g_sz,p.bc);
InitializeDecomposition(g_sz, p.bc);
InitializeStructures(g_sz);
@@ -1358,19 +1386,27 @@ public:
* \warning In very rare cases the ghost part can be one point bigger than the one specified
*
*/
grid_dist_id(const size_t (& g_sz)[dim],const Box<dim,St> & domain, const Ghost<dim,long int> & g, const periodicity<dim> & p)
grid_dist_id(const size_t (& g_sz)[dim],const Box<dim,St> & domain, const Ghost<dim,long int> & g, const periodicity<dim> & p, size_t opt = 0)
:domain(domain),dec(create_vcluster()),v_cl(create_vcluster()),ginfo(g_sz),ginfo_v(g_sz)
{
#ifdef SE_CLASS2
check_new(this,8,GRID_DIST_EVENT,4);
#endif
if (opt >> 32 != 0)
{this->setDecompositionGranularity(opt >> 32);}
InitializeCellDecomposer(g_sz,p.bc);
ghost = convert_ghost(g,cd_sm);
InitializeDecomposition(g_sz,p.bc);
// an empty list of restriction boxes
openfpm::vector<Box<dim,long int>> empty;
// Initialize structures
InitializeStructures(g_sz);
InitializeStructures(g_sz,empty,g,false);
}
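How the new opt parameter is consumed: the lower 32 bits are reserved for flags, and opt >> 32 carries the requested minimum number of sub-domains per processor; when it is zero (the default) the built-in factor of 64 stored in v_sub_unit_factor is kept. A hedged usage sketch, with an illustrative granularity and grid size:

// Sketch: requesting 128 sub-sub-domains per processor by packing the
// value into the upper 32 bits of opt (values are illustrative).
size_t g_sz[3] = {64,64,64};
Box<3,float> domain({0.0,0.0,0.0},{1.0,1.0,1.0});
Ghost<3,long int> g(1);

size_t opt = (size_t)128 << 32;   // decoded above as opt >> 32

grid_dist_id<3,float,aggregate<float>> gd(g_sz,domain,g,opt);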
/*! \brief It constructs a grid on the full domain restricted
@@ -1392,7 +1428,7 @@ public:
const Ghost<dim,long int> & g,
const periodicity<dim> & p,
openfpm::vector<Box<dim,long int>> & bx_def)
:domain(domain),ghost(g),dec(create_vcluster()),v_cl(create_vcluster()),ginfo(g_sz),ginfo_v(g_sz)
:domain(domain),dec(create_vcluster()),v_cl(create_vcluster()),ginfo(g_sz),ginfo_v(g_sz),gint(g)
{
#ifdef SE_CLASS2
check_new(this,8,GRID_DIST_EVENT,4);
@@ -1558,6 +1594,8 @@ public:
#ifdef SE_CLASS2
check_valid(this,8);
#endif
gdb_ext_global.clear();
v_cl.SGather(gdb_ext,gdb_ext_global,0);
v_cl.execute();
@@ -2260,8 +2298,16 @@ public:
/*! \brief It moves all the grid parts that do not belong to the local processor to the respective processor
*
*/
void map()
void map(size_t opt = 0)
{
if (!(opt & NO_GDB_EXT_SWITCH))
{
gdb_ext_old = gdb_ext;
loc_grid_old = loc_grid;
InitializeStructures(g_sz,bx_def,gint,bx_def.size() != 0);
}
getGlobalGridsInfo(gdb_ext_global);
this->template map_(dec,cd_sm,loc_grid,loc_grid_old,gdb_ext,gdb_ext_old,gdb_ext_global);
@@ -2294,7 +2340,7 @@ public:
h5l.load<device_grid>(filename,loc_grid_old,gdb_ext_old);
// Map the distributed grid
map();
map(NO_GDB_EXT_SWITCH);
}
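The relationship between map() and load(): by default map() first snapshots the current layout into gdb_ext_old/loc_grid_old and rebuilds the structures before redistributing, while load() has already filled those *_old structures from the HDF5 file and therefore passes NO_GDB_EXT_SWITCH to skip the snapshot. A hedged sketch of the default path, with illustrative sizes:

// Sketch: the default map() path after a decomposition change
// (illustrative values; load() instead ends with map(NO_GDB_EXT_SWITCH)
// because the old layout comes from the file, not from this object).
size_t sz[3] = {32,32,32};
Box<3,float> domain({0.0,0.0,0.0},{1.0,1.0,1.0});
Ghost<3,long int> g(1);

grid_dist_id<3,float,aggregate<float>> gd(sz,domain,g);

gd.map();   // snapshot the old layout, rebuild, redistribute the data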
/*! \brief This is a meta-function returning which type of sub iterator a grid produces
......
@@ -175,7 +175,6 @@ class grid_dist_id_comm
//! Memory for the ghost sending buffer
Memory g_recv_prp_mem;
/*! \brief Sync the local ghost part
*
* \tparam prp... properties to sync
@@ -223,16 +222,13 @@ class grid_dist_id_comm
if (bx_dst.isValid() == false)
continue;
/* grid_key_dx_iterator_sub<dim> sub_src(loc_grid.get(sub_id_src_gdb_ext).getGrid(),bx_src.getKP1(),bx_src.getKP2());
grid_key_dx_iterator_sub<dim> sub_dst(loc_grid.get(sub_id_dst_gdb_ext).getGrid(),bx_dst.getKP1(),bx_dst.getKP2());*/
#ifdef SE_CLASS1
if (loc_eg_box.get(sub_id_dst).bid.get(k).sub != i)
std::cerr << "Error " << __FILE__ << ":" << __LINE__ << " source and destination are not correctly linked" << "\n";
{std::cerr << "Error " << __FILE__ << ":" << __LINE__ << " source and destination are not correctly linked" << "\n";}
if (sub_src.getVolume() != sub_dst.getVolume())
std::cerr << "Error " << __FILE__ << ":" << __LINE__ << " source and destination does not match in size" << "\n";
if (bx_src.getVolume() != bx_dst.getVolume())
{std::cerr << "Error " << __FILE__ << ":" << __LINE__ << " source and destination do not match in size" << "\n";}
#endif
@@ -289,19 +285,19 @@ class grid_dist_id_comm
if (bx_dst.isValid() == false)
continue;
auto & gd2 = loc_grid.get(sub_id_dst);
gd2.template copy_to_op<op,prp...>(loc_grid.get(i),bx_src,bx_dst);
#ifdef SE_CLASS1
if (loc_ig_box.get(sub_id_dst).bid.get(k).sub != i)
std::cerr << "Error " << __FILE__ << ":" << __LINE__ << " source and destination are not correctly linked" << "\n";
if (sub_src.getVolume() != sub_dst.getVolume())
std::cerr << "Error " << __FILE__ << ":" << __LINE__ << " source and destination does not match in size" << "\n";
if (bx_src.getVolume() != bx_dst.getVolume())
{std::cerr << "Error " << __FILE__ << ":" << __LINE__ << " source and destination do not match in size" << "\n";}
#endif
auto & gd2 = loc_grid.get(sub_id_dst);
gd2.template copy_to_op<op,prp...>(loc_grid.get(i),bx_src,bx_dst);
}
}
}
@@ -660,15 +656,12 @@ class grid_dist_id_comm
auto sub2 = loc_grid.get(sub_id).getIterator(box.getKP1(),box.getKP2());
grid_unpack_with_prp<op,prp_object,device_grid,BHeapMemory>::template unpacking<decltype(sub2),prp...>(mem,sub2,loc_grid.get(sub_id),ps);
// unpack_data_to_ext_ghost_op<op,BHeapMemory,prp ...>(mem,loc_grid,i,
// ig_box,g_id_to_internal_ghost_box,eb_gid_list,
// ps);
}
}
}
}
public:
/*! \brief Reconstruct the local grids
@@ -684,45 +677,18 @@ public:
openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext,
CellDecomposer_sm<dim,St,shift<dim,St>> & cd_sm)
{
size_t count2 = 0;
for (size_t a = 0; a < m_oGrid_recv.size(); a++)
{
for (size_t k = 0; k < m_oGrid_recv.get(a).size(); k++)
{
device_grid g = m_oGrid_recv.get(a).template get<0>(k);
size_t count = 0;
auto it = g.getIterator();
while (it.isNext())
{
//auto key = it.get();
//if (g.template get<0>(key) != 1)
//std::cout << "WRONG???????" << std::endl;
++it;
count++;
}
SpaceBox<dim,long int> b = m_oGrid_recv.get(a).template get<1>(k);
//device_grid gr_send(sz);
//gr_send.setMemory();
//std::cout << "B: (" << b.getLow(0) << "; " << b.getLow(1) << "); (" << b.getHigh(0) << "; " << b.getHigh(1) << "); " << "G: (" << g.getGrid().getBox().getHigh(0) << "; " << g.getGrid().getBox().getHigh(1) << ")" << std::endl;
// Set the dimensions of the local grid
//g.resize(l_res);
Point<dim,St> p;
for (size_t n = 0; n < dim; n++)
p.get(n) = g.getGrid().getBox().getHigh(n);
//std::cout << "G after resize: (" << g.getGrid().getBox().getLow(0) << "; " << g.getGrid().getBox().getLow(1) << "); (" << g.getGrid().getBox().getHigh(0) << "; " << g.getGrid().getBox().getHigh(1) << ")" << std::endl;
Point<dim,St> point;
for (size_t n = 0; n < dim; n++)
point.get(n) = (b.getHigh(n) + b.getLow(n))/2;
@@ -738,9 +704,6 @@ public:
grid_key_dx<dim> start = b.getKP1() - grid_key_dx<dim>(gdb_ext.get(j).origin.asArray());
grid_key_dx<dim> stop = b.getKP2() - grid_key_dx<dim>(gdb_ext.get(j).origin.asArray());
std::string start2 = start.to_string();
std::string stop2 = stop.to_string();
auto it = loc_grid.get(j).getSubIterator(start,stop);
// Copy selected elements into a local grid
@@ -752,7 +715,6 @@ public:
//std::cout << "Key: " << str << std::endl;
loc_grid.get(j).get_o(key) = g.get_o(key2);
count2++;
++it;
}
@@ -783,11 +745,11 @@ public:
openfpm::vector<openfpm::vector<aggregate<device_grid,SpaceBox<dim,long int>>>> & lbl_b,
openfpm::vector<size_t> & prc_sz)
{
lbl_b.clear();
// resize the label buffer
lbl_b.resize(v_cl.getProcessingUnits());
size_t count2 = 0;
// Label all the intersection grids with the processor id where they should go
for (size_t i = 0; i < gdb_ext_old.size(); i++)
@@ -814,7 +776,6 @@ public:
if (intersect == true)
{
count2++;
auto inte_box_cont = cd_sm.convertCellUnitsIntoDomainSpace(inte_box);
// Get the processor ID that stores the intersection box
@@ -936,7 +897,7 @@ public:
for (size_t i = 0; i < v_cl.getProcessingUnits(); i++)
{
if (m_oGrid.get(i).size() != 0)
m_oGrid_new.add(m_oGrid.get(i));
{m_oGrid_new.add(m_oGrid.get(i));}
}
// Vector for receiving intersection grids
@@ -1176,8 +1137,6 @@ public:
void * pointer2 = prAlloc_prp.getPointerEnd();
// v_cl.send(ig_box.get(i).prc,0,pointer,(char *)pointer2 - (char *)pointer);
// This function sends (or queues for sending) the information
send_or_queue(ig_box.get(i).prc,(char *)pointer,(char *)pointer2);