Commit c6cb01a5 authored by incardon

Adding MAP_LOCAL option

parent de5c3d4a
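The new flag is passed through the public map() call. A minimal usage sketch, modelled on the random-walk test further down; the template parameters, particle count and ghost size are illustrative, and MAP_LOCAL presumably requires that particles cross at most into neighboring sub-domains between two map() calls:

Box<3,float> box({0.0,0.0,0.0},{1.0,1.0,1.0});
size_t bc[3] = {PERIODIC,PERIODIC,PERIODIC};
Ghost<3,float> ghost(0.01);

// Distributed vector with one scalar property per particle (illustrative choice)
vector_dist<3,float,aggregate<float>> vd(4096,box,bc,ghost);

// ... displace each particle by a small step, as in test_random_walk ...

// Redistribute the particles; with MAP_LOCAL the receive side is restricted
// to the neighborhood processors of the decomposition
vd.map(MAP_LOCAL);
vd.ghost_get<0>();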
@@ -65,6 +65,7 @@
#define GCL_SYMMETRIC 1
#define GCL_HILBERT 2
//! General function to get a cell-list
template<unsigned int dim, typename St, typename CellL, typename Vector, unsigned int impl>
struct gcl
@@ -1498,7 +1499,7 @@ public:
reorder_sfc<CellL,grid_key_dx_iterator_hilbert<dim>>(v_pos_dest,v_prp_dest,h_it,cell_list);
}
else if (reorder_opt::LINEAR)
else if (opt == reorder_opt::LINEAR)
{
grid_sm<dim,void> gs(div);
grid_key_dx_iterator<dim> h_it(gs);
@@ -1721,9 +1722,17 @@ public:
*
*
*/
template<unsigned int ... prp> void map_list()
template<unsigned int ... prp> void map_list(size_t opt = NONE)
{
this->template map_list_<prp...>(v_pos,v_prp,g_m);
#ifdef SE_CLASS3
se3.map_pre();
#endif
this->template map_list_<prp...>(v_pos,v_prp,g_m,opt);
#ifdef SE_CLASS3
se3.map_post();
#endif
}
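map_list() accepts the same option when only a subset of the properties has to travel with the positions; a short sketch, assuming vd carries at least two properties:

// Move positions together with properties 0 and 1 only, receiving from neighbors only
vd.map_list<0,1>(MAP_LOCAL);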
@@ -1737,13 +1746,13 @@ public:
*
*
*/
template<typename obp = KillParticle> void map()
template<typename obp = KillParticle> void map(size_t opt = NONE)
{
#ifdef SE_CLASS3
se3.map_pre();
#endif
this->template map_<obp>(v_pos,v_prp,g_m);
this->template map_<obp>(v_pos,v_prp,g_m,opt);
#ifdef SE_CLASS3
se3.map_post();
@@ -17,6 +17,8 @@
#define BIND_DEC_TO_GHOST 1
#define MAP_LOCAL 2
/*! \brief compute the communication options from the ghost_get/put options
*
*
@@ -681,7 +683,10 @@ class vector_dist_comm
* \param prc_sz For each processor the number of particles to send
*
*/
template<typename obp> void labelParticleProcessor(openfpm::vector<Point<dim, St>> & v_pos, openfpm::vector<aggregate<size_t,size_t,size_t>> & lbl_p, openfpm::vector<size_t> & prc_sz)
template<typename obp>
void labelParticleProcessor(openfpm::vector<Point<dim, St>> & v_pos,
openfpm::vector<aggregate<size_t,size_t,size_t>> & lbl_p,
openfpm::vector<size_t> & prc_sz)
{
// reset lbl_p
lbl_p.clear();
@@ -703,9 +708,9 @@ class vector_dist_comm
// Check if the particle is inside the domain
if (dec.getDomain().isInside(v_pos.get(key)) == true)
p_id = dec.processorIDBC(v_pos.get(key));
{p_id = dec.processorIDBC(v_pos.get(key));}
else
p_id = obp::out(key, v_cl.getProcessUnitID());
{p_id = obp::out(key, v_cl.getProcessUnitID());}
// Particle to move
if (p_id != v_cl.getProcessUnitID())
@@ -1063,9 +1068,12 @@ public:
* \param v_pos vector of particle positions
* \param v_prp vector of particle properties
* \param g_m ghost marker
 * \param opt options (e.g. MAP_LOCAL)
*
*/
template<unsigned int ... prp> void map_list_(openfpm::vector<Point<dim, St>> & v_pos, openfpm::vector<prop> & v_prp, size_t & g_m)
template<unsigned int ... prp>
void map_list_(openfpm::vector<Point<dim, St>> & v_pos,
openfpm::vector<prop> & v_prp, size_t & g_m, size_t opt = NONE)
{
typedef KillParticle obp;
@@ -1095,6 +1103,17 @@ public:
}
}
// In case the MAP_LOCAL option is set
if (opt & MAP_LOCAL)
{
// if the map is local we indicate that we receive only from the neighboring processors
prc_recv_map.clear();
for (size_t i = 0 ; i < dec.getNNProcessors() ; i++)
{prc_recv_map.add(dec.IDtoProc(i));}
}
// Sending property object
typedef object<typename object_creator<typename prop::type, prp...>::type> prp_object;
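For orientation, the prefill above can be read on its own: dec.getNNProcessors() counts the processors adjacent in the decomposition and dec.IDtoProc() turns a neighborhood index into a global rank, so under MAP_LOCAL the receive list is already known before the exchange, presumably sparing SSendRecv the discovery of the senders. A sketch under those assumptions (expected_senders is just an illustrative name):

openfpm::vector<size_t> expected_senders;
for (size_t i = 0 ; i < dec.getNNProcessors() ; i++)
{expected_senders.add(dec.IDtoProc(i));}   // global rank of the i-th neighbor
// expected_senders plays the role of prc_recv_map in the calls below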
@@ -1105,8 +1124,8 @@ public:
fill_send_map_buf_list<prp_object,prp...>(v_pos,v_prp,prc_sz_r, m_pos, m_prp);
v_cl.SSendRecv(m_pos,v_pos,prc_r,prc_recv_map,recv_sz_map);
v_cl.SSendRecvP<openfpm::vector<prp_object>,decltype(v_prp),layout_base,prp...>(m_prp,v_prp,prc_r,prc_recv_map,recv_sz_map);
v_cl.SSendRecv(m_pos,v_pos,prc_r,prc_recv_map,recv_sz_map,opt);
v_cl.SSendRecvP<openfpm::vector<prp_object>,decltype(v_prp),layout_base,prp...>(m_prp,v_prp,prc_r,prc_recv_map,recv_sz_map,opt);
// mark the ghost part
@@ -1129,7 +1148,7 @@ public:
template<typename obp = KillParticle>
void map_(openfpm::vector<Point<dim, St>> & v_pos,
openfpm::vector<prop,Memory,typename layout_base<prop>::type,layout_base> & v_prp,
size_t & g_m)
size_t & g_m, size_t opt = NONE)
{
// Processor communication size
openfpm::vector<size_t> prc_sz(v_cl.getProcessingUnits());
@@ -1164,8 +1183,8 @@ public:
fill_send_map_buf(v_pos,v_prp, prc_sz_r, m_pos, m_prp);
v_cl.SSendRecv(m_pos,v_pos,prc_r,prc_recv_map,recv_sz_map);
v_cl.SSendRecv(m_prp,v_prp,prc_r,prc_recv_map,recv_sz_map);
v_cl.SSendRecv(m_pos,v_pos,prc_r,prc_recv_map,recv_sz_map,opt);
v_cl.SSendRecv(m_prp,v_prp,prc_r,prc_recv_map,recv_sz_map,opt);
// mark the ghost part
@@ -755,7 +755,7 @@ BOOST_AUTO_TEST_CASE( vector_dist_periodic_test_use_3d )
}
}
BOOST_AUTO_TEST_CASE( vector_dist_periodic_test_random_walk )
void test_random_walk(size_t opt)
{
Vcluster & v_cl = create_vcluster();
@@ -823,7 +823,7 @@ BOOST_AUTO_TEST_CASE( vector_dist_periodic_test_random_walk )
++it;
}
vd.map();
vd.map(opt);
vd.ghost_get<0>();
@@ -835,6 +835,16 @@ BOOST_AUTO_TEST_CASE( vector_dist_periodic_test_random_walk )
}
}
BOOST_AUTO_TEST_CASE( vector_dist_periodic_test_random_walk )
{
test_random_walk(NONE);
}
BOOST_AUTO_TEST_CASE( vector_dist_periodic_test_random_walk_local_map )
{
test_random_walk(MAP_LOCAL);
}
BOOST_AUTO_TEST_CASE( vector_dist_periodic_map )
{
Box<3,float> box({0.0,0.0,0.0},{1.0,1.0,1.0});