Commit 2599b166 authored by incardon's avatar incardon

Latest AMR sparse data

parent a66e35a2
......@@ -174,7 +174,7 @@ int main(int argc, char* argv[])
* Before and after. The blue arrows in the first image indicate the vector field
* for the real particles. In the second image instead the red arrow indicate the
* vector field for the real particle. The blue arrow indicate the ghosts. We can
* note that the blue arrow doea not contain the correct vector. The reason is that
* note that the blue arrow does not contain the correct vector. The reason is that
* when we used **ghost_get** we synchronized the scalar, and the tensor, but not the vector.
*
* \see \ref e1_part_ghost
......
......@@ -16,7 +16,7 @@
int main(int argc, char ** argv)
{
CartesianGraphFactory<2,Graph_CSR<nm_v,nm_e>> g_factory;
CartesianGraphFactory<2,Graph_CSR<nm_v<2>,nm_e>> g_factory;
// Cartesian grid
size_t sz[2] = {20,20};
......@@ -28,19 +28,19 @@ int main(int argc, char ** argv)
// Graph to decompose
Graph_CSR<nm_v,nm_e> g = g_factory.construct<nm_e::communication,NO_VERTEX_ID,float,1,0,1>(sz,box,bc);
Graph_CSR<nm_v<2>,nm_e> g = g_factory.construct<nm_e::communication,NO_VERTEX_ID,float,1,0,1>(sz,box,bc);
// Convert the graph to metis
Metis<Graph_CSR<nm_v,nm_e>> met(g,4);
Metis<Graph_CSR<nm_v<2>,nm_e>> met(g,4);
// decompose
met.decompose<nm_v::id>();
met.decompose<nm_v_id>();
// Write the decomposition
VTKWriter<Graph_CSR<nm_v,nm_e>,VTK_GRAPH> vtk(g);
VTKWriter<Graph_CSR<nm_v<2>,nm_e>,VTK_GRAPH> vtk(g);
vtk.write("Metis/vtk_partition.vtk");
}
......
openfpm_data @ 874297aa
Subproject commit c08415a142c1b7fe865af3cc068f1a1ff0cc8a9e
Subproject commit 874297aafc931ce5d2681b9cebcbec9d63716a3d
......@@ -15,7 +15,87 @@
#define AMR_IMPL_PATCHES 2
#define AMR_IMPL_OPENVDB 3
/*! \brief Couple a decomposition with the array of AMR grid levels
 *
 * Every operation that changes the wrapped decomposition (decompose, refine,
 * redecompose, assignment) is propagated to the decomposition of each grid
 * level stored in gd_array, preserving every level's own ghost extension.
 *
 * \tparam Decomposition decomposition type being wrapped
 * \tparam garray type of the container holding one grid per AMR level
 *
 */
template<typename Decomposition, typename garray>
class Decomposition_encap
{
//! wrapped decomposition (reference — not owned by this object)
Decomposition & dec;

//! per-level grid array kept in sync with dec (reference — not owned)
garray & gd_array;

public:

/*! \brief Constructor
 *
 * \param dec decomposition to wrap
 * \param gd_array per-level grid array to keep in sync with dec
 *
 */
Decomposition_encap(Decomposition & dec, garray & gd_array)
:dec(dec),gd_array(gd_array)
{}

/*! \brief Return the wrapped decomposition
 *
 * \return reference to the internal decomposition
 *
 */
Decomposition & internal_dec() const
{
return dec;
}

/*! \brief Start decomposition
 *
 */
void decompose()
{
dec.decompose();

// propagate the new decomposition to every AMR level; duplicate(gold)
// keeps each level's pre-existing ghost extension
for(size_t i = 0 ; i < gd_array.size() ; i++)
{
Ghost<Decomposition::dims,typename Decomposition::stype> gold = gd_array.get(i).getDecomposition().getGhost();
gd_array.get(i).getDecomposition() = dec.duplicate(gold);
}
}

/*! \brief Refine the decomposition, available only for ParMetis distribution, for Metis it is a null call
 *
 * \param ts number of time step from the previous load balancing
 *
 */
void refine(size_t ts)
{
// NOTE(review): ts is accepted but not forwarded to dec.refine() — confirm intended
dec.refine();

// re-sync every level with the refined decomposition (ghosts preserved)
for(size_t i = 0 ; i < gd_array.size() ; i++)
{
Ghost<Decomposition::dims,typename Decomposition::stype> gold = gd_array.get(i).getDecomposition().getGhost();
gd_array.get(i).getDecomposition() = dec.duplicate(gold);
}
}

/*! \brief Re-decompose from scratch, available only for ParMetis distribution, for Metis it is a null call
 *
 * \param ts number of time step from the previous load balancing
 *
 */
void redecompose(size_t ts)
{
// NOTE(review): ts is accepted but not forwarded to dec.redecompose() — confirm intended
dec.redecompose();

// re-sync every level with the new decomposition (ghosts preserved)
for(size_t i = 0 ; i < gd_array.size() ; i++)
{
Ghost<Decomposition::dims,typename Decomposition::stype> gold = gd_array.get(i).getDecomposition().getGhost();
gd_array.get(i).getDecomposition() = dec.duplicate(gold);
}
}

/*! \brief Get the underlying distribution object of the wrapped decomposition
 *
 * \return the distribution object
 *
 */
auto getDistribution() -> decltype(dec.getDistribution())
{
return dec.getDistribution();
}

/*! \brief Copy the decomposition of de into every grid level
 *
 * NOTE(review): the reference member dec cannot be reseated, so only the
 * per-level grid decompositions are overwritten; also returns a copy,
 * not *this by reference — confirm intended.
 *
 * \param de encapsulated decomposition to copy from
 *
 * \return a copy of this object
 *
 */
Decomposition_encap<Decomposition,garray> operator=(const Decomposition_encap<Decomposition,garray> & de) const
{
for(size_t i = 0 ; i < gd_array.size() ; i++)
{gd_array.get(i).getDecomposition() = de.dec;}

return *this;
}

/*! \brief Write the decomposition to file
 *
 * \param output output filename
 *
 * \return true on success
 *
 */
bool write(std::string output) const
{
return dec.write(output);
}
};
template<unsigned int dim, typename St, typename T, unsigned int impl=AMR_IMPL_TRIVIAL ,typename Decomposition = CartDecomposition<dim,St>,typename Memory=HeapMemory , typename device_grid=grid_cpu<dim,T> >
class grid_dist_amr
......@@ -97,6 +177,78 @@ class grid_dist_amr<dim,St,T,AMR_IMPL_TRIVIAL,Decomposition,Memory,device_grid>
gd_array.last().setBackgroundValue(bck);
}
recalculate_mvoff();
}
public:
/*! \brief Constructor
*
* \param domain Simulation domain
* \param g ghost extension
*
*/
grid_dist_amr(const Box<dim,St> & domain, const Ghost<dim,long int> & g)
:domain(domain),g_int(g)
{
// set boundary consitions to non periodic
for (size_t i = 0; i < dim ; i++)
{bc.bc[i] = NON_PERIODIC;}
}
/*! \brief Constructor
*
* \param domain Simulation domain
* \param g ghost extension
* \param bc boundary conditions
*
*/
grid_dist_amr(const Box<dim,St> & domain, const Ghost<dim,long int> & g, periodicity<dim> & bc)
:domain(domain),g_int(g),bc(bc)
{
// all members are set in the initializer list; nothing else to do
}
/*! \brief Initialize the amr grid
*
* \param dec Decomposition (this parameter is useful in case we want to constrain the AMR to an external decomposition)
* \param n_lvl maximum number of levels (0 mean no additional levels)
* \param g_sz coarsest grid size on each direction
*
*/
void initLevels(const Decomposition & dec, size_t n_lvl,const size_t (& g_sz)[dim])
{
size_t g_sz_lvl[dim];
for (size_t i = 0; i < dim ; i++)
{g_sz_lvl[i] = g_sz[i];}
// Add the coarse level
gd_array.add(grid_dist_id<dim,St,T,Decomposition,Memory,device_grid>(dec,g_sz,g_int));
initialize_other(n_lvl,g_sz_lvl);
}
/*! \brief Initialize the amr grid
*
* \param dec Decomposition (this parameter is useful in case we want to constrain the AMR to an external decomposition)
* \param n_lvl maximum number of levels (0 mean no additional levels)
* \param g_sz coarsest grid size on each direction
*
*/
void initLevels(const Decomposition_encap<Decomposition,decltype(gd_array)> & dec, size_t n_lvl,const size_t (& g_sz)[dim])
{
// unwrap the encapsulated decomposition and forward to the main overload
initLevels(dec.internal_dec(),n_lvl,g_sz);
}
/*! \brief Recalculate the offset array for the moveLvlUp and moveLvlDw
*
*
*
*/
void recalculate_mvoff()
{
// Here we calculate the offset to move one level up and one level down
// in global coordinated moving one level up is multiply the coordinates by 2
// and moving one level down is dividing by 2. In local coordinates is the same
......@@ -149,73 +301,61 @@ class grid_dist_amr<dim,St,T,AMR_IMPL_TRIVIAL,Decomposition,Memory,device_grid>
}
}
public:
/*! \brief Constructor
/*! \brief Initialize the amr grid
*
* \param domain Simulation domain
* \param g ghost extension
* \param n_lvl maximum number of levels (0 mean no additional levels)
* \param g_sz coarsest grid size on each direction
* \param opt options
*
*/
grid_dist_amr(const Box<dim,St> & domain, const Ghost<dim,long int> & g)
:domain(domain),g_int(g)
void initLevels(size_t n_lvl,const size_t (& g_sz)[dim], size_t opt = 0)
{
// set boundary consitions to non periodic
size_t g_sz_lvl[dim];
for (size_t i = 0; i < dim ; i++)
{bc.bc[i] = NON_PERIODIC;}
{g_sz_lvl[i] = g_sz[i];}
// Add the coarse level
gd_array.add(grid_dist_id<dim,St,T,Decomposition,Memory,device_grid>(g_sz,domain,g_int,bc,opt));
initialize_other(n_lvl,g_sz_lvl);
}
/*! \brief Constructor
/*! \brief Add the computation cost on the decomposition using a resolution function
*
* \param domain Simulation domain
* \param g ghost extension
* \param bc boundary conditions
*
* \param md Model to use
* \param ts It is an optional parameter approximately should be the number of ghost get between two
* rebalancing at first decomposition this number can be ignored (default = 1) because not used
*
*/
grid_dist_amr(const Box<dim,St> & domain, const Ghost<dim,long int> & g, periodicity<dim> & bc)
:domain(domain),g_int(g),bc(bc)
// forward the cost model to the coarsest level (level 0), which owns the decomposition
template <typename Model>inline void addComputationCosts(Model md=Model(), size_t ts = 1)
{
gd_array.get(0).addComputationCosts(md,ts);
}
/*! \brief Initialize the amr grid
/*! \brief Get the object that store the information about the decomposition
*
* \param dec Decomposition (this parameter is useful in case we want to constrain the AMR to an external decomposition)
* \param n_lvl maximum number of levels (0 mean no additional levels)
* \param g_sz coarsest grid size on each direction
* \return the decomposition object
*
*/
void initLevels(const Decomposition & dec, size_t n_lvl,const size_t (& g_sz)[dim])
Decomposition_encap<Decomposition,decltype(gd_array)> getDecomposition()
{
size_t g_sz_lvl[dim];
for (size_t i = 0; i < dim ; i++)
{g_sz_lvl[i] = g_sz[i];}
Decomposition_encap<Decomposition,decltype(gd_array)> tmp(gd_array.get(0).getDecomposition(),gd_array);
// Add the coarse level
gd_array.add(grid_dist_id<dim,St,T,Decomposition,Memory,device_grid>(dec,g_sz,g_int));
initialize_other(n_lvl,g_sz_lvl);
return tmp;
}
/*! \brief Initialize the amr grid
/*! \brief Get the underlying grid level
*
* \param n_lvl maximum number of levels (0 mean no additional levels)
* \param g_sz coarsest grid size on each direction
* \param lvl level
*
* \return the grid level
*
*/
void initLevels(size_t n_lvl,const size_t (& g_sz)[dim])
grid_dist_id<dim,St,T,Decomposition,Memory,device_grid> & getLevel(size_t lvl)
{
size_t g_sz_lvl[dim];
for (size_t i = 0; i < dim ; i++)
{g_sz_lvl[i] = g_sz[i];}
// Add the coarse level
gd_array.add(grid_dist_id<dim,St,T,Decomposition,Memory,device_grid>(g_sz,domain,g_int,bc));
initialize_other(n_lvl,g_sz_lvl);
return gd_array.get(lvl);
}
......@@ -538,6 +678,19 @@ public:
}
}
/*! \brief It move all the grid parts that do not belong to the local processor to the respective processor
*
*/
void map(size_t opt = 0)
{
// NOTE(review): opt is accepted but not forwarded to the per-level map() — confirm intended
for (size_t i = 0 ; i < gd_array.size() ; i++)
{
gd_array.get(i).map();
}

// grid parts may have moved between processors: rebuild the
// moveLvlUp/moveLvlDw offset arrays
recalculate_mvoff();
}
/*! \brief Apply the ghost put
*
* \tparam prp... Properties to apply ghost put
......@@ -587,16 +740,6 @@ public:
{gd_array.get(i).clear();}
}
/*! \brief Get Decomposition
*
* \return get the decomposition
*
*/
const Decomposition & getDecomposition()
{
return gd_array.get(0).getDecomposition();
}
/*! \brief Get an object containing the grid informations for a specific level
*
* \param lvl level
......
......@@ -668,7 +668,7 @@ void Test3D_amr_domain_ghost_it(grid & amr_g, Box<3,float> & domain, size_t coar
template<typename grid_amr>
void Test3D_ghost_put(grid_amr & g_dist_amr, long int k)
{
size_t sz[3] = {k,k,k};
size_t sz[3] = {(size_t)k,(size_t)k,(size_t)k};
g_dist_amr.initLevels(4,sz);
......@@ -941,12 +941,6 @@ BOOST_AUTO_TEST_CASE( grid_dist_amr_get_domain_ghost_check )
BOOST_TEST_CHECKPOINT( "Testing grid periodic k<=" << k );
// grid size
size_t sz[3];
sz[0] = k;
sz[1] = k;
sz[2] = k;
// Ghost
Ghost<3,long int> g(1);
......@@ -979,12 +973,6 @@ BOOST_AUTO_TEST_CASE( grid_dist_amr_ghost_put_create )
BOOST_TEST_CHECKPOINT( "Testing grid periodic k<=" << k );
// grid size
size_t sz[3];
sz[0] = k;
sz[1] = k;
sz[2] = k;
// Ghost
Ghost<3,long int> g(1);
......
......@@ -255,7 +255,7 @@ public:
// Optimize the decomposition creating bigger spaces
// And reducing Ghost over-stress
dec_optimizer<dim, Graph_CSR<nm_v, nm_e>> d_o(dist.getGraph(), gr_dist.getSize());
dec_optimizer<dim, Graph_CSR<nm_v<dim>, nm_e>> d_o(dist.getGraph(), gr_dist.getSize());
// Ghost
Ghost<dim,long int> ghe;
......@@ -268,7 +268,7 @@ public:
}
// optimize the decomposition
d_o.template optimize<nm_v::sub_id, nm_v::proc_id>(dist.getGraph(), p_id, loc_box, box_nn_processor,ghe,bc);
d_o.template optimize<nm_v_sub_id, nm_v_proc_id>(dist.getGraph(), p_id, loc_box, box_nn_processor,ghe,bc);
// Initialize
if (loc_box.size() > 0)
......@@ -325,7 +325,7 @@ public:
size_t lin = gr_dist.LinId(key2);
size_t lin2 = gr.LinId(key);
fine_s.get(lin2) = dist.getGraph().template vertex_p<nm_v::proc_id>(lin);
fine_s.get(lin2) = dist.getGraph().template vertex_p<nm_v_proc_id>(lin);
++git;
}
......
......@@ -26,10 +26,10 @@ class DistParMetisDistribution
Box<dim, T> domain;
//! Processor sub-sub-domain graph
DistGraph_CSR<nm_v, nm_e> g;
DistGraph_CSR<nm_v<dim>, nm_e> g;
//! Convert the graph to parmetis format
DistParmetis<DistGraph_CSR<nm_v, nm_e>> parmetis_graph;
DistParmetis<DistGraph_CSR<nm_v<dim>, nm_e>> parmetis_graph;
//! Init vtxdist needed for Parmetis
openfpm::vector<idx_t> vtxdist;
......@@ -91,20 +91,20 @@ public:
domain = dom;
//! Create sub graph
DistGraphFactory<dim, DistGraph_CSR<nm_v, nm_e>> dist_g_factory;
DistGraphFactory<dim, DistGraph_CSR<nm_v<dim>, nm_e>> dist_g_factory;
g = dist_g_factory.template construct<NO_EDGE, T, dim - 1, 0>(gr.getSize(), domain);
g.getDecompositionVector(vtxdist);
if (dim == 2)
for (size_t i = 0; i < g.getNVertex(); i++)
g.vertex(i).template get<nm_v::x>()[2] = 0;
g.vertex(i).template get<nm_v_x>()[2] = 0;
}
/*! \brief Get the current graph (main)
*
*/
DistGraph_CSR<nm_v, nm_e> & getGraph()
DistGraph_CSR<nm_v<dim>, nm_e> & getGraph()
{
return g;
}
......@@ -118,7 +118,7 @@ public:
parmetis_graph.initSubGraph(g);
//! Decompose
parmetis_graph.decompose<nm_v::proc_id>(g);
parmetis_graph.template decompose<nm_v_proc_id>(g);
//! Get result partition for this processors
idx_t *partition = parmetis_graph.getPartition();
......@@ -143,7 +143,7 @@ public:
parmetis_graph.reset(g);
//! Refine
parmetis_graph.refine<nm_v::proc_id>(g);
parmetis_graph.template refine<nm_v_proc_id>(g);
//! Get result partition for this processors
idx_t *partition = parmetis_graph.getPartition();
......@@ -194,10 +194,10 @@ public:
std::cerr << __FILE__ << ":" << __LINE__ << " Position - Such vertex doesn't exist (id = " << id << ", " << "total size = " << g.getNVertex() << ")\n";
#endif
pos[0] = g.vertex(id).template get<nm_v::x>()[0];
pos[1] = g.vertex(id).template get<nm_v::x>()[1];
pos[0] = g.vertex(id).template get<nm_v_x>()[0];
pos[1] = g.vertex(id).template get<nm_v_x>()[1];
if (dim == 3)
pos[2] = g.vertex(id).template get<nm_v::x>()[2];
pos[2] = g.vertex(id).template get<nm_v_x>()[2];
}
/*! \brief Function that set the weight of the vertex
......@@ -215,7 +215,7 @@ public:
#endif
// If the vertex is inside this processor update the value
g.vertex(id).template get<nm_v::computation>() = weight;
g.vertex(id).template get<nm_v_computation>() = weight;
}
......@@ -240,7 +240,7 @@ public:
std::cerr << __FILE__ << ":" << __LINE__ << "Such vertex doesn't exist (id = " << id << ", " << "total size = " << g.getTotNVertex() << ")\n";
#endif
return g.vertex(id).template get<nm_v::computation>();
return g.vertex(id).template get<nm_v_computation>();
}
/*! \brief Compute the processor load counting the total weights of its vertices
......@@ -253,7 +253,7 @@ public:
for (size_t i = 0; i < g.getNVertex(); i++)
{
load += g.vertex(i).template get<nm_v::computation>();
load += g.vertex(i).template get<nm_v_computation>();
}
return load;
}
......@@ -294,7 +294,7 @@ public:
std::cerr << __FILE__ << ":" << __LINE__ << "Migration - Such vertex doesn't exist (id = " << id << ", " << "total size = " << g.getNVertex() << ")\n";
#endif
g.vertex(id).template get<nm_v::migration>() = migration;
g.vertex(id).template get<nm_v_migration>() = migration;
}
/*! \brief Set communication cost of the edge id
......@@ -333,7 +333,7 @@ public:
*/
void write(const std::string & file)
{
VTKWriter<DistGraph_CSR<nm_v, nm_e>, DIST_GRAPH> gv2(g);
VTKWriter<DistGraph_CSR<nm_v<dim>, nm_e>, DIST_GRAPH> gv2(g);
gv2.write(std::to_string(file + ".vtk"));
}
......
......@@ -39,13 +39,13 @@ class MetisDistribution
Box<dim, T> domain;
//! Global sub-sub-domain graph
Graph_CSR<nm_v, nm_e> gp;
Graph_CSR<nm_v<dim>, nm_e> gp;
//! Flag that indicate if we are doing a test (In general it fix the seed)
bool testing = false;
//! Metis decomposer utility
Metis<Graph_CSR<nm_v, nm_e>> metis_graph;
Metis<Graph_CSR<nm_v<dim>, nm_e>> metis_graph;
/*! \brief sub-domain list and weight
*
......@@ -104,7 +104,7 @@ class MetisDistribution
public:
static constexpr unsigned int computation = nm_v::computation;
static constexpr unsigned int computation = nm_v_computation;
/*! \brief constructor
*
......@@ -182,20 +182,20 @@ public:
domain = dom;
// Create a cartesian grid graph
CartesianGraphFactory<dim, Graph_CSR<nm_v, nm_e>> g_factory_part;
gp = g_factory_part.template construct<NO_EDGE, nm_v::id, T, dim - 1, 0>(gr.getSize(), domain, bc);
CartesianGraphFactory<dim, Graph_CSR<nm_v<dim>, nm_e>> g_factory_part;
gp = g_factory_part.template construct<NO_EDGE, nm_v_id, T, dim - 1, 0>(gr.getSize(), domain, bc);
// Init to 0.0 axis z (to fix in graphFactory)
if (dim < 3)
{
for (size_t i = 0; i < gp.getNVertex(); i++)
{
gp.vertex(i).template get<nm_v::x>()[2] = 0.0;
gp.vertex(i).template get<nm_v_x>()[2] = 0.0;
}
}
for (size_t i = 0; i < gp.getNVertex(); i++)
gp.vertex(i).template get<nm_v::global_id>() = i;
gp.vertex(i).template get<nm_v_global_id>() = i;
}
/*! \brief Get the current graph (main)
......@@ -203,7 +203,7 @@ public:
* \return the current sub-sub domain Graph
*
*/
Graph_CSR<nm_v, nm_e> & getGraph()
Graph_CSR<nm_v<dim>, nm_e> & getGraph()
{
#ifdef SE_CLASS2
check_valid(this,8);
......@@ -230,7 +230,7 @@ public:
{
// we fill the assignment
for (size_t i = 0 ; i < recv_ass.size() ; i++)
gp.template vertex_p<nm_v::computation>(recv_ass.get(i).id) = recv_ass.get(i).w;
gp.template vertex_p<nm_v_computation>(recv_ass.get(i).id) = recv_ass.get(i).w;
metis_graph.initMetisGraph(v_cl.getProcessingUnits(),true);
}
......@@ -239,13 +239,13 @@ public:
metis_graph.onTest(testing);
// decompose
metis_graph.decompose<nm_v::proc_id>();
metis_graph.template decompose<nm_v_proc_id>();
if (recv_ass.size() != 0)
{
// we fill the assignment
for (size_t i = 0 ; i < recv_ass.size() ; i++)
recv_ass.get(i).w = gp.template vertex_p<nm_v::proc_id>(recv_ass.get(i).id);
recv_ass.get(i).w = gp.template vertex_p<nm_v_proc_id>(recv_ass.get(i).id);
}
else
{
......@@ -255,7 +255,7 @@ public:
for (size_t i = 0 ; i < gp.getNVertex() ; i++)
{
recv_ass.get(i).id = i;
recv_ass.get(i).w = gp.template vertex_p<nm_v::proc_id>(i);
recv_ass.get(i).w = gp.template vertex_p<nm_v_proc_id>(i);
}
}
}
......@@ -277,7 +277,7 @@ public:
// Fill the metis graph
for (size_t i = 0 ; i < recv_ass.size() ; i++)
{
gp.template vertex_p<nm_v::proc_id>(recv_ass.get(i).id) = recv_ass.get(i).w;
gp.template vertex_p<nm_v_proc_id>(recv_ass.get(i).id) = recv_ass.get(i).w;
if (recv_ass.get(i).w == v_cl.getProcessUnitID())
{
......@@ -330,10 +330,10 @@ public:
check_overflow(id);
// Copy the geometrical informations inside the pos vector
pos[0] = gp.vertex(id).template get<nm_v::x>()[0];
pos[1] = gp.vertex(id).template get<nm_v::x>()[1];
pos[0] = gp.vertex(id).template get<nm_v_x>()[0];
pos[1] = gp.vertex(id).template get<nm_v_x>()[1];
if (dim == 3)
pos[2] = gp.vertex(id).template get<nm_v::x>()[2];
{pos[2] = gp.vertex(id).template get<nm_v_x>()[2];}
}
/*! \brief function that get the computational cost of the sub-sub-domain
......@@ -349,7 +349,7 @@ public:
check_valid(this,8);
#endif
check_overflow(id);
return gp.vertex(id).template get<nm_v::computation>();
return gp.vertex(id).template get<nm_v_computation>();
}
......@@ -394,7 +394,7 @@ public:
check_overflow(id);
#endif
gp.vertex(id).template get<nm_v::migration>() = cost;
gp.vertex(id).template get<nm_v_migration>() = cost;
}
/*! \brief Set communication cost between neighborhood sub-sub-domains (weight on the edge)
......@@ -522,7 +522,7 @@ public:
check_valid(this,8);
#endif
VTKWriter<Graph_CSR<nm_v, nm_e>, VTK_GRAPH> gv2(gp);
VTKWriter<Graph_CSR<nm_v<dim>, nm_e>, VTK_GRAPH> gv2(gp);
gv2.write(std::to_string(v_cl.getProcessUnitID()) + "_" + out + ".vtk");
}
......@@ -545,7 +545,7 @@ public:
if (v_cl.getProcessUnitID() == 0)
{
for (size_t i = 0; i < gp.getNVertex(); i++)
loads.get(gp.template vertex_p<nm_v::proc_id>(i)) += gp.template vertex_p<nm_v::computation>(i);
{loads.get(gp.template vertex_p<nm_v_proc_id>(i)) += gp.template vertex_p<nm_v_computation>(i);}
for (size_t i = 0 ; i < v_cl.getProcessingUnits() ; i++)
{
......
......@@ -47,10 +47,10 @@ class ParMetisDistribution
Box<dim, T> domain;
//! Global sub-sub-domain graph
Graph_CSR<nm_v, nm_e> gp;
Graph_CSR<nm_v<dim>, nm_e> gp;
//! Convert the graph to parmetis format
Parmetis<Graph_CSR<nm_v, nm_e>> parmetis_graph;
Parmetis<Graph_CSR<nm_v<dim>, nm_e>> parmetis_graph;
//! Id of the sub-sub-domain where we set the costs
openfpm::vector<size_t> sub_sub_owner;
......@@ -114,10 +114,10 @@ class ParMetisDistribution
auto v_id = m2g.find(l)->second.id;
// Update proc id in the vertex (using the old map)
gp.template vertex_p<nm_v::proc_id>(v_id) = partitions.get(i).get(k);
gp.template vertex_p<nm_v_proc_id>(v_id) = partitions.get(i).get(k);
if (partitions.get(i).get(k) == (long int)v_cl.getProcessUnitID())
sub_sub_owner.add(v_id);
{sub_sub_owner.add(v_id);}
// Add vertex to temporary structure of distribution (needed to update main graph)
v_per_proc.get(partitions.get(i).get(k)).add(getVertexGlobalId(l));
......@@ -137,12 +137,12 @@ class ParMetisDistribution
for (size_t i = 0 ; i < gp.getNVertex(); ++i)
{
size_t pid = gp.template vertex_p<nm_v::proc_id>(i);
size_t pid = gp.template vertex_p<nm_v_proc_id>(i);