Commit 46aeaf56 authored by incardon's avatar incardon

Grid with sparse_cl

parent 9be1da26
#!groovy
timeout(180)
{
parallel (
......@@ -123,4 +126,5 @@ parallel (
}
)
}
# Top-level build orchestration for OpenFPM: SUBDIRS drives the recursive
# automake build, while the explicit rules below let a developer build or
# test each submodule directly (e.g. `make vcluster`, `make test_io`).
SUBDIRS = src images openfpm_data openfpm_io openfpm_devices openfpm_vcluster openfpm_numerics
ACLOCAL_AMFLAGS = -I m4
bin_PROGRAMS =
# Per-submodule build targets
pdata:
	cd src && make
data:
	cd openfpm_data/src && make
devices:
	cd openfpm_devices/src && make
vcluster:
	cd openfpm_vcluster/src && make
io:
	cd openfpm_io/src && make
numerics:
	cd openfpm_numerics/src && make
actual_test:
	cd src && make actual_test
# Per-submodule test targets
test_pdata:
	cd src && make test
test_data:
	cd openfpm_data/src && make test
test_devices:
	cd openfpm_devices/src && make test
test_vcluster:
	cd openfpm_vcluster/src && make test
test_io:
	cd openfpm_io/src && make test
test_numerics:
	cd openfpm_numerics/src && make test
# Aggregate test target: runs every submodule test suite
test: test_devices test_data test_vcluster test_pdata test_io test_numerics
# All of these name actions, not files: declare every one phony so a
# stray file or directory with the same name cannot mask the rule.
.PHONY: pdata data devices vcluster io numerics actual_test test test_pdata test_data test_devices test_vcluster test_io test_numerics
......@@ -10,7 +10,6 @@
*
* This example shows more in details the functionalities of **ghost_get** and **ghost_put** for a distributed vector.
*
*
* ## Inclusion ## {#e1_v_inclusion}
*
* We activate the vector_dist functionalities
......@@ -175,7 +174,7 @@ int main(int argc, char* argv[])
 * Before and after. The blue arrows in the first image indicate the vector field
 * for the real particles. In the second image instead the red arrows indicate the
 * vector field for the real particles. The blue arrows indicate the ghosts. We can
* note that the blue arrow doea not contain the correct vector. The reason is that
* note that the blue arrow does not contain the correct vector. The reason is that
* when we used **ghost_get** we synchronized the scalar, and the tensor, but not the vector.
*
* \see \ref e1_part_ghost
......@@ -187,6 +186,34 @@ int main(int argc, char* argv[])
* <img src="http://ppmcore.mpi-cbg.de/web/images/examples/after_ghost_get.jpg"/>
* \endhtmlonly
*
 * ## So ... how do I have to place these ghost_get calls ##
*
 * The first thing to do is to place the ghost_get calls in a way that the program
 * is guaranteed to work in parallel. In order to do this we can use the following reasoning:
 * If we have a loop over particles we distinguish two types of loops:
*
 * * A loop that iterates over particles
 * * A loop that iterates over particles and their neighborhood particles
*
*
 * If the loop is of the first type (you do not loop over the neighborhood particles)
 * ghost_get is not necessary. If I am in the second case I need a ghost_get. The
 * second point is which properties I have to synchronize: ghost_get<...>(), or more
 * practically, what I have to put in the ... . To answer this we have to check all
 * the properties that we use from the neighborhood particles and pass them to ghost_get
 * as a list. To summarize:
\code{.unparsed}
I am doing a simple loop over particles (1), or I am looping also over neighborhood particles (2)?
For the case (1) the answer is "I do not need ghost_get". For the case (2) the answer is "I need ghost_get"
If I am in case (2), the second question is which parameters should I use?
The answer is look at all vd.getProp<...>(b) where b is a neighborhood particle. All ... properties should appear in
ghost_get<...>()
\endcode
 * This reasoning is always enough to place the ghost_get function correctly. For
 * more fine tuning look at the options below
*
*/
......
......@@ -16,7 +16,7 @@
int main(int argc, char ** argv)
{
CartesianGraphFactory<2,Graph_CSR<nm_v,nm_e>> g_factory;
CartesianGraphFactory<2,Graph_CSR<nm_v<2>,nm_e>> g_factory;
// Cartesian grid
size_t sz[2] = {20,20};
......@@ -28,19 +28,19 @@ int main(int argc, char ** argv)
// Graph to decompose
Graph_CSR<nm_v,nm_e> g = g_factory.construct<nm_e::communication,NO_VERTEX_ID,float,1,0,1>(sz,box,bc);
Graph_CSR<nm_v<2>,nm_e> g = g_factory.construct<nm_e::communication,NO_VERTEX_ID,float,1,0,1>(sz,box,bc);
// Convert the graph to metis
Metis<Graph_CSR<nm_v,nm_e>> met(g,4);
Metis<Graph_CSR<nm_v<2>,nm_e>> met(g,4);
// decompose
met.decompose<nm_v::id>();
met.decompose<nm_v_id>();
// Write the decomposition
VTKWriter<Graph_CSR<nm_v,nm_e>,VTK_GRAPH> vtk(g);
VTKWriter<Graph_CSR<nm_v<2>,nm_e>,VTK_GRAPH> vtk(g);
vtk.write("Metis/vtk_partition.vtk");
}
......
openfpm_data @ f982d0ac
Subproject commit 808312d7af8c8f94c746a6d6213737a1d5d506f4
Subproject commit f982d0ac3f3ae0eefbd1ca9e39a700b8898f8ee9
openfpm_devices @ 9f473a4f
Subproject commit f3508e0b1535cf724a7376c38ab58f3ac4b697e8
Subproject commit 9f473a4f9e8bd1301fa9721bdb384b15f763aec6
openfpm_io @ eafa24bd
Subproject commit 10a8194fadb8009f1dc9cb22d0f118d612170267
Subproject commit eafa24bd983173dfd9c4e9c49e88b160096066ff
openfpm_vcluster @ 95f93aaa
Subproject commit df344ad1e781b02b91b111e0904bd06d9970b042
Subproject commit 95f93aaa9ef1df480d9daf8d2f8c4295513c9f9b
......@@ -14,10 +14,14 @@ else()
set(CUDA_SOURCES)
endif()
add_executable(pdata ${OPENFPM_INIT_FILE} ${CUDA_SOURCES} main.cpp
add_executable(pdata ${OPENFPM_INIT_FILE} ${CUDA_SOURCES} main.cpp
Amr/grid_dist_amr_unit_tests.cpp
Amr/tests/amr_base_unit_tests.cpp
Debug/debug_test.cpp
Grid/tests/grid_dist_id_HDF5_chckpnt_restart_test.cpp
Grid/tests/grid_dist_id_unit_test.cpp
Grid/tests/sgrid_dist_id_unit_tests.cpp
#Grid/tests/grid_dist_id_dlb_unit_test.cpp
Grid/tests/staggered_grid_dist_unit_test.cpp
Vector/tests/vector_dist_cell_list_tests.cpp
Vector/tests/vector_dist_complex_prp_unit_test.cpp
......
......@@ -378,7 +378,7 @@ public:
// Optimize the decomposition creating bigger spaces
// And reducing Ghost over-stress
dec_optimizer<dim, Graph_CSR<nm_v, nm_e>> d_o(dist.getGraph(), gr_dist.getSize());
dec_optimizer<dim, Graph_CSR<nm_v<dim>, nm_e>> d_o(dist.getGraph(), gr_dist.getSize());
// Ghost
Ghost<dim,long int> ghe;
......@@ -391,7 +391,7 @@ public:
}
// optimize the decomposition
d_o.template optimize<nm_v::sub_id, nm_v::proc_id>(dist.getGraph(), p_id, loc_box, box_nn_processor,ghe,bc);
d_o.template optimize<nm_v_sub_id, nm_v_proc_id>(dist.getGraph(), p_id, loc_box, box_nn_processor,ghe,bc);
// Initialize
if (loc_box.size() > 0)
......@@ -454,9 +454,6 @@ public:
// Check if the box is valid
if (bound.isValidN() == true)
{
// Not necessary, but I prefer
bound.enlarge(ghost);
// calculate the sub-divisions
size_t div[dim];
for (size_t i = 0; i < dim; i++)
......@@ -873,7 +870,10 @@ public:
cart.cd = cd;
cart.domain = domain;
for (size_t i = 0 ; i < dim ; i++)
{cart.spacing[i] = spacing[i];};
{
cart.spacing[i] = spacing[i];
cart.magn[i] = magn[i];
};
cart.bbox = bbox;
cart.ghost = g;
......@@ -916,7 +916,10 @@ public:
cart.domain = domain;
cart.sub_domains_global = sub_domains_global;
for (size_t i = 0 ; i < dim ; i++)
{cart.spacing[i] = spacing[i];};
{
cart.spacing[i] = spacing[i];
cart.magn[i] = magn[i];
};
cart.ghost = ghost;
......@@ -1027,6 +1030,9 @@ public:
dist = cart.dist;
commCostSet = cart.commCostSet;
cd = cart.cd;
gr_dist = cart.gr_dist;
dist = cart.dist;
domain = cart.domain;
sub_domains_global.swap(cart.sub_domains_global);
......@@ -1842,6 +1848,23 @@ public:
return v_cl;
}
/*! \brief Deallocate structures that identify a point to which internal ghost is located
*
*/
void free_geo_cell()
{
ie_ghost<dim,T,Memory,layout_base>::free_geo_cell();
}
/*! \brief Deallocate structures that identify a point to which internal ghost is located
*
*/
void free_fines()
{
fine_s.clear();
fine_s.destroy();
}
/*! \brief function to check the consistency of the information of the decomposition
*
* \return false if is inconsistent
......
......@@ -26,10 +26,10 @@ class DistParMetisDistribution
Box<dim, T> domain;
//! Processor sub-sub-domain graph
DistGraph_CSR<nm_v, nm_e> g;
DistGraph_CSR<nm_v<dim>, nm_e> g;
//! Convert the graph to parmetis format
DistParmetis<DistGraph_CSR<nm_v, nm_e>> parmetis_graph;
DistParmetis<DistGraph_CSR<nm_v<dim>, nm_e>> parmetis_graph;
//! Init vtxdist needed for Parmetis
openfpm::vector<idx_t> vtxdist;
......@@ -91,20 +91,20 @@ public:
domain = dom;
//! Create sub graph
DistGraphFactory<dim, DistGraph_CSR<nm_v, nm_e>> dist_g_factory;
DistGraphFactory<dim, DistGraph_CSR<nm_v<dim>, nm_e>> dist_g_factory;
g = dist_g_factory.template construct<NO_EDGE, T, dim - 1, 0>(gr.getSize(), domain);
g.getDecompositionVector(vtxdist);
if (dim == 2)
for (size_t i = 0; i < g.getNVertex(); i++)
g.vertex(i).template get<nm_v::x>()[2] = 0;
g.vertex(i).template get<nm_v_x>()[2] = 0;
}
/*! \brief Get the current graph (main)
*
*/
DistGraph_CSR<nm_v, nm_e> & getGraph()
DistGraph_CSR<nm_v<dim>, nm_e> & getGraph()
{
return g;
}
......@@ -118,7 +118,7 @@ public:
parmetis_graph.initSubGraph(g);
//! Decompose
parmetis_graph.decompose<nm_v::proc_id>(g);
parmetis_graph.template decompose<nm_v_proc_id>(g);
//! Get result partition for this processors
idx_t *partition = parmetis_graph.getPartition();
......@@ -143,7 +143,7 @@ public:
parmetis_graph.reset(g);
//! Refine
parmetis_graph.refine<nm_v::proc_id>(g);
parmetis_graph.template refine<nm_v_proc_id>(g);
//! Get result partition for this processors
idx_t *partition = parmetis_graph.getPartition();
......@@ -194,10 +194,10 @@ public:
std::cerr << __FILE__ << ":" << __LINE__ << " Position - Such vertex doesn't exist (id = " << id << ", " << "total size = " << g.getNVertex() << ")\n";
#endif
pos[0] = g.vertex(id).template get<nm_v::x>()[0];
pos[1] = g.vertex(id).template get<nm_v::x>()[1];
pos[0] = g.vertex(id).template get<nm_v_x>()[0];
pos[1] = g.vertex(id).template get<nm_v_x>()[1];
if (dim == 3)
pos[2] = g.vertex(id).template get<nm_v::x>()[2];
pos[2] = g.vertex(id).template get<nm_v_x>()[2];
}
/*! \brief Function that set the weight of the vertex
......@@ -215,7 +215,7 @@ public:
#endif
// If the vertex is inside this processor update the value
g.vertex(id).template get<nm_v::computation>() = weight;
g.vertex(id).template get<nm_v_computation>() = weight;
}
......@@ -242,7 +242,7 @@ public:
std::cerr << __FILE__ << ":" << __LINE__ << "Such vertex doesn't exist (id = " << id << ", " << "total size = " << g.getTotNVertex() << ")\n";
#endif
return g.vertex(id).template get<nm_v::computation>();
return g.vertex(id).template get<nm_v_computation>();
}
/*! \brief Compute the processor load counting the total weights of its vertices
......@@ -255,7 +255,7 @@ public:
for (size_t i = 0; i < g.getNVertex(); i++)
{
load += g.vertex(i).template get<nm_v::computation>();
load += g.vertex(i).template get<nm_v_computation>();
}
return load;
}
......@@ -292,7 +292,7 @@ public:
std::cerr << __FILE__ << ":" << __LINE__ << "Migration - Such vertex doesn't exist (id = " << id << ", " << "total size = " << g.getNVertex() << ")\n";
#endif
g.vertex(id).template get<nm_v::migration>() = migration;
g.vertex(id).template get<nm_v_migration>() = migration;
}
/*! \brief Set communication cost of the edge id
......@@ -338,7 +338,7 @@ public:
*/
void write(const std::string & file)
{
VTKWriter<DistGraph_CSR<nm_v, nm_e>, DIST_GRAPH> gv2(g);
VTKWriter<DistGraph_CSR<nm_v<dim>, nm_e>, DIST_GRAPH> gv2(g);
gv2.write(std::to_string(file + ".vtk"));
}
......
......@@ -53,13 +53,13 @@ class MetisDistribution
Box<dim, T> domain;
//! Global sub-sub-domain graph
Graph_CSR<nm_v, nm_e> gp;
Graph_CSR<nm_v<dim>, nm_e> gp;
//! Flag that indicate if we are doing a test (In general it fix the seed)
bool testing = false;
//! Metis decomposer utility
Metis<Graph_CSR<nm_v, nm_e>> metis_graph;
Metis<Graph_CSR<nm_v<dim>, nm_e>> metis_graph;
//! unordered map that map global sub-sub-domain to owned_cost_sub id
std::unordered_map<size_t,size_t> owner_scs;
......@@ -104,7 +104,7 @@ class MetisDistribution
public:
static constexpr unsigned int computation = nm_v::computation;
static constexpr unsigned int computation = nm_v_computation;
/*! \brief constructor
*
......@@ -182,20 +182,20 @@ public:
domain = dom;
// Create a cartesian grid graph
CartesianGraphFactory<dim, Graph_CSR<nm_v, nm_e>> g_factory_part;
gp = g_factory_part.template construct<NO_EDGE, nm_v::id, T, dim - 1, 0>(gr.getSize(), domain, bc);
CartesianGraphFactory<dim, Graph_CSR<nm_v<dim>, nm_e>> g_factory_part;
gp = g_factory_part.template construct<NO_EDGE, nm_v_id, T, dim - 1, 0>(gr.getSize(), domain, bc);
// Init to 0.0 axis z (to fix in graphFactory)
if (dim < 3)
{
for (size_t i = 0; i < gp.getNVertex(); i++)
{
gp.vertex(i).template get<nm_v::x>()[2] = 0.0;
gp.vertex(i).template get<nm_v_x>()[2] = 0.0;
}
}
for (size_t i = 0; i < gp.getNVertex(); i++)
gp.vertex(i).template get<nm_v::global_id>() = i;
gp.vertex(i).template get<nm_v_global_id>() = i;
}
/*! \brief Get the current graph (main)
......@@ -203,7 +203,7 @@ public:
* \return the current sub-sub domain Graph
*
*/
Graph_CSR<nm_v, nm_e> & getGraph()
Graph_CSR<nm_v<dim>, nm_e> & getGraph()
{
#ifdef SE_CLASS2
check_valid(this,8);
......@@ -230,7 +230,7 @@ public:
{
// we fill the assignment
for (size_t i = 0 ; i < recv_ass.size() ; i++)
gp.template vertex_p<nm_v::computation>(recv_ass.get(i).id) = recv_ass.get(i).w;
gp.template vertex_p<nm_v_computation>(recv_ass.get(i).id) = recv_ass.get(i).w;
metis_graph.initMetisGraph(v_cl.getProcessingUnits(),true);
}
......@@ -239,13 +239,13 @@ public:
metis_graph.onTest(testing);
// decompose
metis_graph.decompose<nm_v::proc_id>();
metis_graph.template decompose<nm_v_proc_id>();
if (recv_ass.size() != 0)
{
// we fill the assignment
for (size_t i = 0 ; i < recv_ass.size() ; i++)
recv_ass.get(i).w = gp.template vertex_p<nm_v::proc_id>(recv_ass.get(i).id);
recv_ass.get(i).w = gp.template vertex_p<nm_v_proc_id>(recv_ass.get(i).id);
}
else
{
......@@ -255,7 +255,7 @@ public:
for (size_t i = 0 ; i < gp.getNVertex() ; i++)
{
recv_ass.get(i).id = i;
recv_ass.get(i).w = gp.template vertex_p<nm_v::proc_id>(i);
recv_ass.get(i).w = gp.template vertex_p<nm_v_proc_id>(i);
}
}
}
......@@ -277,7 +277,7 @@ public:
// Fill the metis graph
for (size_t i = 0 ; i < recv_ass.size() ; i++)
{
gp.template vertex_p<nm_v::proc_id>(recv_ass.get(i).id) = recv_ass.get(i).w;
gp.template vertex_p<nm_v_proc_id>(recv_ass.get(i).id) = recv_ass.get(i).w;
if (recv_ass.get(i).w == v_cl.getProcessUnitID())
{
......@@ -330,10 +330,10 @@ public:
check_overflow(id);
// Copy the geometrical informations inside the pos vector
pos[0] = gp.vertex(id).template get<nm_v::x>()[0];
pos[1] = gp.vertex(id).template get<nm_v::x>()[1];
pos[0] = gp.vertex(id).template get<nm_v_x>()[0];
pos[1] = gp.vertex(id).template get<nm_v_x>()[1];
if (dim == 3)
pos[2] = gp.vertex(id).template get<nm_v::x>()[2];
{pos[2] = gp.vertex(id).template get<nm_v_x>()[2];}
}
/*! \brief function that get the computational cost of the sub-sub-domain
......@@ -349,7 +349,7 @@ public:
check_valid(this,8);
#endif
check_overflow(id);
return gp.vertex(id).template get<nm_v::computation>();
return gp.vertex(id).template get<nm_v_computation>();
}
......@@ -394,7 +394,7 @@ public:
check_overflow(id);
#endif
gp.vertex(id).template get<nm_v::migration>() = cost;
gp.vertex(id).template get<nm_v_migration>() = cost;
}
/*! \brief Set communication cost between neighborhood sub-sub-domains (weight on the edge)
......@@ -522,7 +522,7 @@ public:
check_valid(this,8);
#endif
VTKWriter<Graph_CSR<nm_v, nm_e>, VTK_GRAPH> gv2(gp);
VTKWriter<Graph_CSR<nm_v<dim>, nm_e>, VTK_GRAPH> gv2(gp);
gv2.write(std::to_string(v_cl.getProcessUnitID()) + "_" + out + ".vtk");
}
......@@ -545,7 +545,7 @@ public:
if (v_cl.getProcessUnitID() == 0)
{
for (size_t i = 0; i < gp.getNVertex(); i++)
loads.get(gp.template vertex_p<nm_v::proc_id>(i)) += gp.template vertex_p<nm_v::computation>(i);
{loads.get(gp.template vertex_p<nm_v_proc_id>(i)) += gp.template vertex_p<nm_v_computation>(i);}
for (size_t i = 0 ; i < v_cl.getProcessingUnits() ; i++)
{
......
......@@ -48,10 +48,10 @@ class ParMetisDistribution
Box<dim, T> domain;
//! Global sub-sub-domain graph
Graph_CSR<nm_v, nm_e> gp;
Graph_CSR<nm_v<dim>, nm_e> gp;
//! Convert the graph to parmetis format
Parmetis<Graph_CSR<nm_v, nm_e>> parmetis_graph;
Parmetis<Graph_CSR<nm_v<dim>, nm_e>> parmetis_graph;
//! Id of the sub-sub-domain where we set the costs
openfpm::vector<size_t> sub_sub_owner;
......@@ -115,10 +115,10 @@ class ParMetisDistribution
auto v_id = m2g.find(l)->second.id;
// Update proc id in the vertex (using the old map)
gp.template vertex_p<nm_v::proc_id>(v_id) = partitions.get(i).get(k);
gp.template vertex_p<nm_v_proc_id>(v_id) = partitions.get(i).get(k);
if (partitions.get(i).get(k) == (long int)v_cl.getProcessUnitID())
sub_sub_owner.add(v_id);
{sub_sub_owner.add(v_id);}
// Add vertex to temporary structure of distribution (needed to update main graph)
v_per_proc.get(partitions.get(i).get(k)).add(getVertexGlobalId(l));
......@@ -138,12 +138,12 @@ class ParMetisDistribution
for (size_t i = 0 ; i < gp.getNVertex(); ++i)
{
size_t pid = gp.template vertex_p<nm_v::proc_id>(i);
size_t pid = gp.template vertex_p<nm_v_proc_id>(i);
rid j = rid(vtxdist.get(pid).id + cnt.get(pid));
gid gi = gid(i);
gp.template vertex_p<nm_v::id>(i) = j.id;
gp.template vertex_p<nm_v_id>(i) = j.id;
cnt.get(pid)++;
setMapId(j,gi);
......@@ -328,8 +328,8 @@ public:
domain = dom;
// Create a cartesian grid graph
CartesianGraphFactory<dim, Graph_CSR<nm_v, nm_e>> g_factory_part;
gp = g_factory_part.template construct<NO_EDGE, nm_v::id, T, dim - 1, 0>(gr.getSize(), domain, bc);
CartesianGraphFactory<dim, Graph_CSR<nm_v<dim>, nm_e>> g_factory_part;
gp = g_factory_part.template construct<NO_EDGE, nm_v_id, T, dim - 1, 0>(gr.getSize(), domain, bc);
initLocalToGlobalMap();
//! Get the number of processing units
......@@ -354,12 +354,12 @@ public:
{
for (size_t i = 0; i < gp.getNVertex(); i++)
{
gp.vertex(i).template get<nm_v::x>()[2] = 0.0;
gp.vertex(i).template get<nm_v_x>()[2] = 0.0;
}
}
for (size_t i = 0; i < gp.getNVertex(); i++)
{
gp.vertex(i).template get<nm_v::global_id>() = i;
gp.vertex(i).template get<nm_v_global_id>() = i;
}
}
......@@ -367,7 +367,7 @@ public:
/*! \brief Get the current graph (main)
*
*/
Graph_CSR<nm_v, nm_e> & getGraph()
Graph_CSR<nm_v<dim>, nm_e> & getGraph()
{
return gp;
}
......@@ -466,10 +466,10 @@ public:
#endif
// Copy the geometrical informations inside the pos vector
pos[0] = gp.vertex(id).template get<nm_v::x>()[0];
pos[1] = gp.vertex(id).template get<nm_v::x>()[1];
pos[0] = gp.vertex(id).template get<nm_v_x>()[0];
pos[1] = gp.vertex(id).template get<nm_v_x>()[1];
if (dim == 3)
pos[2] = gp.vertex(id).template get<nm_v::x>()[2];
pos[2] = gp.vertex(id).template get<nm_v_x>()[2];
}
/*! \brief Function that set the weight of the vertex
......@@ -481,15 +481,15 @@ public:
inline void setComputationCost(size_t id, size_t weight)
{
if (!verticesGotWeights)
verticesGotWeights = true;
{verticesGotWeights = true;}
#ifdef SE_CLASS1
if (id >= gp.getNVertex())
std::cerr << __FILE__ << ":" << __LINE__ << "Such vertex doesn't exist (id = " << id << ", " << "total size = " << gp.getNVertex() << ")\n";
{std::cerr << __FILE__ << ":" << __LINE__ << "Such vertex doesn't exist (id = " << id << ", " << "total size = " << gp.getNVertex() << ")\n";}
#endif
// Update vertex in main graph
gp.vertex(id).template get<nm_v::computation>() = weight;
gp.vertex(id).template get<nm_v_computation>() = weight;
}
/*! \brief Checks if weights are used on the vertices
......@@ -513,7 +513,7 @@ public:
std::cerr << __FILE__ << ":" << __LINE__ << "Such vertex doesn't exist (id = " << id << ", " << "total size = " << gp.getNVertex() << ")\n";
#endif
return gp.vertex(id).template get<nm_v::computation>();
return gp.vertex(id).template get<nm_v_computation>();
}
/*! \brief Compute the processor load counting the total weights of its vertices
......@@ -529,7 +529,7 @@ public:
for (rid i = vtxdist.get(p_id); i < vtxdist.get(p_id+1) ; ++i)
load += gp.vertex(m2g.find(i)->second.id).template get<nm_v::computation>();
load += gp.vertex(m2g.find(i)->second.id).template get<nm_v_computation>();
//std::cout << v_cl.getProcessUnitID() << " weight " << load << " size " << sub_g.getNVertex() << "\n";
return load;
......@@ -547,7 +547,7 @@ public:
std::cerr << __FILE__ << ":" << __LINE__ << "Such vertex doesn't exist (id = " << id << ", " << "total size = " << gp.getNVertex() << ")\n";
#endif
gp.vertex(id).template get<nm_v::migration>() = migration;
gp.vertex(id).template get<nm_v_migration>() = migration;
}
/*! \brief Set communication cost of the edge id
......@@ -618,6 +618,22 @@ public:
return gp.getNChilds(id);
}
/*! \brief In case we do not do Dynamic load balancing this this data-structure it is safe to eliminate the full internal graph
*
*
*
*/
void destroy_internal_graph()
{
gp.destroy();
partitions.clear();
partitions.shrink_to_fit();
v_per_proc.clear();
v_per_proc.shrink_to_fit();
m2g.clear();
m2g.rehash(0);
}
/*! \brief Print the current distribution and save it to VTK file
*
* \param file filename
......@@ -625,7 +641,7 @@ public:
*/
void write(const std::string & file)
{
VTKWriter<Graph_CSR<nm_v, nm_e>, VTK_GRAPH> gv2(gp);
VTKWriter<Graph_CSR<nm_v<dim>, nm_e>, VTK_GRAPH> gv2(gp);
gv2.write(std::to_string(v_cl.getProcessUnitID()) + "_" + file + ".vtk");
}
......@@ -663,6 +679,18 @@ public:
return *this;
}
/*! \brief return the the position of the sub-sub-domain
*
* \param i sub-sub-domain id