Commit 46aeaf56 authored by incardon

Grid with sparse_cl

parent 9be1da26
#!groovy
timeout(180)
{
parallel (
...@@ -123,4 +126,5 @@ parallel (
}
)
}
SUBDIRS = src images openfpm_data openfpm_io openfpm_devices openfpm_vcluster openfpm_numerics
ACLOCAL_AMFLAGS = -I m4
bin_PROGRAMS =
pdata:
cd src && make
data:
cd openfpm_data/src && make
devices:
cd openfpm_devices/src && make
vcluster:
cd openfpm_vcluster/src && make
io:
cd openfpm_io/src && make
numerics:
cd openfpm_numerics/src && make
actual_test:
cd src && make actual_test
test_pdata:
cd src && make test
test_data:
cd openfpm_data/src && make test
test_devices:
cd openfpm_devices/src && make test
test_vcluster:
cd openfpm_vcluster/src && make test
test_io:
cd openfpm_io/src && make test
test_numerics:
cd openfpm_numerics/src && make test
test: test_devices test_data test_vcluster test_pdata test_io test_numerics
.PHONY: test_pdata test_data test_devices test_vcluster test_io test_numerics
...@@ -10,7 +10,6 @@
*
* This example shows in more detail the functionality of **ghost_get** and **ghost_put** for a distributed vector.
*
*
* ## Inclusion ## {#e1_v_inclusion}
*
* We activate the vector_dist functionalities
...@@ -175,7 +174,7 @@ int main(int argc, char* argv[])
* Before and after. The blue arrows in the first image indicate the vector field
* for the real particles. In the second image the red arrows instead indicate the
* vector field for the real particles, while the blue arrows indicate the ghosts. We can
* note that the blue arrows do not contain the correct vector. The reason is that
* when we used **ghost_get** we synchronized the scalar and the tensor, but not the vector.
*
* \see \ref e1_part_ghost
...@@ -187,6 +186,34 @@ int main(int argc, char* argv[])
* <img src="http://ppmcore.mpi-cbg.de/web/images/examples/after_ghost_get.jpg"/>
* \endhtmlonly
*
* ## So ... how do I have to place these ghost_get calls? ##
*
* The first thing to do is to place the ghost_get calls so that the program is
* guaranteed to work correctly in parallel. To do this we can use the following
* reasoning. When we loop over particles we distinguish two types of loops:
*
* * A loop that iterates over particles
* * A loop that iterates over particles and their neighborhood particles
*
*
* If the loop is of the first type (we do not loop over the neighborhood particles)
* ghost_get is not necessary. In the second case a ghost_get is needed. The
* second question is which properties have to be synchronized in ghost_get<...>(), or more
* practically what has to be put in place of the ... . To answer this we have to check all
* the properties that we read from the neighborhood particles and pass them to ghost_get
* as a list. To summarize:
\code{.unparsed}
Am I doing a simple loop over particles (1), or am I also looping over neighborhood particles (2)?
For case (1) the answer is "I do not need ghost_get". For case (2) the answer is "I need ghost_get".
If I am in case (2) the second question is which properties I should use.
The answer is: look at all vd.getProp<...>(b) where b is a neighborhood particle. All such ... properties should appear in
ghost_get<...>()
\endcode
* This reasoning is always enough to place the ghost_get calls correctly. For
* finer tuning look at the options below.
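*
* As a minimal sketch of this reasoning (the property indices 0 and 1, the cut-off radius
* r_cut and the cell-list NN are illustrative assumptions, not taken from the example above),
* suppose a quantity stored in property 1 of each particle is accumulated from property 0 of
* its neighborhood particles:
\code{.cpp}
// Assumed setup: vd is a vector_dist with at least two properties and
// NN is a cell-list previously built with vd.getCellList(r_cut)

// Property 0 is read from neighborhood particles below, so it must be
// synchronized on the ghost before the neighborhood loop
vd.ghost_get<0>();

auto it = vd.getDomainIterator();
while (it.isNext())
{
	auto p = it.get();

	// iterate over the neighborhood of p
	auto Np = NN.getNNIterator(NN.getCell(vd.getPos(p)));
	while (Np.isNext())
	{
		auto q = Np.get();

		// skip the particle itself
		if (q == p.getKey())	{++Np; continue;}

		// property 0 of the neighborhood particle q is read here,
		// therefore 0 must appear in the ghost_get<...>() above
		vd.getProp<1>(p) += vd.getProp<0>(q);

		++Np;
	}

	++it;
}
\endcode
* Only property 0 appears in the ghost_get, because it is the only property read from
* neighborhood particles; property 1 is written only on the local (real) particles.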
*
*/
...
...@@ -16,7 +16,7 @@
int main(int argc, char ** argv)
{
	CartesianGraphFactory<2,Graph_CSR<nm_v<2>,nm_e>> g_factory;
	// Cartesian grid
	size_t sz[2] = {20,20};
...@@ -28,19 +28,19 @@ int main(int argc, char ** argv)
	// Graph to decompose
	Graph_CSR<nm_v<2>,nm_e> g = g_factory.construct<nm_e::communication,NO_VERTEX_ID,float,1,0,1>(sz,box,bc);
	// Convert the graph to metis
	Metis<Graph_CSR<nm_v<2>,nm_e>> met(g,4);
	// decompose
	met.decompose<nm_v_id>();
	// Write the decomposition
	VTKWriter<Graph_CSR<nm_v<2>,nm_e>,VTK_GRAPH> vtk(g);
	vtk.write("Metis/vtk_partition.vtk");
}
...
openfpm_data @ f982d0ac
Subproject commit f982d0ac3f3ae0eefbd1ca9e39a700b8898f8ee9
openfpm_devices @ 9f473a4f
Subproject commit 9f473a4f9e8bd1301fa9721bdb384b15f763aec6
openfpm_io @ eafa24bd
Subproject commit eafa24bd983173dfd9c4e9c49e88b160096066ff
openfpm_vcluster @ 95f93aaa
Subproject commit 95f93aaa9ef1df480d9daf8d2f8c4295513c9f9b
...@@ -15,9 +15,13 @@ else()
endif()
add_executable(pdata ${OPENFPM_INIT_FILE} ${CUDA_SOURCES} main.cpp
	Amr/grid_dist_amr_unit_tests.cpp
	Amr/tests/amr_base_unit_tests.cpp
	Debug/debug_test.cpp
	Grid/tests/grid_dist_id_HDF5_chckpnt_restart_test.cpp
	Grid/tests/grid_dist_id_unit_test.cpp
	Grid/tests/sgrid_dist_id_unit_tests.cpp
	#Grid/tests/grid_dist_id_dlb_unit_test.cpp
	Grid/tests/staggered_grid_dist_unit_test.cpp
	Vector/tests/vector_dist_cell_list_tests.cpp
	Vector/tests/vector_dist_complex_prp_unit_test.cpp
...
...@@ -378,7 +378,7 @@ public:
// Optimize the decomposition creating bigger spaces
// And reducing Ghost over-stress
dec_optimizer<dim, Graph_CSR<nm_v<dim>, nm_e>> d_o(dist.getGraph(), gr_dist.getSize());
// Ghost
Ghost<dim,long int> ghe;
...@@ -391,7 +391,7 @@ public:
}
// optimize the decomposition
d_o.template optimize<nm_v_sub_id, nm_v_proc_id>(dist.getGraph(), p_id, loc_box, box_nn_processor,ghe,bc);
// Initialize
if (loc_box.size() > 0)
...@@ -454,9 +454,6 @@ public:
// Check if the box is valid
if (bound.isValidN() == true)
{
// Not necessary, but I prefer
bound.enlarge(ghost);
// calculate the sub-divisions
size_t div[dim];
for (size_t i = 0; i < dim; i++)
...@@ -873,7 +870,10 @@ public:
cart.cd = cd;
cart.domain = domain;
for (size_t i = 0 ; i < dim ; i++)
{
cart.spacing[i] = spacing[i];
cart.magn[i] = magn[i];
};
cart.bbox = bbox;
cart.ghost = g;
...@@ -916,7 +916,10 @@ public:
cart.domain = domain;
cart.sub_domains_global = sub_domains_global;
for (size_t i = 0 ; i < dim ; i++)
{
cart.spacing[i] = spacing[i];
cart.magn[i] = magn[i];
};
cart.ghost = ghost;
...@@ -1027,6 +1030,9 @@ public:
dist = cart.dist;
commCostSet = cart.commCostSet;
cd = cart.cd;
gr_dist = cart.gr_dist;
dist = cart.dist;
domain = cart.domain;
sub_domains_global.swap(cart.sub_domains_global);
...@@ -1842,6 +1848,23 @@ public:
return v_cl;
}
/*! \brief Deallocate the structures used to identify to which internal ghost box a point belongs
*
*/
void free_geo_cell()
{
ie_ghost<dim,T,Memory,layout_base>::free_geo_cell();
}
/*! \brief Deallocate the fine_s structure used to retrieve the sub-domain a point belongs to
*
*/
void free_fines()
{
fine_s.clear();
fine_s.destroy();
}
/*! \brief function to check the consistency of the information of the decomposition
*
* \return false if it is inconsistent
...
...@@ -26,10 +26,10 @@ class DistParMetisDistribution
Box<dim, T> domain;
//! Processor sub-sub-domain graph
DistGraph_CSR<nm_v<dim>, nm_e> g;
//! Convert the graph to parmetis format
DistParmetis<DistGraph_CSR<nm_v<dim>, nm_e>> parmetis_graph;
//! Init vtxdist needed for Parmetis
openfpm::vector<idx_t> vtxdist;
...@@ -91,20 +91,20 @@ public:
domain = dom;
//! Create sub graph
DistGraphFactory<dim, DistGraph_CSR<nm_v<dim>, nm_e>> dist_g_factory;
g = dist_g_factory.template construct<NO_EDGE, T, dim - 1, 0>(gr.getSize(), domain);
g.getDecompositionVector(vtxdist);
if (dim == 2)
for (size_t i = 0; i < g.getNVertex(); i++)
g.vertex(i).template get<nm_v_x>()[2] = 0;
}
/*! \brief Get the current graph (main)
*
*/
DistGraph_CSR<nm_v<dim>, nm_e> & getGraph()
{
return g;
}
...@@ -118,7 +118,7 @@ public:
parmetis_graph.initSubGraph(g);
//! Decompose
parmetis_graph.template decompose<nm_v_proc_id>(g);
//! Get result partition for this processors
idx_t *partition = parmetis_graph.getPartition();
...@@ -143,7 +143,7 @@ public:
parmetis_graph.reset(g);
//! Refine
parmetis_graph.template refine<nm_v_proc_id>(g);
//! Get result partition for this processors
idx_t *partition = parmetis_graph.getPartition();
...@@ -194,10 +194,10 @@ public:
std::cerr << __FILE__ << ":" << __LINE__ << " Position - Such vertex doesn't exist (id = " << id << ", " << "total size = " << g.getNVertex() << ")\n";
#endif
pos[0] = g.vertex(id).template get<nm_v_x>()[0];
pos[1] = g.vertex(id).template get<nm_v_x>()[1];
if (dim == 3)
pos[2] = g.vertex(id).template get<nm_v_x>()[2];
}
/*! \brief Function that set the weight of the vertex
...@@ -215,7 +215,7 @@ public:
#endif
// If the vertex is inside this processor update the value
g.vertex(id).template get<nm_v_computation>() = weight;
}
...@@ -242,7 +242,7 @@ public:
std::cerr << __FILE__ << ":" << __LINE__ << "Such vertex doesn't exist (id = " << id << ", " << "total size = " << g.getTotNVertex() << ")\n";
#endif
return g.vertex(id).template get<nm_v_computation>();
}
/*! \brief Compute the processor load counting the total weights of its vertices
...@@ -255,7 +255,7 @@ public:
for (size_t i = 0; i < g.getNVertex(); i++)
{
load += g.vertex(i).template get<nm_v_computation>();
}
return load;
}
...@@ -292,7 +292,7 @@ public:
std::cerr << __FILE__ << ":" << __LINE__ << "Migration - Such vertex doesn't exist (id = " << id << ", " << "total size = " << g.getNVertex() << ")\n";
#endif
g.vertex(id).template get<nm_v_migration>() = migration;
}
/*! \brief Set communication cost of the edge id
...@@ -338,7 +338,7 @@ public:
*/
void write(const std::string & file)
{
VTKWriter<DistGraph_CSR<nm_v<dim>, nm_e>, DIST_GRAPH> gv2(g);
gv2.write(std::to_string(file + ".vtk"));
}
...
...@@ -53,13 +53,13 @@ class MetisDistribution
Box<dim, T> domain;
//! Global sub-sub-domain graph
Graph_CSR<nm_v<dim>, nm_e> gp;
//! Flag that indicate if we are doing a test (In general it fix the seed)
bool testing = false;
//! Metis decomposer utility
Metis<Graph_CSR<nm_v<dim>, nm_e>> metis_graph;
//! unordered map that map global sub-sub-domain to owned_cost_sub id
std::unordered_map<size_t,size_t> owner_scs;
...@@ -104,7 +104,7 @@ class MetisDistribution
public:
static constexpr unsigned int computation = nm_v_computation;
/*! \brief constructor
*
...@@ -182,20 +182,20 @@ public:
domain = dom;
// Create a cartesian grid graph
CartesianGraphFactory<dim, Graph_CSR<nm_v<dim>, nm_e>> g_factory_part;
gp = g_factory_part.template construct<NO_EDGE, nm_v_id, T, dim - 1, 0>(gr.getSize(), domain, bc);
// Init to 0.0 axis z (to fix in graphFactory)
if (dim < 3)
{
for (size_t i = 0; i < gp.getNVertex(); i++)
{
gp.vertex(i).template get<nm_v_x>()[2] = 0.0;
}
}
for (size_t i = 0; i < gp.getNVertex(); i++)
gp.vertex(i).template get<nm_v_global_id>() = i;
}
/*! \brief Get the current graph (main)
...@@ -203,7 +203,7 @@ public:
* \return the current sub-sub domain Graph
*
*/
Graph_CSR<nm_v<dim>, nm_e> & getGraph()
{
#ifdef SE_CLASS2
check_valid(this,8);
...@@ -230,7 +230,7 @@ public:
{
// we fill the assignment
for (size_t i = 0 ; i < recv_ass.size() ; i++)
gp.template vertex_p<nm_v_computation>(recv_ass.get(i).id) = recv_ass.get(i).w;
metis_graph.initMetisGraph(v_cl.getProcessingUnits(),true);
}
...@@ -239,13 +239,13 @@ public:
metis_graph.onTest(testing);
// decompose
metis_graph.template decompose<nm_v_proc_id>();
if (recv_ass.size() != 0)