...
 
Commits (61)
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
# Change Log
All notable changes to this project will be documented in this file.
## [1.1.0] Development
### Fixed
- Bug fix: grids with an external decomposition accepted a ghost argument but did not change the ghost size (with the exception of one constructor).
Now they all accept a ghost argument and change the ghost size accordingly
## [1.0.0] 13 September 2017
### Added
......
#!groovy
timeout(180)
{
parallel (
......@@ -123,4 +126,5 @@ parallel (
}
)
}
......@@ -321,7 +321,7 @@ int main(int argc, char* argv[])
// visualization
if (i % 100 == 0)
{
Old.write("output",count);
Old.write_frame("output",count);
count++;
}
}
......
......@@ -107,8 +107,8 @@ int main(int argc, char* argv[])
size_t timeSteps = 5000;
// K and F (Physical constant in the equation)
double K = 0.014;
double F = 0.053;
double K = 0.053;
double F = 0.014;
//! \cond [init lib] \endcond
......@@ -149,41 +149,57 @@ int main(int argc, char* argv[])
timer tot_sim;
tot_sim.start();
static grid_key_dx<3> star_stencil_3D[7] = {{0,0,0},
{0,0,-1},
{0,0,1},
{0,-1,0},
{0,1,0},
{-1,0,0},
{1,0,0}};
constexpr int BACK_X = 1;
constexpr int FORWARD_X = 2;
constexpr int BACK_Y = 3;
constexpr int FORWARD_Y = 4;
constexpr int BACK_Z = 5;
constexpr int FORWARD_Z = 6;
for (size_t i = 0; i < timeSteps; ++i)
{
if (i % 300 == 0)
std::cout << "STEP: " << i << std::endl;
auto it = Old.getDomainIterator();
auto it = Old.getDomainIteratorStencil(star_stencil_3D);
while (it.isNext())
{
auto key = it.get();
// center point
auto Cp = it.getStencil<0>();
// update based on Eq 2
New.get<U>(key) = Old.get<U>(key) + uFactor * (
Old.get<U>(key.move(x,1)) +
Old.get<U>(key.move(x,-1)) +
Old.get<U>(key.move(y,1)) +
Old.get<U>(key.move(y,-1)) +
Old.get<U>(key.move(z,1)) +
Old.get<U>(key.move(z,-1)) -
6.0*Old.get<U>(key)) +
- deltaT * Old.get<U>(key) * Old.get<V>(key) * Old.get<V>(key) +
- deltaT * F * (Old.get<U>(key) - 1.0);
New.get<U>(Cp) = Old.get<U>(Cp) + uFactor * (
Old.get<U>(mz) +
Old.get<U>(pz) +
Old.get<U>(my) +
Old.get<U>(py) +
Old.get<U>(mx) +
Old.get<U>(px) -
6.0*Old.get<U>(Cp)) +
- deltaT * Old.get<U>(Cp) * Old.get<V>(Cp) * Old.get<V>(Cp) +
- deltaT * F * (Old.get<U>(Cp) - 1.0);
// update based on Eq 2
New.get<V>(key) = Old.get<V>(key) + vFactor * (
Old.get<V>(key.move(x,1)) +
Old.get<V>(key.move(x,-1)) +
Old.get<V>(key.move(y,1)) +
Old.get<V>(key.move(y,-1)) +
Old.get<V>(key.move(z,1)) +
Old.get<V>(key.move(z,-1)) -
6*Old.get<V>(key)) +
deltaT * Old.get<U>(key) * Old.get<V>(key) * Old.get<V>(key) +
- deltaT * (F+K) * Old.get<V>(key);
New.get<V>(Cp) = Old.get<V>(Cp) + vFactor * (
Old.get<V>(mz) +
Old.get<V>(pz) +
Old.get<V>(my) +
Old.get<V>(py) +
Old.get<V>(mx) +
Old.get<V>(px) -
6*Old.get<V>(Cp)) +
deltaT * Old.get<U>(Cp) * Old.get<V>(Cp) * Old.get<V>(Cp) +
- deltaT * (F+K) * Old.get<V>(Cp);
// Next point in the grid
++it;
......
......@@ -10,7 +10,6 @@
*
* This example shows more in details the functionalities of **ghost_get** and **ghost_put** for a distributed vector.
*
*
* ## Inclusion ## {#e1_v_inclusion}
*
* We activate the vector_dist functionalities
......@@ -175,7 +174,7 @@ int main(int argc, char* argv[])
* Before and after. The blue arrows in the first image indicate the vector field
* for the real particles. In the second image instead the red arrow indicate the
* vector field for the real particle. The blue arrow indicate the ghosts. We can
* note that the blue arrow doea not contain the correct vector. The reason is that
* note that the blue arrow does not contain the correct vector. The reason is that
* when we used **ghost_get** we synchronized the scalar, and the tensor, but not the vector.
*
* \see \ref e1_part_ghost
......@@ -187,6 +186,34 @@ int main(int argc, char* argv[])
* <img src="http://ppmcore.mpi-cbg.de/web/images/examples/after_ghost_get.jpg"/>
* \endhtmlonly
*
* ## So ... how do I have to place these ghost_get calls ##
*
* The first thing to do is to place the ghost_get calls in a way that the program works
* in parallel for sure. In order to do this we can use the following reasoning:
* If we have a loop over particles we distinguish two types of loops:
*
* * A loop that iterate over particles
* * A loop that iterate over particles and neighborhood particles
*
*
* If the loop is of the first type (you do not loop over the neighborhood particles)
* ghost_get is not necessary. If I am in the second case I need a ghost_get. The
* second point is which property I have to synchronize ghost_get<...>(), or more
* practically what I have to put in the ... . To answer this we have to check all
* the properties that we use from the neighborhood particles and pass it to ghost_get
* as a list. To summarize:
\code{.unparsed}
I am doing a simple loop over particles (1), or I am looping also over neighborhood particles (2)?
For the case (1) the answer is "I do not need ghost_get". For the case (2) the answer is "I need ghost_get"
if I am on the case (2) the second question is which parameters should I use ?
The answer is look at all vd.getProp<...>(b) where b is a neighborhood particle. All ... properties should appear in
ghost_get<...>()
\endcode
* This reasoning is always enough to place the ghost_get functions correctly. For
* finer tuning look at the options below
*
*/
......
......@@ -1444,7 +1444,7 @@ int main(int argc, char* argv[])
// calculate the pressure at the sensor points
sensor_pressure(vd,NN,press_t,probes);
vd.write("Geometry",write);
vd.write_frame("Geometry",write);
write++;
if (v_cl.getProcessUnitID() == 0)
......
......@@ -16,7 +16,7 @@
int main(int argc, char ** argv)
{
CartesianGraphFactory<2,Graph_CSR<nm_v,nm_e>> g_factory;
CartesianGraphFactory<2,Graph_CSR<nm_v<2>,nm_e>> g_factory;
// Cartesian grid
size_t sz[2] = {20,20};
......@@ -28,19 +28,19 @@ int main(int argc, char ** argv)
// Graph to decompose
Graph_CSR<nm_v,nm_e> g = g_factory.construct<nm_e::communication,NO_VERTEX_ID,float,1,0,1>(sz,box,bc);
Graph_CSR<nm_v<2>,nm_e> g = g_factory.construct<nm_e::communication,NO_VERTEX_ID,float,1,0,1>(sz,box,bc);
// Convert the graph to metis
Metis<Graph_CSR<nm_v,nm_e>> met(g,4);
Metis<Graph_CSR<nm_v<2>,nm_e>> met(g,4);
// decompose
met.decompose<nm_v::id>();
met.decompose<nm_v_id>();
// Write the decomposition
VTKWriter<Graph_CSR<nm_v,nm_e>,VTK_GRAPH> vtk(g);
VTKWriter<Graph_CSR<nm_v<2>,nm_e>,VTK_GRAPH> vtk(g);
vtk.write("Metis/vtk_partition.vtk");
}
......
openfpm_data @ a25c2fb3
Subproject commit 261069dff2b91021d274d0db1fe71191633f1ef6
Subproject commit a25c2fb39f182f2e2787d3f0f9416f12aa5a64d9
openfpm_devices @ 98033dea
Subproject commit 2da3b22b477d8b94b60fb9eb5f1a4daacb6857b5
Subproject commit 98033dea8fd01877d50de6bb96078f8b373a4c5a
openfpm_io @ cf184ff5
Subproject commit fac23ddd992dc17d82904bd5083f5235416c2255
Subproject commit cf184ff5cada1f3f39ea144b6e151e8274d8a20f
openfpm_numerics @ 15cfd496
Subproject commit 4e569e3bcec0ac24ebd0b2a30a1b7bf9b602497d
Subproject commit 15cfd496c3c1998d0dae508c044a29aeaba4da02
openfpm_vcluster @ 88038776
Subproject commit a99918127f5835c31d2df4e9020efdeb46d07d66
Subproject commit 880387762a03c1377f8627768bbca40f7d6fb9d2
#! /bin/bash
# check if the directory $1/VCDEVEL exist
if [ -d "$1/VCDEVEL" ]; then
echo "VCDEVEL already installed"
exit 0
fi
wget http://ppmcore.mpi-cbg.de/upload/Vc-1.3.2.tar.gz
#rm -rf Vc
tar -xf Vc-1.3.2.tar.gz
cd Vc-1.3.2
mkdir build
cd build
cmake -DCMAKE_INSTALL_PREFIX:PATH=$1/VCDEVEL ..
make
make install
This diff is collapsed.
/*
* grid_dist_amr_key.hpp
*
* Created on: Sep 23, 2017
* Author: i-bird
*/
#ifndef SRC_AMR_GRID_DIST_AMR_KEY_HPP_
#define SRC_AMR_GRID_DIST_AMR_KEY_HPP_
/*! \brief AMR grid distributed key
*
* Identifies a point in an AMR grid hierarchy: the refinement level
* plus the distributed-grid key of the point on that level.
*
* \tparam dim dimensionality
*
*/
template<unsigned int dim>
class grid_dist_amr_key
{
//! actual refinement level
size_t lvl;
//! actual position in the distributed grid of that level
grid_dist_key_dx<dim> key;
public:
/*! \brief Constructor
*
* \param lvl level
* \param key distributed grid key
*
*/
inline grid_dist_amr_key(size_t lvl,
grid_dist_key_dx<dim> key)
:lvl(lvl),key(key)
{}
/*! \brief Return the grid key
*
* \return the distributed key
*
*/
inline const grid_dist_key_dx<dim> & getKey() const
{
return key;
}
/*! \brief Return the grid key (as reference)
*
* \return the distributed key
*
*/
inline grid_dist_key_dx<dim> & getKeyRef()
{
return key;
}
/*! \brief Return the level
*
* \return the level
*
*/
inline size_t getLvl() const
{
return lvl;
}
/*! \brief Set the level
*
* \param lvl level to set
*
*/
inline void setLvl(size_t lvl)
{
this->lvl = lvl;
}
/*! \brief Create a new key moving the old one
*
* \param d dimension id
* \param s number of steps
*
* \return new key (same level, moved position)
*
*/
inline grid_dist_amr_key<dim> moveSpace(size_t d,size_t s)
{
return grid_dist_amr_key<dim>(lvl,key.move(d,s));
}
};
#endif /* SRC_AMR_GRID_DIST_AMR_KEY_HPP_ */
/*
* grid_amr_dist_key_iterator.hpp
*
* Created on: Sep 22, 2017
* Author: i-bird
*/
#ifndef SRC_AMR_GRID_DIST_AMR_KEY_ITERATOR_HPP_
#define SRC_AMR_GRID_DIST_AMR_KEY_ITERATOR_HPP_
#include "Vector/map_vector.hpp"
#include "Grid/Iterators/grid_dist_id_iterator.hpp"
#include "grid_dist_amr_key.hpp"
/*! \brief Iterator over all the points of an AMR grid hierarchy
 *
 * It chains the domain iterators of every refinement level: the points
 * of level 0 are returned first, then level 1, and so on. Levels whose
 * iterator has no points are skipped.
 *
 * \tparam dim dimensionality
 * \tparam device_grid type of the underlying device grid
 * \tparam device_sub_it type of the sub-grid iterator
 * \tparam it_type type of the per-level distributed-grid iterator
 *
 */
template<unsigned int dim, typename device_grid, typename device_sub_it, typename it_type = grid_dist_iterator<dim,device_grid,device_sub_it,FREE>>
class grid_dist_amr_key_iterator
{
	//! Array of grid iterators (one per level), owned by the caller
	openfpm::vector<it_type> & git;

	//! Iterator of the level currently traversed (points into git, never owned)
	it_type * a_it = nullptr;

	//! index of the level currently traversed
	size_t g_c;

	/*! \brief from g_c increment g_c until you find a valid grid
	 *
	 * A level whose iterator has no points is skipped. If a valid level
	 * is found, a_it is updated to point to its iterator; otherwise
	 * g_c == git.size() and isNext() becomes false.
	 *
	 */
	void selectValidGrid()
	{
		// When the grid has size 0 potentially all the other informations are garbage
		while (g_c < git.size() && git.get(g_c).isNext() == false ) g_c++;

		// get the next grid iterator
		if (g_c < git.size())
		{a_it = &git.get(g_c);}
	}

public:

	/*! \brief Constructor
	 *
	 * \param git vector of per-level iterators
	 *
	 */
	grid_dist_amr_key_iterator(openfpm::vector<it_type> & git)
	:git(git),g_c(0)
	{
		// Position on the first level that has points. Unlike an
		// unconditional git.get(0), this is safe when git is empty.
		selectValidGrid();
	}

	//! Destructor
	~grid_dist_amr_key_iterator()
	{
	}

	/*! \brief Get the next element
	 *
	 * \return itself, advanced by one point (switching to the next
	 *         non-empty level when the current one is exhausted)
	 *
	 */
	inline grid_dist_amr_key_iterator<dim,device_grid,device_sub_it,it_type> & operator++()
	{
		++(*a_it);

		// check if a_it is at the end
		if (a_it->isNext() == true)
		{return *this;}
		else
		{
			// switch to the next non-empty level
			g_c++;

			selectValidGrid();
		}

		return *this;
	}

	/*! \brief Is there a next point
	 *
	 * \return true if there is a next point
	 *
	 */
	inline bool isNext()
	{
		return g_c < git.size();
	}

	/*! \brief Return the actual AMR grid iterator point
	 *
	 * \return the AMR key (level + distributed key)
	 *
	 */
	inline grid_dist_amr_key<dim> get()
	{
		return grid_dist_amr_key<dim>(g_c,a_it->get());
	}

	/*! \brief Return the actual global grid position in the AMR struct in global
	 * coordinates
	 *
	 * \return the global grid key
	 *
	 */
	inline grid_key_dx<dim> getGKey()
	{
		return git.get(g_c).getGKey(a_it->get());
	}

	/*! \brief Return the level at which we are
	 *
	 * \return the level
	 *
	 */
	inline size_t getLvl() const
	{
		return g_c;
	}
};
#endif /* SRC_AMR_GRID_DIST_AMR_KEY_ITERATOR_HPP_ */
This diff is collapsed.
/*
* amr_base_unit_test.cpp
*
* Created on: Oct 5, 2017
* Author: i-bird
*/
#define BOOST_TEST_DYN_LINK
#include <boost/test/unit_test.hpp>
#include "Grid/grid_dist_id.hpp"
#include "Point_test.hpp"
#include "Grid/tests/grid_dist_id_util_tests.hpp"
BOOST_AUTO_TEST_SUITE( amr_grid_dist_id_test )
// Test a distributed grid built from an explicit list of patches (C_draw):
// fills every domain point with its global linear id, checks the total point
// count, then verifies after ghost_get that every point (domain + ghost)
// lying inside a patch carries the correct value.
BOOST_AUTO_TEST_CASE( grid_dist_id_amr )
{
// Domain
Box<2,float> domain2({0.0,0.0},{1.0,1.0});
size_t sz[2] = {100,100};
// Ghost
Ghost<2,long int> g(1);
// periodicity
periodicity<2> pr = {{PERIODIC,PERIODIC}};
// Patches (boxes in grid units) that define where the grid has points
openfpm::vector<Box<2,long int>> C_draw;
C_draw.add(Box<2,long int>({20,20},{50,24}));
C_draw.add(Box<2,long int>({51,20},{60,24}));
C_draw.add(Box<2,long int>({61,20},{70,24}));
C_draw.add(Box<2,long int>({20,25},{24,66}));
C_draw.add(Box<2,long int>({15,67},{49,85}));
C_draw.add(Box<2,long int>({50,76},{70,81}));
C_draw.add(Box<2,long int>({30,25},{34,37}));
C_draw.add(Box<2,long int>({50,66},{70,70}));
// Expected number of grid points: sum of the patch volumes
// (the patches above do not overlap)
size_t volume_key = 0;
for (size_t i = 0 ; i < C_draw.size() ; i++)
{
volume_key += Box<2,long int>(C_draw.get(i)).getVolumeKey();
}
// Distributed grid with id decomposition
grid_dist_id<2,float,Point_test<float>> g_dist(sz,domain2,g,pr,C_draw);
// fill with gkey: each point gets its global linear id as value
auto git = g_dist.getDomainIterator();
grid_sm<2,void> gs(sz);
size_t count = 0;
while (git.isNext())
{
auto key = git.get();
auto gkey = git.getGKey(key);
g_dist.template get<0>(key) = gs.LinId(gkey);
count++;
++git;
}
// Reduce the per-processor counts and compare with the expected volume
Vcluster & vcl = create_vcluster();
vcl.sum(count);
vcl.execute();
BOOST_REQUIRE_EQUAL(count,volume_key);
g_dist.ghost_get<0>();
// Check it is correct
bool check = true;
size_t check_count = 0;
auto git2 = g_dist.getDomainGhostIterator();
while (git2.isNext())
{
auto key = git2.get();
auto gkey = git2.getGKey(key);
float value = g_dist.template get<0>(key);
// check if the point is inside or outside the domain
for (size_t k = 0; k < C_draw.size() ; k++)
{
if (Box<2,long int>(C_draw.get(k)).isInside(gkey.toPoint()) == true)
{
// point belongs to a patch: its value must match its global linear id
check &= value == gs.LinId(gkey);
// get the gdb_ext
auto & gdb_ext = g_dist.getLocalGridsInfo();
// count the point only if it falls inside a local domain box
// (ghost copies of the same point are not counted here)
for (size_t s = 0 ; s < gdb_ext.size() ; s++)
{
Box<2,long int> bx = gdb_ext.get(s).Dbox;
bx += gdb_ext.get(s).origin;
if (bx.isInside(gkey.toPoint()))
{
check_count++;
break;
}
}
break;
}
}
++git2;
}
// Reduce and validate: every patch point seen at least once across processors
vcl.sum(check_count);
vcl.execute();
BOOST_REQUIRE_EQUAL(check,true);
BOOST_REQUIRE(check_count >= volume_key);
}
// Run the 2D full-grid AMR test on a sequence of progressively smaller grids:
// a single patch covering the whole domain is used, so the AMR grid must
// behave like a plain distributed grid (checked by Test2D_core).
BOOST_AUTO_TEST_CASE( amr_grid_dist_id_iterator_test_use_2D)
{
	// Unit-square simulation domain
	Box<2,float> domain({0.0,0.0},{1.0,1.0});

	// Initial grid edge: total points scale with the number of processors
#ifdef TEST_COVERAGE_MODE
	long int k = 256*256*create_vcluster().getProcessingUnits();
#else
	long int k = 1024*1024*create_vcluster().getProcessingUnits();
#endif
	k = std::pow(k, 1/2.);

	// step sizes used to shrink k between rounds
	long int coarse_step = k / 30;
	if (coarse_step == 0)
	{coarse_step = 1;}
	long int fine_step = 21;

	print_test( "AMR Testing 2D full grid k<=",k);

	// 2D test: repeat with smaller and smaller grids
	while (k >= 2)
	{
		BOOST_TEST_CHECKPOINT( "AMR Testing 2D full grid k=" << k );

		//! [Create and access a distributed grid]

		// grid size
		size_t sz[2];
		sz[0] = k;
		sz[1] = k;

		// periodicity
		periodicity<2> bcond = {{PERIODIC,PERIODIC}};

		// Ghost
		Ghost<2,long int> gh(1);

		// one patch covering the whole grid
		openfpm::vector<Box<2,long int>> patches;
		patches.add(Box<2,long int>({0,0},{k-1,k-1}));

		// Distributed grid with id decomposition
		grid_dist_id<2, float, scalar<float>> g_dist(sz,domain,gh,bcond,patches);

		Test2D_core(g_dist,sz,k);

		// shrink: big steps while far from the end, small steps near it
		k -= (k > 2*coarse_step) ? coarse_step : fine_step;
	}
}
BOOST_AUTO_TEST_SUITE_END()
......@@ -255,7 +255,7 @@ public:
// Optimize the decomposition creating bigger spaces
// And reducing Ghost over-stress
dec_optimizer<dim, Graph_CSR<nm_v, nm_e>> d_o(dist.getGraph(), gr_dist.getSize());
dec_optimizer<dim, Graph_CSR<nm_v<dim>, nm_e>> d_o(dist.getGraph(), gr_dist.getSize());
// Ghost
Ghost<dim,long int> ghe;
......@@ -268,7 +268,7 @@ public:
}
// optimize the decomposition
d_o.template optimize<nm_v::sub_id, nm_v::proc_id>(dist.getGraph(), p_id, loc_box, box_nn_processor,ghe,bc);
d_o.template optimize<nm_v_sub_id, nm_v_proc_id>(dist.getGraph(), p_id, loc_box, box_nn_processor,ghe,bc);
// Initialize
if (loc_box.size() > 0)
......@@ -325,7 +325,7 @@ public:
size_t lin = gr_dist.LinId(key2);
size_t lin2 = gr.LinId(key);
fine_s.get(lin2) = dist.getGraph().template vertex_p<nm_v::proc_id>(lin);
fine_s.get(lin2) = dist.getGraph().template vertex_p<nm_v_proc_id>(lin);
++git;
}
......@@ -347,7 +347,7 @@ public:
if (bound.isValidN() == true)
{
// Not necessary, but I prefer
bound.enlarge(ghost);
//bound.enlarge(ghost);
// calculate the sub-divisions
size_t div[dim];
......@@ -762,7 +762,10 @@ public:
cart.cd = cd;
cart.domain = domain;
for (size_t i = 0 ; i < dim ; i++)
{cart.spacing[i] = spacing[i];};
{
cart.spacing[i] = spacing[i];
cart.magn[i] = magn[i];
};
cart.bbox = bbox;
cart.ghost = g;
......@@ -800,8 +803,13 @@ public:
cart.gr = gr;
cart.cd = cd;
cart.domain = domain;
cart.gr_dist = gr_dist;
cart.dist = dist;
for (size_t i = 0 ; i < dim ; i++)
{cart.spacing[i] = spacing[i];};
{
cart.spacing[i] = spacing[i];
cart.magn[i] = magn[i];
};
cart.ghost = ghost;
......@@ -832,9 +840,14 @@ public:
gr = cart.gr;
cd = cart.cd;
domain = cart.domain;
gr_dist = cart.gr_dist;
dist = cart.dist;
for (size_t i = 0 ; i < dim ; i++)
{spacing[i] = cart.spacing[i];};
{
spacing[i] = cart.spacing[i];
magn[i] = cart.magn[i];
};
ghost = cart.ghost;
......@@ -864,9 +877,15 @@ public:
fine_s.swap(cart.fine_s);
gr = cart.gr;
cd = cart.cd;
gr_dist = cart.gr_dist;
dist = cart.dist;
domain = cart.domain;
for (size_t i = 0 ; i < dim ; i++)
{spacing[i] = cart.spacing[i];};
{
spacing[i] = cart.spacing[i];
magn[i] = cart.magn[i];
};
ghost = cart.ghost;
......@@ -1567,6 +1586,23 @@ public:
return v_cl;
}
/*! \brief Deallocate structures that identify a point to which internal ghost is located
*
*/
void free_geo_cell()
{
ie_ghost<dim,T>::free_geo_cell();
}
/*! \brief Deallocate structures that identify a point to which internal ghost is located
*
*/
void free_fines()
{
fine_s.clear();
fine_s.shrink_to_fit();
}
/*! \brief function to check the consistency of the information of the decomposition
*
* \return false if is inconsistent
......
......@@ -26,10 +26,10 @@ class DistParMetisDistribution
Box<dim, T> domain;
//! Processor sub-sub-domain graph
DistGraph_CSR<nm_v, nm_e> g;
DistGraph_CSR<nm_v<dim>, nm_e> g;
//! Convert the graph to parmetis format
DistParmetis<DistGraph_CSR<nm_v, nm_e>> parmetis_graph;
DistParmetis<DistGraph_CSR<nm_v<dim>, nm_e>> parmetis_graph;
//! Init vtxdist needed for Parmetis
openfpm::vector<idx_t> vtxdist;
......@@ -91,20 +91,20 @@ public:
domain = dom;
//! Create sub graph
DistGraphFactory<dim, DistGraph_CSR<nm_v, nm_e>> dist_g_factory;
DistGraphFactory<dim, DistGraph_CSR<nm_v<dim>, nm_e>> dist_g_factory;
g = dist_g_factory.template construct<NO_EDGE, T, dim - 1, 0>(gr.getSize(), domain);
g.getDecompositionVector(vtxdist);
if (dim == 2)
for (size_t i = 0; i < g.getNVertex(); i++)
g.vertex(i).template get<nm_v::x>()[2] = 0;
g.vertex(i).template get<nm_v_x>()[2] = 0;
}
/*! \brief Get the current graph (main)
*
*/
DistGraph_CSR<nm_v, nm_e> & getGraph()
DistGraph_CSR<nm_v<dim>, nm_e> & getGraph()
{
return g;
}
......@@ -118,7 +118,7 @@ public:
parmetis_graph.initSubGraph(g);
//! Decompose
parmetis_graph.decompose<nm_v::proc_id>(g);
parmetis_graph.template decompose<nm_v_proc_id>(g);
//! Get result partition for this processors
idx_t *partition = parmetis_graph.getPartition();
......@@ -143,7 +143,7 @@ public:
parmetis_graph.reset(g);
//! Refine
parmetis_graph.refine<nm_v::proc_id>(g);
parmetis_graph.template refine<nm_v_proc_id>(g);
//! Get result partition for this processors
idx_t *partition = parmetis_graph.getPartition();
......@@ -194,10 +194,10 @@ public:
std::cerr << __FILE__ << ":" << __LINE__ << " Position - Such vertex doesn't exist (id = " << id << ", " << "total size = " << g.getNVertex() << ")\n";
#endif
pos[0] = g.vertex(id).template get<nm_v::x>()[0];
pos[1] = g.vertex(id).template get<nm_v::x>()[1];
pos[0] = g.vertex(id).template get<nm_v_x>()[0];
pos[1] = g.vertex(id).template get<nm_v_x>()[1];
if (dim == 3)
pos[2] = g.vertex(id).template get<nm_v::x>()[2];
pos[2] = g.vertex(id).template get<nm_v_x>()[2];
}
/*! \brief Function that set the weight of the vertex
......@@ -215,7 +215,7 @@ public:
#endif
// If the vertex is inside this processor update the value
g.vertex(id).template get<nm_v::computation>() = weight;
g.vertex(id).template get<nm_v_computation>() = weight;
}
......@@ -240,7 +240,7 @@ public:
std::cerr << __FILE__ << ":" << __LINE__ << "Such vertex doesn't exist (id = " << id << ", " << "total size = " << g.getTotNVertex() << ")\n";
#endif
return g.vertex(id).template get<nm_v::computation>();
return g.vertex(id).template get<nm_v_computation>();
}
/*! \brief Compute the processor load counting the total weights of its vertices
......@@ -253,7 +253,7 @@ public:
for (size_t i = 0; i < g.getNVertex(); i++)
{
load += g.vertex(i).template get<nm_v::computation>();
load += g.vertex(i).template get<nm_v_computation>();
}
return load;
}
......@@ -294,7 +294,7 @@ public:
std::cerr << __FILE__ << ":" << __LINE__ << "Migration - Such vertex doesn't exist (id = " << id << ", " << "total size = " << g.getNVertex() << ")\n";
#endif
g.vertex(id).template get<nm_v::migration>() = migration;
g.vertex(id).template get<nm_v_migration>() = migration;
}
/*! \brief Set communication cost of the edge id
......@@ -333,7 +333,7 @@ public:
*/
void write(const std::string & file)
{
VTKWriter<DistGraph_CSR<nm_v, nm_e>, DIST_GRAPH> gv2(g);
VTKWriter<DistGraph_CSR<nm_v<dim>, nm_e>, DIST_GRAPH> gv2(g);
gv2.write(std::to_string(file + ".vtk"));
}
......
......@@ -39,13 +39,13 @@ class MetisDistribution
Box<dim, T> domain;
//! Global sub-sub-domain graph
Graph_CSR<nm_v, nm_e> gp;
Graph_CSR<nm_v<dim>, nm_e> gp;
//! Flag that indicate if we are doing a test (In general it fix the seed)
bool testing = false;
//! Metis decomposer utility
Metis<Graph_CSR<nm_v, nm_e>> metis_graph;
Metis<Graph_CSR<nm_v<dim>, nm_e>> metis_graph;
/*! \brief sub-domain list and weight
*
......@@ -104,7 +104,7 @@ class MetisDistribution
public:
static constexpr unsigned int computation = nm_v::computation;
static constexpr unsigned int computation = nm_v_computation;
/*! \brief constructor
*
......@@ -182,20 +182,20 @@ public:
domain = dom;
// Create a cartesian grid graph
CartesianGraphFactory<dim, Graph_CSR<nm_v, nm_e>> g_factory_part;
gp = g_factory_part.template construct<NO_EDGE, nm_v::id, T, dim - 1, 0>(gr.getSize(), domain, bc);
CartesianGraphFactory<dim, Graph_CSR<nm_v<dim>, nm_e>> g_factory_part;
gp = g_factory_part.template construct<NO_EDGE, nm_v_id, T, dim - 1, 0>(gr.getSize(), domain, bc);
// Init to 0.0 axis z (to fix in graphFactory)
if (dim < 3)
{
for (size_t i = 0; i < gp.getNVertex(); i++)
{
gp.vertex(i).template get<nm_v::x>()[2] = 0.0;
gp.vertex(i).template get<nm_v_x>()[2] = 0.0;
}
}
for (size_t i = 0; i < gp.getNVertex(); i++)
gp.vertex(i).template get<nm_v::global_id>() = i;
gp.vertex(i).template get<nm_v_global_id>() = i;
}
/*! \brief Get the current graph (main)
......@@ -203,7 +203,7 @@ public:
* \return the current sub-sub domain Graph
*
*/
Graph_CSR<nm_v, nm_e> & getGraph()
Graph_CSR<nm_v<dim>, nm_e> & getGraph()
{
#ifdef SE_CLASS2
check_valid(this,8);
......@@ -230,7 +230,7 @@ public:
{
// we fill the assignment
for (size_t i = 0 ; i < recv_ass.size() ; i++)
gp.template vertex_p<nm_v::computation>(recv_ass.get(i).id) = recv_ass.get(i).w;
gp.template vertex_p<nm_v_computation>(recv_ass.get(i).id) = recv_ass.get(i).w;
metis_graph.initMetisGraph(v_cl.getProcessingUnits(),true);
}
......@@ -239,13 +239,13 @@ public:
metis_graph.onTest(testing);
// decompose
metis_graph.decompose<nm_v::proc_id>();
metis_graph.template decompose<nm_v_proc_id>();
if (recv_ass.size() != 0)
{
// we fill the assignment
for (size_t i = 0 ; i < recv_ass.size() ; i++)
recv_ass.get(i).w = gp.template vertex_p<nm_v::proc_id>(recv_ass.get(i).id);
recv_ass.get(i).w = gp.template vertex_p<nm_v_proc_id>(recv_ass.get(i).id);
}
else
{
......@@ -255,7 +255,7 @@ public:
for (size_t i = 0 ; i < gp.getNVertex() ; i++)
{
recv_ass.get(i).id = i;
recv_ass.get(i).w = gp.template vertex_p<nm_v::proc_id>(i);
recv_ass.get(i).w = gp.template vertex_p<nm_v_proc_id>(i);
}
}
}
......@@ -277,7 +277,7 @@ public:
// Fill the metis graph
for (size_t i = 0 ; i < recv_ass.size() ; i++)
{
gp.template vertex_p<nm_v::proc_id>(recv_ass.get(i).id) = recv_ass.get(i).w;
gp.template vertex_p<nm_v_proc_id>(recv_ass.get(i).id) = recv_ass.get(i).w;
if (recv_ass.get(i).w == v_cl.getProcessUnitID())
{
......@@ -330,10 +330,10 @@ public:
check_overflow(id);
// Copy the geometrical informations inside the pos vector
pos[0] = gp.vertex(id).template get<nm_v::x>()[0];
pos[1] = gp.vertex(id).template get<nm_v::x>()[1];
pos[0] = gp.vertex(id).template get<nm_v_x>()[0];
pos[1] = gp.vertex(id).template get<nm_v_x>()[1];
if (dim == 3)
pos[2] = gp.vertex(id).template get<nm_v::x>()[2];
{pos[2] = gp.vertex(id).template get<nm_v_x>()[2];}
}
/*! \brief function that get the computational cost of the sub-sub-domain
......@@ -349,7 +349,7 @@ public:
check_valid(this,8);
#endif
check_overflow(id);
return gp.vertex(id).template get<nm_v::computation>();
return gp.vertex(id).template get<nm_v_computation>();
}
......@@ -394,7 +394,7 @@ public:
check_overflow(id);
#endif
gp.vertex(id).template get<nm_v::migration>() = cost;
gp.vertex(id).template get<nm_v_migration>() = cost;
}
/*! \brief Set communication cost between neighborhood sub-sub-domains (weight on the edge)
......@@ -522,7 +522,7 @@ public:
check_valid(this,8);
#endif
VTKWriter<Graph_CSR<nm_v, nm_e>, VTK_GRAPH> gv2(gp);
VTKWriter<Graph_CSR<nm_v<dim>, nm_e>, VTK_GRAPH> gv2(gp);
gv2.write(std::to_string(v_cl.getProcessUnitID()) + "_" + out + ".vtk");
}
......@@ -545,7 +545,7 @@ public:
if (v_cl.getProcessUnitID() == 0)
{
for (size_t i = 0; i < gp.getNVertex(); i++)
loads.get(gp.template vertex_p<nm_v::proc_id>(i)) += gp.template vertex_p<nm_v::computation>(i);
{loads.get(gp.template vertex_p<nm_v_proc_id>(i)) += gp.template vertex_p<nm_v_computation>(i);}
for (size_t i = 0 ; i < v_cl.getProcessingUnits() ; i++)
{
......
......@@ -47,10 +47,10 @@ class ParMetisDistribution
Box<dim, T> domain;
//! Global sub-sub-domain graph
Graph_CSR<nm_v, nm_e> gp;
Graph_CSR<nm_v<dim>, nm_e> gp;
//! Convert the graph to parmetis format
Parmetis<Graph_CSR<nm_v, nm_e>> parmetis_graph;
Parmetis<Graph_CSR<nm_v<dim>, nm_e>> parmetis_graph;
//! Id of the sub-sub-domain where we set the costs
openfpm::vector<size_t> sub_sub_owner;
......@@ -114,10 +114,10 @@ class ParMetisDistribution
auto v_id = m2g.find(l)->second.id;
// Update proc id in the vertex (using the old map)
gp.template vertex_p<nm_v::proc_id>(v_id) = partitions.get(i).get(k);
gp.template vertex_p<nm_v_proc_id>(v_id) = partitions.get(i).get(k);
if (partitions.get(i).get(k) == (long int)v_cl.getProcessUnitID())
sub_sub_owner.add(v_id);
{sub_sub_owner.add(v_id);}
// Add vertex to temporary structure of distribution (needed to update main graph)
v_per_proc.get(partitions.get(i).get(k)).add(getVertexGlobalId(l));
......@@ -137,12 +137,12 @@ class ParMetisDistribution
for (size_t i = 0 ; i < gp.getNVertex(); ++i)
{
size_t pid = gp.template vertex_p<nm_v::proc_id>(i);
size_t pid = gp.template vertex_p<nm_v_proc_id>(i);
rid j = rid(vtxdist.get(pid).id + cnt.get(pid));
gid gi = gid(i);
gp.template vertex_p<nm_v::id>(i) = j.id;
gp.template vertex_p<nm_v_id>(i) = j.id;
cnt.get(pid)++;
setMapId(j,gi);
......@@ -325,8 +325,8 @@ public:
domain = dom;
// Create a cartesian grid graph
CartesianGraphFactory<dim, Graph_CSR<nm_v, nm_e>> g_factory_part;
gp = g_factory_part.template construct<NO_EDGE, nm_v::id, T, dim - 1, 0>(gr.getSize(), domain, bc);
CartesianGraphFactory<dim, Graph_CSR<nm_v<dim>, nm_e>> g_factory_part;
gp = g_factory_part.template construct<NO_EDGE, nm_v_id, T, dim - 1, 0>(gr.getSize(), domain, bc);
initLocalToGlobalMap();
//! Get the number of processing units
......@@ -351,12 +351,12 @@ public:
{
for (size_t i = 0; i < gp.getNVertex(); i++)
{
gp.vertex(i).template get<nm_v::x>()[2] = 0.0;
gp.vertex(i).template get<nm_v_x>()[2] = 0.0;
}
}
for (size_t i = 0; i < gp.getNVertex(); i++)
{
gp.vertex(i).template get<nm_v::global_id>() = i;
gp.vertex(i).template get<nm_v_global_id>() = i;
}
}
......@@ -364,7 +364,7 @@ public:
/*! \brief Get the current graph (main)
*
*/
Graph_CSR<nm_v, nm_e> & getGraph()
Graph_CSR<nm_v<dim>, nm_e> & getGraph()
{
return gp;
}
......@@ -463,10 +463,10 @@ public:
#endif
// Copy the geometrical informations inside the pos vector
pos[0] = gp.vertex(id).template get<nm_v::x>()[0];
pos[1] = gp.vertex(id).template get<nm_v::x>()[1];
pos[0] = gp.vertex(id).template get<nm_v_x>()[0];
pos[1] = gp.vertex(id).template get<nm_v_x>()[1];
if (dim == 3)
pos[2] = gp.vertex(id).template get<nm_v::x>()[2];
pos[2] = gp.vertex(id).template get<nm_v_x>()[2];
}
/*! \brief Function that set the weight of the vertex
......@@ -478,15 +478,15 @@ public:
inline void setComputationCost(size_t id, size_t weight)
{
if (!verticesGotWeights)
verticesGotWeights = true;
{verticesGotWeights = true;}
#ifdef SE_CLASS1
if (id >= gp.getNVertex())
std::cerr << __FILE__ << ":" << __LINE__ << "Such vertex doesn't exist (id = " << id << ", " << "total size = " << gp.getNVertex() << ")\n";
{std::cerr << __FILE__ << ":" << __LINE__ << "Such vertex doesn't exist (id = " << id << ", " << "total size = " << gp.getNVertex() << ")\n";}
#endif
// Update vertex in main graph
gp.vertex(id).template get<nm_v::computation>() = weight;
gp.vertex(id).template get<nm_v_computation>() = weight;
}
/*! \brief Checks if weights are used on the vertices
......@@ -510,7 +510,7 @@ public:
std::cerr << __FILE__ << ":" << __LINE__ << "Such vertex doesn't exist (id = " << id << ", " << "total size = " << gp.getNVertex() << ")\n";
#endif
return gp.vertex(id).template get<nm_v::computation>();
return gp.vertex(id).template get<nm_v_computation>();
}
/*! \brief Compute the processor load counting the total weights of its vertices
......@@ -526,7 +526,7 @@ public:
for (rid i = vtxdist.get(p_id); i < vtxdist.get(p_id+1) ; ++i)
load += gp.vertex(m2g.find(i)->second.id).template get<nm_v::computation>();
load += gp.vertex(m2g.find(i)->second.id).template get<nm_v_computation>();
//std::cout << v_cl.getProcessUnitID() << " weight " << load << " size " << sub_g.getNVertex() << "\n";
return load;
......@@ -544,7 +544,7 @@ public:
std::cerr << __FILE__ << ":" << __LINE__ << "Such vertex doesn't exist (id = " << id << ", " << "total size = " << gp.getNVertex() << ")\n";
#endif
gp.vertex(id).template get<nm_v::migration>() = migration;
gp.vertex(id).template get<nm_v_migration>() = migration;
}
/*! \brief Set communication cost of the edge id
......@@ -615,6 +615,22 @@ public:
return gp.getNChilds(id);
}
/*! \brief In case we do not do Dynamic load balancing this this data-structure it is safe to eliminate the full internal graph
*
*
*
*/
void destroy_internal_graph()
{
gp.destroy();
partitions.clear();
partitions.shrink_to_fit();
v_per_proc.clear();
v_per_proc.shrink_to_fit();
m2g.clear();
m2g.rehash(0);
}
/*! \brief Print the current distribution and save it to VTK file
*
* \param file filename
......@@ -622,7 +638,7 @@ public:
*/
void write(const std::string & file)
{
VTKWriter<Graph_CSR<nm_v, nm_e>, VTK_GRAPH> gv2(gp);
VTKWriter<Graph_CSR<nm_v<dim>, nm_e>, VTK_GRAPH> gv2(gp);
gv2.write(std::to_string(v_cl.getProcessUnitID()) + "_" + file + ".vtk");
}
......@@ -638,6 +654,7 @@ public:
verticesGotWeights = dist.verticesGotWeights;
sub_sub_owner = dist.sub_sub_owner;
m2g = dist.m2g;
parmetis_graph = dist.parmetis_graph;
return *this;
}
......@@ -655,10 +672,23 @@ public:
verticesGotWeights = dist.verticesGotWeights;
sub_sub_owner.swap(dist.sub_sub_owner);
m2g.swap(dist.m2g);
parmetis_graph = dist.parmetis_graph;
return *this;
}
/*! \brief return the the position of the sub-sub-domain
*
* \param i sub-sub-domain id
* \param p point
*
*/
void getSubSubDomainPos(size_t j, Point<dim,T> & p)
{
for (size_t i = 0 ; i < dim ; i++)
{p.get(i) = gp.template vertex_p<0>(sub_sub_owner.get(j))[i];}
}
/*! \brief Get the decomposition counter
*
* \return the decomposition counter
......
......@@ -32,7 +32,7 @@ class SpaceDistribution
Box<dim, T> domain;
//! Global sub-sub-domain graph
Graph_CSR<nm_v, nm_e> gp;
Graph_CSR<nm_v<dim>, nm_e> gp;
public:
......@@ -84,24 +84,24 @@ public:
domain = dom;
// Create a cartesian grid graph
CartesianGraphFactory<dim, Graph_CSR<nm_v, nm_e>> g_factory_part;