Commit 8065e9eb authored by Pietro Incardona

Distributed vector and grid compiling and passing the test

parent 80b9ecb0
@@ -1293,8 +1293,12 @@ pdata-main.o: main.cpp /usr/include/stdc-predef.h \
../../OpenFPM_data/src/Space/SpaceBox.hpp \
../../OpenFPM_data/src/Space/Shape/Point.hpp \
../../OpenFPM_data/src/Space/Shape/Box.hpp \
../../OpenFPM_data/src/Space/Ghost.hpp Grid/grid_dist_id_iterator.hpp \
Grid/grid_dist_key.hpp ../../OpenFPM_data/src/Point_test.hpp \
../../OpenFPM_data/src/Space/Ghost.hpp \
../../OpenFPM_data/src/Space/SpaceBox.hpp Grid/grid_dist_id_iterator.hpp \
Grid/grid_dist_key.hpp \
../../OpenFPM_data/src/NN/CellList/CellDecomposer.hpp \
../../OpenFPM_data/src/Space/Matrix.hpp \
../../OpenFPM_data/src/Point_test.hpp \
../../OpenFPM_data/src/base_type.hpp \
../../OpenFPM_data/src/Point_orig.hpp \
../../OpenFPM_data/src/Grid/Encap.hpp \
@@ -1367,8 +1371,6 @@ pdata-main.o: main.cpp /usr/include/stdc-predef.h \
/usr/include/boost/iostreams/detail/path.hpp \
/usr/include/boost/config/abi_prefix.hpp \
/usr/include/boost/config/abi_suffix.hpp dec_optimizer.hpp \
../../OpenFPM_data/src/NN/CellList/CellDecomposer.hpp \
../../OpenFPM_data/src/Space/Matrix.hpp \
/usr/include/c++/4.8.3/unordered_map \
/usr/include/c++/4.8.3/bits/hashtable.h \
/usr/include/c++/4.8.3/bits/hashtable_policy.h \
@@ -4254,10 +4256,16 @@ Grid/grid_dist_id.hpp:
../../OpenFPM_data/src/Space/Ghost.hpp:
../../OpenFPM_data/src/Space/SpaceBox.hpp:
Grid/grid_dist_id_iterator.hpp:
Grid/grid_dist_key.hpp:
../../OpenFPM_data/src/NN/CellList/CellDecomposer.hpp:
../../OpenFPM_data/src/Space/Matrix.hpp:
../../OpenFPM_data/src/Point_test.hpp:
../../OpenFPM_data/src/base_type.hpp:
@@ -4414,10 +4422,6 @@ metis_util.hpp:
dec_optimizer.hpp:
../../OpenFPM_data/src/NN/CellList/CellDecomposer.hpp:
../../OpenFPM_data/src/Space/Matrix.hpp:
/usr/include/c++/4.8.3/unordered_map:
/usr/include/c++/4.8.3/bits/hashtable.h:
@@ -687,7 +687,7 @@ p1[0]<-----+ +----> p2[0]
{
if (ghost.template getLow(i) >= domain.template getHigh(i) / gr.size(i) || ghost.template getHigh(i) >= domain.template getHigh(i) / gr.size(i))
{
std::cerr << "Error: Ghost are bigger that one domain" << "\n";
std::cerr << "Error " << __FILE__ << ":" << __LINE__ << " : Ghost are bigger than one domain" << "\n";
}
}
#endif
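The check above rejects a ghost that does not fit inside a single sub-domain: along every dimension both ghost margins must stay below the sub-domain extent. A minimal standalone sketch of the same condition, with hypothetical names (ghost_lo, ghost_hi, dom_hi, div) and assuming the domain starts at 0:

#include <cstddef>
#include <iostream>

// Sketch of the ghost-size sanity check, under the assumptions stated above.
template<unsigned int dim>
bool ghost_fits_in_one_subdomain(const double (&ghost_lo)[dim],
                                 const double (&ghost_hi)[dim],
                                 const double (&dom_hi)[dim],
                                 const std::size_t (&div)[dim])
{
	for (unsigned int i = 0 ; i < dim ; i++)
	{
		// extent of one sub-domain along dimension i
		double sub_ext = dom_hi[i] / div[i];

		// both ghost margins must be strictly smaller than one sub-domain
		if (ghost_lo[i] >= sub_ext || ghost_hi[i] >= sub_ext)
		{
			std::cerr << "Error: ghost is bigger than one sub-domain" << "\n";
			return false;
		}
	}
	return true;
}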
@@ -1186,6 +1186,7 @@ p1[0]<-----+ +----> p2[0]
* 1) p_sub_X.vtk domain for the processor X as union of sub-domain
* 2) sub_np_c_X.vtk sub-domain of the near processors contiguous to the processor X (Color encoded)
* 3) sub_X_inte_g_np.vtk Intersection between the ghosts of the near processors and the processors X sub-domains (Color encoded)
* 4) sub_X_ghost.vtk ghost for the processor X (Color encoded)
*
* where X is the processor number
*
@@ -1216,11 +1217,22 @@ p1[0]<-----+ +----> p2[0]
{
for (size_t s = 0 ; s < box_nn_processor_int.get(p).size() ; s++)
{
auto & diocane = box_nn_processor_int.get(p).get(s).nbx;
vtk_box3.add(diocane);
vtk_box3.add(box_nn_processor_int.get(p).get(s).nbx);
}
}
vtk_box3.write(std::string("sub_") + std::to_string(v_cl.getProcessUnitID()) + std::string("_inte_g_np") + std::string(".vtk"));
//! sub_X_ghost.vtk ghost for the processor X (Color encoded)
VTKWriter<openfpm::vector<::Box<dim,T>>,VECTOR_BOX> vtk_box4;
for (size_t p = 0 ; p < box_nn_processor_int.size() ; p++)
{
for (size_t s = 0 ; s < box_nn_processor_int.get(p).size() ; s++)
{
vtk_box4.add(box_nn_processor_int.get(p).get(s).bx);
}
}
vtk_box4.write(std::string("sub_") + std::to_string(v_cl.getProcessUnitID()) + std::string("_ghost") + std::string(".vtk"));
}
};
@@ -8,36 +8,35 @@
#include "mathutil.hpp"
#include "grid_dist_id_iterator.hpp"
#include "grid_dist_key.hpp"
#include "NN/CellList/CellDecomposer.hpp"
#define SUB_UNIT_FACTOR 64
/*! \brief This is a distributed grid
*
* Implementation of a distributed grid with id decomposition. A distributed grid is a grid distributed
* across processors. The decomposition is performed on the id of the elements
*
* [Examples]
* Implementation of a distributed grid with decomposition on the ids.
* A distributed grid is a grid distributed across processors.
* The decomposition is performed on the ids of the elements
*
* on 1D where the id is from 1 to N
* processor k take M contiguous elements
*
* on 3D where (for example)
* processor k take M id-connected elements
*
* \param dim Dimensionality of the grid
* \param T type of grid
* \param St Type of space where the grid is living
* \param T object the grid is storing
* \param Decomposition Class that decompose the grid for example CartDecomposition
* \param Mem Is the allocator
* \param device type of base structure is going to store the data
*
*/
template<unsigned int dim, typename T, typename Decomposition,typename Memory=HeapMemory , typename device_grid=grid_cpu<dim,T> >
template<unsigned int dim, typename St, typename T, typename Decomposition,typename Memory=HeapMemory , typename device_grid=grid_cpu<dim,T> >
class grid_dist_id
{
// Domain
Box<dim,St> domain;
// Ghost expansion
Box<dim,size_t> ghost;
Ghost<dim,St> ghost;
//! Local grids
Vcluster_object_array<device_grid> loc_grid;
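The template now takes the space type St in addition to the stored object T, so a 2D grid of scalar<float> living in a float space is instantiated as in the updated unit test further down. A minimal sketch, assuming the OpenFPM headers and an initialized global VCluster (the grid size value is illustrative):

void example_grid_construction()
{
	// physical domain [0,1] x [0,1], as in the 2D test
	Box<2,float> domain({0.0,0.0},{1.0,1.0});

	// global grid size on each dimension (illustrative value)
	size_t sz[2] = {1024,1024};

	// dim = 2, St = float (space type), T = scalar<float> (stored object),
	// Decomposition = CartDecomposition<2,float>
	grid_dist_id<2, float, scalar<float>, CartDecomposition<2,float>> g_dist(sz,domain);
}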
@@ -48,8 +47,10 @@ class grid_dist_id
//! Size of the grid on each dimension
size_t g_sz[dim];
//! Communicator class
//! Structure that divide the space into cells
CellDecomposer_sm<dim,St> cd_sm;
//! Communicator class
Vcluster & v_cl;
/*! \brief Get the grid size
@@ -99,8 +100,8 @@ class grid_dist_id
public:
//! constructor
grid_dist_id(Vcluster v_cl, Decomposition & dec, size_t (& g_sz)[dim], Box<dim,size_t> & ghost)
:ghost(ghost),loc_grid(NULL),v_cl(v_cl),dec(dec)
grid_dist_id(Vcluster v_cl, Decomposition & dec, const size_t (& g_sz)[dim], const Box<dim,St> & domain, const Ghost<dim,T> & ghost)
:domain(domain),cd_sm(domain,g_sz,0),ghost(ghost),loc_grid(NULL),v_cl(v_cl),dec(dec)
{
// fill the global size of the grid
for (int i = 0 ; i < dim ; i++) {this->g_sz[i] = g_sz[i];}
@@ -123,9 +124,14 @@ public:
Create();
}
//! constructor
grid_dist_id(size_t (& g_sz)[dim])
:dec(Decomposition(*global_v_cluster)),v_cl(*global_v_cluster)
/*! \brief Constructor
*
* \param g_sz array with the grid size on each dimension
* \param domain box that defines the domain in space coordinates
*
*/
grid_dist_id(const size_t (& g_sz)[dim],const Box<dim,St> & domain)
:domain(domain),cd_sm(domain,g_sz,0),ghost(0),dec(Decomposition(*global_v_cluster)),v_cl(*global_v_cluster)
{
// fill the global size of the grid
for (int i = 0 ; i < dim ; i++) {this->g_sz[i] = g_sz[i];}
@@ -141,11 +147,8 @@ public:
for (int i = 0 ; i < dim ; i++)
{div[i] = openfpm::math::round_big_2(pow(n_sub,1.0/dim));}
// Box
Box<dim,size_t> b(g_sz);
// Create the sub-domains
dec.setParameters(div,b);
dec.setParameters(div,domain);
// Create local grid
Create();
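The sub-division count fed to dec.setParameters() comes from the formula above. A worked sketch under two assumptions: n_sub is the requested number of sub-domains (the elided lines derive it from the processor count and SUB_UNIT_FACTOR), and openfpm::math::round_big_2 rounds up to the next power of two (a stand-in lambda is used here):

#include <cmath>
#include <cstddef>
#include <iostream>

int main()
{
	const unsigned int dim = 2;
	const std::size_t n_proc = 4;             // hypothetical processor count
	const std::size_t n_sub = n_proc * 64;    // 64 = SUB_UNIT_FACTOR

	// stand-in for openfpm::math::round_big_2 (assumed: next power of two)
	auto round_big_2 = [](double x) { std::size_t r = 1; while (r < x) r <<= 1; return r; };

	std::size_t div[dim];
	for (unsigned int i = 0 ; i < dim ; i++)
		div[i] = round_big_2(std::pow(n_sub,1.0/dim));

	// n_sub = 256 and pow(256,0.5) = 16, so div = {16,16}: a 16 x 16 grid of
	// sub-domains is handed to dec.setParameters(div,domain)
	std::cout << div[0] << " x " << div[1] << "\n";
	return 0;
}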
@@ -168,10 +171,13 @@ public:
void Create()
{
// Box used for rounding error
Box<dim,St> rnd_box;
for (size_t i = 0 ; i < dim ; i++) {rnd_box.setHigh(i,0.5); rnd_box.setLow(i,0.5);}
// ! Create an hyper-cube approximation.
// ! In order to work on grid_dist the decomposition
// ! has to be a set of hyper-cube
dec.hyperCube();
// Get the number of local grid needed
@@ -188,15 +194,34 @@ public:
for (size_t i = 0 ; i < n_grid ; i++)
{
// Get the local hyper-cube
SpaceBox<dim,St> sp = dec.getLocalHyperCube(i);
SpaceBox<dim,size_t> sp = dec.getLocalHyperCube(i);
// Convert sp into grid units
sp /= cd_sm.getCellBox().getP2();
// Calculate the local grid size
// enlarge by 0.5 for rounding
sp.enlarge(rnd_box);
getGridSize(sp,l_res);
// Convert from SpaceBox<dim,float> to SpaceBox<dim,size_t>
SpaceBox<dim,size_t> sp_t = sp;
// Set the dimensions of the local grid
// convert the ghost from space coordinate to grid units
Ghost<dim,St> g_int = ghost;
g_int /= cd_sm.getCellBox().getP2();
// enlarge by 0.5 for rounding
g_int.enlarge(rnd_box);
// convert from Ghost<dim,St> to Ghost<dim,size_t>
Ghost<dim,size_t> g_int_t = g_int;
// Enlarge sp with the Ghost size
sp_t.enlarge(g_int_t);
// Get the local size
for (size_t i = 0 ; i < dim ; i++) {l_res[i] = sp_t.getHigh(i) - sp_t.getLow(i);}
// Set the dimensions of the local grid
loc_grid.get(i).template resize<Memory>(l_res);
}
}
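The loop above converts each local sub-domain from space coordinates to grid units (dividing by the cell size stored in cd_sm), pads it with the ghost plus a 0.5 rounding margin, and takes high minus low as the local grid size. A simplified one-dimensional sketch of that arithmetic, with illustrative numbers (domain [0,1], 1024 grid points, ghost 0.01):

#include <cstddef>
#include <iostream>

int main()
{
	const double dom_hi = 1.0;          // domain [0,1]
	const std::size_t g_sz = 1024;      // global grid points (illustrative)
	const double cell = dom_hi / g_sz;  // plays the role of cd_sm.getCellBox().getP2()

	// a local sub-domain [0.25,0.5] in space becomes [256,512] in grid units
	double lo = 0.25 / cell;
	double hi = 0.5 / cell;

	// a ghost of 0.01 in space is 10.24 grid units, enlarged by 0.5 for rounding
	double ghost = 0.01 / cell + 0.5;

	// local grid size after padding both sides with the ghost
	std::size_t l_res = static_cast<std::size_t>((hi + ghost) - (lo - ghost));

	std::cout << "local grid size: " << l_res << "\n";  // prints 277 (256 interior + two ghost margins)
	return 0;
}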
@@ -251,13 +276,7 @@ public:
* Implementation of a distributed grid with id decomposition. A distributed grid is a grid distributed
* across processors. The decomposition is performed on the id of the elements
*
* [Examples]
*
* on 1D where the id is from 1 to N
* processor k take M contiguous elements
*
* on 3D where (for example)
* processor k take M id-connected elements
* 1D specialization
*
* \param dim Dimensionality of the grid
* \param T type of grid
@@ -270,8 +289,8 @@ public:
template<typename T, typename Decomposition,typename Memory , typename device_grid >
class grid_dist_id<1,T,Decomposition,Memory,device_grid>
{
// Ghost expansion
Box<1,size_t> ghost;
// Ghost
Ghost<1,T> ghost;
//! Local grids
Vcluster_object_array<device_grid> loc_grid;
@@ -327,7 +346,7 @@ class grid_dist_id<1,T,Decomposition,Memory,device_grid>
public:
//! constructor
grid_dist_id(Vcluster v_cl, Decomposition & dec, size_t (& g_sz)[1], Box<1,size_t> & ghost)
grid_dist_id(Vcluster v_cl, Decomposition & dec, size_t (& g_sz)[1], Box<1,T> & ghost)
:ghost(ghost),loc_grid(NULL),v_cl(v_cl)
{
// fill the global size of the grid
@@ -339,7 +358,7 @@ public:
//! constructor
grid_dist_id(size_t (& g_sz)[1])
:v_cl(*global_v_cluster)
:v_cl(*global_v_cluster),ghost(0)
{
// fill the global size of the grid
for (int i = 0 ; i < 1 ; i++) {this->g_sz[i] = g_sz[i];}
@@ -6,10 +6,10 @@
BOOST_AUTO_TEST_SUITE( grid_dist_id_test )
template<typename iterator> void jacobi_iteration(iterator g_it, grid_dist_id<2, scalar<float>, CartDecomposition<2,size_t>> & g_dist)
template<typename iterator> void jacobi_iteration(iterator g_it, grid_dist_id<2, float, scalar<float>, CartDecomposition<2,float>> & g_dist)
{
// scalar
typedef scalar<size_t> S;
typedef scalar<float> S;
// iterator
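The body of jacobi_iteration is elided in this hunk; for reference, the standard Jacobi update for the Laplace equation (average of the four neighbours) looks like the following on a plain 2D array. This is only an illustration of the stencil the test name alludes to, not the distributed-grid implementation:

#include <cstddef>
#include <vector>

// One Jacobi sweep on an nx x ny grid stored row-major in a flat vector.
void jacobi_sweep(const std::vector<float> & u, std::vector<float> & u_new,
                  std::size_t nx, std::size_t ny)
{
	for (std::size_t j = 1 ; j < ny - 1 ; j++)
		for (std::size_t i = 1 ; i < nx - 1 ; i++)
			u_new[j*nx + i] = 0.25f * (u[j*nx + i - 1] + u[j*nx + i + 1] +
			                           u[(j-1)*nx + i] + u[(j+1)*nx + i]);
}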
@@ -30,6 +30,9 @@ template<typename iterator> void jacobi_iteration(iterator g_it, grid_dist_id<2,
BOOST_AUTO_TEST_CASE( grid_dist_id_iterator_test_use)
{
// Domain
Box<2,float> domain({0.0,0.0},{1.0,1.0});
// Initialize the global VCluster
init_global_v_cluster(&boost::unit_test::framework::master_test_suite().argc,&boost::unit_test::framework::master_test_suite().argv);
@@ -38,7 +41,7 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_iterator_test_use)
// Distributed grid with id decomposition
grid_dist_id<2, scalar<float>, CartDecomposition<2,size_t>> g_dist(sz);
grid_dist_id<2, float, scalar<float>, CartDecomposition<2,float>> g_dist(sz,domain);
// get the domain iterator
@@ -94,9 +94,10 @@ BOOST_AUTO_TEST_CASE( vector_dist_ghost )
++it;
}
// set the ghost based on the radius cut off
Ghost<2,float> g(spacing.get(0));
// set the ghost based on the radius cut off (make it just a little bit smaller than the spacing)
Ghost<2,float> g(spacing.get(0) - spacing.get(0) * 0.0001);
// set the ghost
vd.setGhost(g);
//! Output the decomposition
@@ -131,20 +132,30 @@ BOOST_AUTO_TEST_CASE( vector_dist_ghost )
bool is_in = false;
size_t b = 0;
size_t lb = 0;
// check if the received data is in one of the ghost boxes
for ( ; b < dec.getNGhostBox() ; b++)
{
if (dec.getGhostBox(b).isInside(vd.getPos<s::x>(key)) == true)
{is_in = true; break;}
{
is_in = true;
// Add
vb.get(b)++;
lb = b;
}
}
BOOST_REQUIRE_EQUAL(is_in,true);
// Check that the particle come from the correct processor
BOOST_REQUIRE_EQUAL(vd.getProp<p::v>(key)[0],dec.getGhostBoxProcessor(b));
BOOST_REQUIRE_EQUAL(vd.getProp<p::v>(key)[0],dec.getGhostBoxProcessor(lb));
// Add
vb.get(b)++;
if (b == 0)
{
int debug = 0;
debug++;
}
++g_it;
}