Commit 2762c36c authored by Yaroslav's avatar Yaroslav

Added 2 vector_dist save/load tests

parent a0ff6cb4
openfpm_data @ 68d63a0a
Subproject commit 2477150a744800a0bd1b052ee293febebd869d1d
Subproject commit 68d63a0a530ecda967f56c2f761ead6dcab64de8
......@@ -112,6 +112,9 @@ protected:
//! the set of all local sub-domains as a vector
openfpm::vector<SpaceBox<dim, T>> sub_domains;
//! the global set of all sub-domains, as a vector of per-processor 'sub_domains' vectors
mutable openfpm::vector<openfpm::vector<SpaceBox<dim, T>>> sub_domains_global;
//! for each sub-domain, contains the list of the neighborhood processors
openfpm::vector<openfpm::vector<long unsigned int> > box_nn_processor;
......@@ -1215,11 +1218,16 @@ public:
return domain;
}
openfpm::vector<SpaceBox<dim, T>> getSubDomains()
openfpm::vector<SpaceBox<dim, T>> getSubDomains() const
{
return sub_domains;
}
openfpm::vector<openfpm::vector<SpaceBox<dim, T>>> & getSubDomainsGlobal()
{
return sub_domains_global;
}
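A minimal sketch (not part of the commit) of how the gathered global view could be consumed; count_intersections is a hypothetical helper that relies only on getSubDomainsGlobal(), openfpm::vector::get()/size() and Box::Intersect(), all of which appear elsewhere in this diff.

// Hypothetical helper: count how many sub-domains, of any processor,
// intersect a query box, using the global view gathered above.
template<unsigned int dim, typename T, typename Decomposition>
size_t count_intersections(Decomposition & dec, const SpaceBox<dim,T> & query)
{
    size_t cnt = 0;

    // one vector of sub-domains per processor
    auto & glob = dec.getSubDomainsGlobal();

    for (size_t p = 0; p < glob.size(); p++)
    {
        for (size_t s = 0; s < glob.get(p).size(); s++)
        {
            SpaceBox<dim,T> inte;

            // Intersect() returns true and fills 'inte' when the boxes overlap
            if (glob.get(p).get(s).Intersect(query, inte))
                cnt++;
        }
    }

    return cnt;
}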
/*! \brief Check if the particle is local
*
* \warning if the particle is outside the domain, the result is unreliable
......
......@@ -16,15 +16,12 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_save_test )
{
// Input data
size_t k = 100;
size_t k = 10;
float ghost_part = 0.01; // size_t would truncate the ghost size to 0
/////////////////
size_t bc[3] = {NON_PERIODIC, NON_PERIODIC, NON_PERIODIC};
// Domain
Box<3,float> domain({0.0,0.0,0.0},{1.0,1.0,1.0});
Box<2,float> domain({0.0,0.0},{1.0,1.0});
Vcluster & v_cl = create_vcluster();
......@@ -33,19 +30,18 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_save_test )
return;
if (v_cl.getProcessUnitID() == 0)
std::cout << "Testing 3D grid HDF5 save/load" << std::endl;
std::cout << "Testing 2D grid HDF5 save" << std::endl;
// grid size
size_t sz[3];
size_t sz[2];
sz[0] = k;
sz[1] = k;
sz[2] = k;
// Ghost
Ghost<3,float> g(ghost_part);
Ghost<2,float> g(ghost_part);
// Distributed grid with id decomposition
grid_dist_id<3, float, scalar<float>, CartDecomposition<3,float>> g_dist(sz,domain,g);
grid_dist_id<2, float, scalar<float>, CartDecomposition<2,float>> g_dist(sz,domain,g);
// get the decomposition
auto & dec = g_dist.getDecomposition();
......@@ -54,29 +50,6 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_save_test )
bool val = dec.check_consistency();
BOOST_REQUIRE_EQUAL(val,true);
// for each local volume
// Get the number of local grid needed
size_t n_grid = dec.getNSubDomain();
size_t vol = 0;
// vector of boxes
openfpm::vector<Box<3,size_t>> vb;
// Allocate the grids
for (size_t i = 0 ; i < n_grid ; i++)
{
// Get the local hyper-cube
SpaceBox<3,float> sub = dec.getSubDomain(i);
sub -= domain.getP1();
Box<3,size_t> g_box = g_dist.getCellDecomposer().convertDomainSpaceIntoGridUnits(sub,bc);
vb.add(g_box);
vol += g_box.getVolumeKey();
}
// Save the vector
g_dist.save("grid_dist_id.h5");
}
......@@ -89,11 +62,8 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_load_test )
float ghost_part = 0.01; // size_t would truncate the ghost size to 0
/////////////////
size_t bc[3] = {NON_PERIODIC, NON_PERIODIC, NON_PERIODIC};
// Domain
Box<3,float> domain({0.0,0.0,0.0},{1.0,1.0,1.0});
Box<2,float> domain({0.0,0.0},{1.0,1.0});
Vcluster & v_cl = create_vcluster();
......@@ -102,19 +72,18 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_load_test )
return;
if (v_cl.getProcessUnitID() == 0)
std::cout << "Testing 3D grid HDF5 save/load" << std::endl;
std::cout << "Testing 2D grid HDF5 save/load" << std::endl;
// grid size
size_t sz[3];
size_t sz[2];
sz[0] = k;
sz[1] = k;
sz[2] = k;
// Ghost
Ghost<3,float> g(ghost_part);
Ghost<2,float> g(ghost_part);
// Distributed grid with id decomposition
grid_dist_id<3, float, scalar<float>, CartDecomposition<3,float>> g_dist(sz,domain,g);
grid_dist_id<2, float, scalar<float>, CartDecomposition<2,float>> g_dist(sz,domain,g);
g_dist.load("grid_dist_id.h5");
/*
......
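The load test above ends in a block that is commented out in this commit. A structural check it could run after g_dist.load("grid_dist_id.h5") is sketched below; it assumes grid_dist_id::getDomainIterator() and the Vcluster sum()/execute() reduction, and that the save and load tests use the same k.

// Sketch only: count the domain points of the loaded 2D grid and compare
// them with the k*k points written by the save test.
size_t cnt = 0;

auto dom_it = g_dist.getDomainIterator();
while (dom_it.isNext())
{
    ++cnt;
    ++dom_it;
}

// global reduction across all processors
v_cl.sum(cnt);
v_cl.execute();

BOOST_REQUIRE_EQUAL(cnt, k*k);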
......@@ -29,9 +29,6 @@ class grid_dist_id_comm
//! VCluster
Vcluster & v_cl;
//! Domain decomposition
Decomposition dec;
//! Maps the processor id with the communication request into map procedure
openfpm::vector<size_t> p_map_req;
......@@ -41,16 +38,16 @@ class grid_dist_id_comm
//! Stores the size of the elements added for each processor that communicates with us (local processor)
openfpm::vector<size_t> recv_sz_map;
//! For each near processor, outgoing Box
//! \warning m_oBox is assumed to be an ordered list
//! first id point Box
//! For each near processor, the outgoing intersection grids
//! \warning m_oGrid is assumed to be an ordered list:
//! the first index is the target processor id,
//! the second index is the grid
openfpm::vector<aggregate<Box<dim,St>,size_t>> m_oBox;
openfpm::vector<openfpm::vector<device_grid>> m_oGrid;
public:
//! It process one particle
template<typename T1, typename T2, typename T3, typename T4> inline void process_map_particle(size_t i, long int & end, long int & id_end, T1 & m_pos, T2 & m_prp, T3 & v_pos, T4 & v_prp, openfpm::vector<size_t> & cnt)
//! It processes one box
template<typename T1, typename T2, typename T3, typename T4> inline void process_map_box(size_t i, long int & end, long int & id_end, T1 & m_pos, T2 & m_prp, T3 & v_pos, T4 & v_prp, openfpm::vector<size_t> & cnt)
{
/*
long int prc_id = m_oBox.template get<1>(i);
......@@ -89,14 +86,9 @@ public:
/*! \brief Allocates and fills the send buffer for the map function
*
* \param v_pos vector of particle positions
* \param v_prp vector of particle properties
* \param prc_r List of processor rank involved in the send
* \param prc_sz_r For each processor in the list the size of the message to send
* \param pb send buffer
*
*/
void fill_send_map_buf(openfpm::vector<device_grid> & loc_grid, openfpm::vector<size_t> & prc_sz_r, openfpm::vector<openfpm::vector<Box<dim,St>>> & m_box)
void fill_send_map_buf_(openfpm::vector<device_grid> & loc_grid, openfpm::vector<size_t> & prc_sz_r, openfpm::vector<openfpm::vector<SpaceBox<dim,St>>> & m_box)
{
m_box.resize(prc_sz_r.size());
openfpm::vector<size_t> cnt(prc_sz_r.size());
......@@ -124,42 +116,131 @@ public:
*/
}
/*! \brief Label particles for mappings
/*! \brief Allocates and fills the send buffer for the map function
*
* \param v_pos vector of particle positions
* \param lbl_p Particle labeled
* \param prc_sz For each processor the number of particles to send
* \param opart id of the particles to send
*
*/
void labelIntersectionGridsProcessor(openfpm::vector<SpaceBox<dim, T>> & sub_domains_old, openfpm::vector<device_grid> & loc_grid, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext, openfpm::vector<aggregate<Box<dim,St>,size_t>> & lbl_b, openfpm::vector<size_t> & prc_sz)
void fill_send_map_buf(openfpm::vector<device_grid> & loc_grid, openfpm::vector<size_t> & prc_sz_r, openfpm::vector<openfpm::vector<SpaceBox<dim,St>>> & m_box)
{
// reset lbl_b
lbl_b.clear();
m_box.resize(prc_sz_r.size());
openfpm::vector<size_t> cnt(prc_sz_r.size());
for (size_t i = 0; i < prc_sz_r.size(); i++)
{
// set the size and allocate, using mem warant that pos and prp is contiguous
m_box.get(i).resize(prc_sz_r.get(i));
cnt.get(i) = 0;
}
/*
// end vector point
long int id_end = v_pos.size();
// end opart point
long int end = m_opart.size()-1;
// Run through all the particles and fill the sending buffer
for (size_t i = 0; i < m_opart.size(); i++)
{
process_map_particle<proc_with_prp<prp_object,prp...>>(i,end,id_end,m_pos,m_prp,v_pos,v_prp,cnt);
}
v_pos.resize(v_pos.size() - m_opart.size());
*/
}
/*! \brief Label intersection grids for mappings
*
* \param prc_sz For each processor the number of grids to send
*/
void labelIntersectionGridsProcessor(Box<dim,St> domain, Decomposition & dec, CellDecomposer_sm<dim,St,shift<dim,St>> & cd_sm, openfpm::vector<device_grid> & loc_grid_old, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext_old, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext_global, openfpm::vector<openfpm::vector<device_grid>> & lbl_b, openfpm::vector<size_t> & prc_sz)
{
// resize the label buffer
prc_sz.resize(v_cl.getProcessingUnits());
lbl_b.resize(v_cl.getProcessingUnits());
// Label all the intersection boxes with the processor id where they should go
for (size_t i = 0; i < dec.getNSubDomain(); i++)
// Label all the intersection grids with the processor id where they should go
for (size_t i = 0; i < gdb_ext_old.size(); i++)
{
for (size_t j = 0; j < sub_domains_old.size(); j++)
for (size_t j = 0; j < gdb_ext_global.size(); j++)
{
size_t p_id = 0;
Box<dim,St> inte_box;
bool intersect = dec.getSubDomain(i).Intersect(sub_domains_old.get(j), inte_box);
// Intersection box
SpaceBox<dim,St> inte_box;
// Local old sub-domain
SpaceBox<dim,St> sub_dom = gdb_ext_old.get(i).Dbox;
// Global new sub-domain
SpaceBox<dim,St> sub_dom_new = gdb_ext_global.get(j).Dbox;
bool intersect = sub_dom.Intersect(sub_dom_new, inte_box);
if (intersect == true)
{
p_id = dec.processorID(inte_box.rnd());
/*
// Grid to send size
size_t sz1[dim];
for (size_t l = 0; l < dim; l++)
{
sz1[l] = inte_box.getHigh(l) - inte_box.getLow(l);
std::cout << "Cont. size on " << l << " dimension: " << sz1[l] << std::endl;
}
*/
// Get the processor ID that stores the intersection box
Point<dim,St> p;
for (size_t i = 0; i < dim; i++)
p.get(i) = (inte_box.getHigh(i) + inte_box.getLow(i))/2;
p_id = dec.processorID(p);
prc_sz.get(p_id)++;
lbl_b.add();
////////////////////
lbl_b.last().template get<0>() = inte_box;
////////////////////
lbl_b.last().template get<1>() = p_id;
std::cout << "P_id: " << p_id << std::endl;
// Convert intersection box from contiguous to discrete
SpaceBox<dim,long int> inte_box_discr = cd_sm.convertDomainSpaceIntoGridUnits(inte_box,dec.periodicity());
inte_box_discr -= gdb_ext.get(i).origin;
// Grid corresponding for gdb_ext_old.get(i) box
device_grid gr = loc_grid_old.get(i);
// Grid to send size
size_t sz[dim];
for (size_t l = 0; l < dim; l++)
{
sz[l] = inte_box_discr.getHigh(l) - inte_box_discr.getLow(l);
std::cout << " Size on " << l << " dimension: " << sz[l] << std::endl;
}
// Grid to send
device_grid gr_send(sz);
gr_send.setMemory();
// Sub iterator across intersection box inside local grid
grid_key_dx<dim> start = inte_box_discr.getKP1();
grid_key_dx<dim> stop = inte_box_discr.getKP2();
std::string start2 = start.to_string();
std::string stop2 = stop.to_string();
auto key_it = gr.getSubIterator(start,stop);
// Copy selected elements into a new sub-grid
while (key_it.isNext())
{
grid_key_dx<dim> key = key_it.get();
//std::string str = key.to_string();
//std::cout << "Key: " << str << std::endl;
gr_send.set(key,gr,key);
++key_it;
}
// Add to the labeling vector
lbl_b.get(p_id).add(gr_send);
std::cout << "9" << std::endl;
}
}
}
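The owner of every intersection piece is now chosen from the centre point of the intersection box (the previous code used dec.processorID(inte_box.rnd()), i.e. a random point inside it). The rule, isolated as a hypothetical standalone helper that uses only Point, SpaceBox and Decomposition::processorID() as they appear above:

// Sketch only: which processor of the new decomposition owns an
// intersection box, judged by the box centre.
template<unsigned int dim, typename St, typename Decomposition>
size_t owner_of(Decomposition & dec, const SpaceBox<dim,St> & inte_box)
{
    Point<dim,St> centre;

    for (size_t d = 0; d < dim; d++)
        centre.get(d) = (inte_box.getHigh(d) + inte_box.getLow(d)) / 2;

    return dec.processorID(centre);
}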
......@@ -178,39 +259,49 @@ public:
* \param g_m ghost marker
*
*/
void map_(openfpm::vector<SpaceBox<dim, T>> & sub_domains_old, openfpm::vector<device_grid> & loc_grid, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext)
void map_(Box<dim,St> domain, Decomposition & dec, CellDecomposer_sm<dim,St,shift<dim,St>> & cd_sm, openfpm::vector<device_grid> & loc_grid_old, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext_old, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext_global)
{
// Processor communication size
openfpm::vector<size_t> prc_sz(v_cl.getProcessingUnits());
// Contains the processor id of each grid (basically where they have to go)
labelIntersectionGridsProcessor(sub_domains_old,loc_grid,gdb_ext,m_oBox,prc_sz);
// Contains the processor id of each grid (basically where it has to go)
labelIntersectionGridsProcessor(domain,dec,cd_sm,loc_grid_old,gdb_ext,gdb_ext_old,gdb_ext_global,m_oGrid,prc_sz);
// Calculate the sending buffer size for each processor, put this information in
// a contiguous buffer
p_map_req.resize(v_cl.getProcessingUnits());
// Vector of number of boxes for each involved processor
// Vector of number of sending grids for each involved processor
openfpm::vector<size_t> prc_sz_r;
// Vector of ranks of involved processors
openfpm::vector<size_t> prc_r;
for (size_t i = 0; i < v_cl.getProcessingUnits(); i++)
{
if (prc_sz.get(i) != 0)
if (m_oGrid.get(i).size() != 0)
{
p_map_req.get(i) = prc_r.size();
prc_r.add(i);
prc_sz_r.add(prc_sz.get(i));
prc_sz_r.add(m_oGrid.get(i).size());
}
}
//! Grids vector
openfpm::vector<openfpm::vector<Box<dim,St>>> m_box;
//! Boxes vector
//openfpm::vector<openfpm::vector<device_grid>> m_box;
//fill_send_map_buf(loc_grid_global, prc_sz_r, m_box);
openfpm::vector<openfpm::vector<device_grid>> m_oGrid_recv;
m_oGrid_recv.resize(m_oGrid.size());
for (size_t i = 0; i < m_oGrid.size(); i++)
{
m_oGrid_recv.get(i).resize(m_oGrid.get(i).size());
}
fill_send_map_buf(loc_grid, prc_sz_r, m_box);
v_cl.SSendRecv(m_oGrid,m_oGrid_recv,prc_r,prc_recv_map,recv_sz_map);
v_cl.SSendRecv(m_box,loc_grid,prc_r,prc_recv_map,recv_sz_map);
std::cout << "m_oGrid_recv.size(): " << m_oGrid_recv.size() << std::endl;
}
/*! \brief Constructor
......@@ -218,8 +309,8 @@ public:
* \param dec Domain decomposition
*
*/
grid_dist_id_comm(const Decomposition & dec)
:v_cl(create_vcluster()),dec(dec)
grid_dist_id_comm()
:v_cl(create_vcluster())
{
}
......
......@@ -1039,7 +1039,7 @@ public:
hsize_t fdim[1] = {sum};
//Size for data space in file
hsize_t fdim2[1] = {mpi_size};
hsize_t fdim2[1] = {(size_t)mpi_size};
//Create data space in file
hid_t file_dataspace_id = H5Screate_simple(1, fdim, NULL);
......@@ -1067,13 +1067,13 @@ public:
hsize_t block[1] = {pmem.size()};
hsize_t stride[1] = {1};
//hsize_t stride[1] = {1};
hsize_t count[1] = {1};
hsize_t offset[1] = {0};
for (size_t i = 0; i < mpi_rank; i++)
for (int i = 0; i < mpi_rank; i++)
{
if (mpi_rank == 0)
offset[0] = 0;
......@@ -1085,7 +1085,7 @@ public:
int metadata[mpi_size];
for (size_t i = 0; i < mpi_size; i++)
for (int i = 0; i < mpi_size; i++)
metadata[i] = sz_others.get(i);
//Select hyperslab in the file.
......@@ -1100,10 +1100,10 @@ public:
H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
//Write a data set to a file
herr_t status = H5Dwrite(file_dataset, H5T_NATIVE_CHAR, mem_dataspace_id, file_dataspace_id, plist_id, (const char *)pmem.getPointer());
H5Dwrite(file_dataset, H5T_NATIVE_CHAR, mem_dataspace_id, file_dataspace_id, plist_id, (const char *)pmem.getPointer());
//Write a data set 2 to a file
herr_t status_2 = H5Dwrite(file_dataset_2, H5T_NATIVE_INT, H5S_ALL, file_dataspace_id_2, plist_id, metadata);
H5Dwrite(file_dataset_2, H5T_NATIVE_INT, H5S_ALL, file_dataspace_id_2, plist_id, metadata);
//Close/release resources.
......@@ -1148,18 +1148,18 @@ public:
hssize_t mpi_size_old = H5Sget_select_npoints (file_dataspace_id);
if (mpi_rank == 0)
printf ("\nOld MPI size: %i\n", mpi_size_old);
printf ("\nOld MPI size: %llu\n", mpi_size_old);
//Where to read metadata
int metadata_out[mpi_size_old];
for (size_t i = 0; i < mpi_size_old; i++)
for (int i = 0; i < mpi_size_old; i++)
{
metadata_out[i] = 0;
}
//Size for data space in memory
hsize_t mdim[1] = {mpi_size_old};
hsize_t mdim[1] = {(size_t)mpi_size_old};
//Create data space in memory
hid_t mem_dataspace_id = H5Screate_simple(1, mdim, NULL);
......@@ -1170,18 +1170,18 @@ public:
hssize_t size;
size = H5Sget_select_npoints (mem_dataspace_id);
printf ("\nmemspace_id size: %i\n", size);
printf ("\nmemspace_id size: %llu\n", size);
size = H5Sget_select_npoints (file_dataspace_id);
printf ("dataspace_id size: %i\n", size);
printf ("dataspace_id size: %llu\n", size);
}
// Read the dataset.
herr_t status = H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace_id, file_dataspace_id, plist_id, metadata_out);
H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace_id, file_dataspace_id, plist_id, metadata_out);
if (mpi_rank == 0)
{
std::cout << "Metadata_out[]: ";
for (size_t i = 0; i < mpi_size_old; i++)
for (int i = 0; i < mpi_size_old; i++)
{
std::cout << metadata_out[i] << " ";
}
......@@ -1203,7 +1203,7 @@ public:
if (mpi_rank >= mpi_size_old)
block[0] = 0;
else
block[0] = {metadata_out[mpi_rank]};
block[0] = {(size_t)metadata_out[mpi_rank]};
}
else
{
......@@ -1228,7 +1228,7 @@ public:
offset[0] = 0;
else
{
for (size_t i = 0; i < mpi_rank; i++)
for (int i = 0; i < mpi_rank; i++)
offset[0] += metadata_out[i];
}
}
......@@ -1237,7 +1237,7 @@ public:
int x = mpi_size_old/mpi_size;
int shift = mpi_rank*x;
for (size_t i = 0; i < shift; i++)
for (int i = 0; i < shift; i++)
{
offset[0] += metadata_out[i];
}
......@@ -1245,12 +1245,12 @@ public:
int y = mpi_size_old%mpi_size;
if (mpi_rank < y)
{
for (size_t i = 0; i < mpi_size*x + mpi_rank; i++)
for (int i = 0; i < mpi_size*x + mpi_rank; i++)
offset_add[0] += metadata_out[i];
}
}
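When the number of ranks at load time differs from the number at save time, each reader takes x = mpi_size_old / mpi_size consecutive saved pieces, and the y = mpi_size_old % mpi_size leftover pieces are picked up by the first y ranks through the extra offset_add/block_add hyperslab. For example, with 5 saved pieces and 2 readers, x = 2: rank 0 reads pieces 0 and 1 plus the leftover piece 4, rank 1 reads pieces 2 and 3. A hypothetical standalone version of the base-offset rule used above:

// Sketch only: byte offset of the first piece read by 'mpi_rank' when
// 'mpi_size_old' saved pieces are loaded by 'mpi_size' ranks.
size_t base_offset(const int * metadata_out, int mpi_size_old, int mpi_size, int mpi_rank)
{
    size_t off = 0;
    int x = mpi_size_old / mpi_size;   // pieces per reader

    // skip the bytes of all pieces assigned to lower ranks
    for (int i = 0; i < mpi_rank * x; i++)
        off += metadata_out[i];

    return off;
}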
hsize_t stride[1] = {1};
//hsize_t stride[1] = {1};
hsize_t count[1] = {1};
std::cout << "LOAD: MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << ", Offset: " << offset[0] << ", Offset_add: " << offset_add[0] << ", Block: " << block[0] << ", Block_add: " << block_add[0] << std::endl;
......@@ -1286,9 +1286,9 @@ public:
hssize_t size2;
size2 = H5Sget_select_npoints (mem_dataspace_id_2);
printf ("\nLOAD: memspace_id_2 size: %i\n", size2);
printf ("\nLOAD: memspace_id_2 size: %llu\n", size2);
size2 = H5Sget_select_npoints (file_dataspace_id_2);
printf ("LOAD: dataspace_id_2 size: %i\n", size2);
printf ("LOAD: dataspace_id_2 size: %llu\n", size2);
}
if (mpi_rank == 0)
......@@ -1296,14 +1296,14 @@ public:
hssize_t size2;
size2 = H5Sget_select_npoints (mem_dataspace_id_3);
printf ("\nLOAD: memspace_id_3 size: %i\n", size2);
printf ("\nLOAD: memspace_id_3 size: %llu\n", size2);
size2 = H5Sget_select_npoints (file_dataspace_id_3);
printf ("LOAD: dataspace_id_3 size: %i\n", size2);
printf ("LOAD: dataspace_id_3 size: %llu\n", size2);
}
size_t sum = 0;
for (size_t i = 0; i < mpi_size_old; i++)
for (int i = 0; i < mpi_size_old; i++)
{
sum += metadata_out[i];
}
......@@ -1318,21 +1318,19 @@ public:
mem.incRef();
// Read the dataset.
herr_t status_2 = H5Dread(dataset_2, H5T_NATIVE_CHAR, mem_dataspace_id_2, file_dataspace_id_2, plist_id, (char *)mem.getPointer());
H5Dread(dataset_2, H5T_NATIVE_CHAR, mem_dataspace_id_2, file_dataspace_id_2, plist_id, (char *)mem.getPointer());
// Read the dataset.
herr_t status_3 = H5Dread(dataset_2, H5T_NATIVE_CHAR, mem_dataspace_id_3, file_dataspace_id_3, plist_id, (char *)mem.getPointer());
H5Dread(dataset_2, H5T_NATIVE_CHAR, mem_dataspace_id_3, file_dataspace_id_3, plist_id, (char *)mem.getPointer());
mem.allocate(pmem.size());
std::cout << "Mem.size(): " << mem.size() << " = " << block[0]+block_add[0] << std::endl;
// Close the dataset.
status = H5Dclose(dataset);
status_2 = H5Dclose(dataset_2);
H5Dclose(dataset);
H5Dclose(dataset_2);
// Close the file.
status = H5Fclose(file);
H5Fclose(file);
H5Pclose(plist_id);
Unpack_stat ps;
......
......@@ -22,19 +22,143 @@ BOOST_AUTO_TEST_SUITE( vd_hdf5_chckpnt_rstrt_test )
BOOST_AUTO_TEST_CASE( vector_dist_hdf5_save_test )
{
// Input data
//Number of particles
size_t k = 100;
// Dimensionality
const size_t dim = 3;
/////////////////
Vcluster & v_cl = create_vcluster();
if (create_vcluster().getProcessUnitID() == 0)
std::cout << "Saving distributed vector" << std::endl;
Box<dim,float> box;
for (size_t i = 0; i < dim; i++)
{
box.setLow(i,0.0);
box.setHigh(i,1.0);
}
// Boundary conditions
size_t bc[dim];
for (size_t i = 0; i < dim; i++)
bc[i] = NON_PERIODIC;
// ghost
Ghost<3,float> ghost(0.5);
vector_dist<dim,float, aggregate<float[dim]>, CartDecomposition<dim,float> > vd(k,box,bc,ghost);
// Initialize a dist vector
//vd_initialize<dim>(vd, v_cl, k);
auto it = vd.getIterator();
while (it.isNext())
{
auto key = it.get();
for (size_t i = 0; i < dim; i++)
vd.getPos(key)[i] = 0.45123;
++it;
}
vd.map();
vd.template ghost_get<0>();
auto it_2 = vd.getIterator();
while (it_2.isNext())
{
auto key = it_2.get();
//Put the forces
for (size_t i = 0; i < dim; i++)
vd.template getProp<0>(key)[i] = 0.51234;
++it_2;
}
// Save the vector
vd.save("vector_dist.h5");
}
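A sketch (not part of the commit) of a round-trip check the load test below could run after vd.load("vector_dist.h5"), assuming the file was written by the save test above, where every position component is set to 0.45123 (a check of the property against 0.51234 would look the same):

// Hypothetical helper: verify that every loaded particle still carries
// the position written by the save test.
template<unsigned int dim, typename vector_type>
void check_loaded_positions(vector_type & vd)
{
    auto it = vd.getDomainIterator();

    while (it.isNext())
    {
        auto key = it.get();

        for (size_t i = 0; i < dim; i++)
            BOOST_REQUIRE_CLOSE(vd.getPos(key)[i], 0.45123f, 0.001);

        ++it;
    }
}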
BOOST_AUTO_TEST_CASE( vector_dist_hdf5_load_test )
{
if (create_vcluster().getProcessUnitID() == 0)
std::cout << "Loading distributed vector" << std::endl;
const size_t dim = 3;
Box<dim,float> box;
for (size_t i = 0; i < dim; i++)
{
box.setLow(i,0.0);
box.setHigh(i,1.0);
}
// Boundary conditions
size_t bc[dim];
for (size_t i = 0; i < dim; i++)
bc[i] = NON_PERIODIC;
// ghost
Ghost<3,float> ghost(0.5);
vector_dist<dim,float, aggregate<float[dim]>, CartDecomposition<dim,float> > vd(0,box,bc,ghost);
vd.load("vector_dist.h5");
auto it = vd.getDomainIterator();
while (it.isNext())
{
auto key = it.get();