Commit b7f01ce0 authored by Yaroslav

Slight changes to vect and grid save/load tests

parent 2762c36c
......@@ -1677,12 +1677,15 @@ public:
//std::cout << "(" << gdb_ext_global.get(i).Dbox.getLow(0) << "; " << gdb_ext_global.get(i).Dbox.getHigh(0) << ")" << std::endl;
//std::cout << "I = " << i << ", Origin is (" << gdb_ext_global.get(i).origin.get(0) << "; " << gdb_ext_global.get(i).origin.get(1) << ")" << std::endl;
this->template map_(domain,dec,cd_sm,loc_grid_old,gdb_ext,gdb_ext_old,gdb_ext_global);
this->template map_(domain,dec,cd_sm,loc_grid,loc_grid_old,gdb_ext,gdb_ext_old,gdb_ext_global);
}
inline void save(const std::string & filename) const
{
//std::cout << "Loc_grid.size() before save: " << loc_grid.size() << std::endl;
//std::cout << "Gdb_ext.size() before save: " << gdb_ext.size() << std::endl;
//Pack_request vector
size_t req = 0;
......@@ -1690,7 +1693,7 @@ public:
Packer<decltype(loc_grid),HeapMemory>::packRequest(loc_grid,req);
Packer<decltype(gdb_ext),HeapMemory>::packRequest(gdb_ext,req);
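// req now holds the number of bytes needed to pack loc_grid and gdb_ext together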
std::cout << "Req: " << req << std::endl;
//std::cout << "Req: " << req << std::endl;
// allocate the memory
HeapMemory pmem;
......@@ -1714,9 +1717,6 @@ public:
int mpi_rank = v_cl.getProcessUnitID();
int mpi_size = v_cl.getProcessingUnits();
if (mpi_rank == 0)
std::cout << "Saving grid" << std::endl;
MPI_Comm comm = v_cl.getMPIComm();
MPI_Info info = MPI_INFO_NULL;
......@@ -1730,7 +1730,7 @@ public:
H5Pclose(plist_id);
size_t sz = pmem.size();
std::cout << "Pmem.size: " << pmem.size() << std::endl;
//std::cout << "Pmem.size: " << pmem.size() << std::endl;
openfpm::vector<size_t> sz_others;
v_cl.allGather(sz,sz_others);
v_cl.execute();
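// Every rank now knows the packed size of every other rank (sz_others), so it can place its block
// at the byte offset given by the sum of the sizes of the ranks before it (see the offset loop below)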
......@@ -1758,7 +1758,8 @@ public:
//Create data space in memory
hid_t mem_dataspace_id = H5Screate_simple(1, mdim, NULL);
std::cout << "Sum: " << sum << std::endl;
if (mpi_rank == 0)
std::cout << "Total object size: " << sum << std::endl;
//Create data set in file
hid_t file_dataset = H5Dcreate (file, "grid_dist", H5T_NATIVE_CHAR, file_dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
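// The dataset is one flat char array: the concatenation of every rank's packed buffer;
// each rank writes only its own hyperslab of it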
......@@ -1786,7 +1787,7 @@ public:
offset[0] += sz_others.get(i);
}
std::cout << "MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << ", Offset: " << offset[0] << ", Block: " << block[0] << std::endl;
//std::cout << "MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << ", Offset: " << offset[0] << ", Block: " << block[0] << std::endl;
int metadata[mpi_size];
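// Presumably one entry per rank: the size of that rank's packed block, stored next to the data
// so that load() can read it back (metadata_out) and re-partition the blocks even on a different number of ranks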
......@@ -1829,9 +1830,6 @@ public:
int mpi_rank = v_cl.getProcessUnitID();
int mpi_size = v_cl.getProcessingUnits();
if (mpi_rank == 0)
std::cout << "Loading" << std::endl;
// Set up file access property list with parallel I/O access
hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
H5Pset_fapl_mpio(plist_id, comm, info);
......@@ -1852,8 +1850,8 @@ public:
hssize_t mpi_size_old = H5Sget_select_npoints (file_dataspace_id);
if (mpi_rank == 0)
printf ("\nOld MPI size: %llu\n", mpi_size_old);
//if (mpi_rank == 0)
//printf ("\nOld MPI size: %llu\n", mpi_size_old);
//Where to read metadata
int metadata_out[mpi_size_old];
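// metadata_out[i] is the packed-block size written by rank i of the run that saved the file (mpi_size_old ranks in total)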
......@@ -1869,7 +1867,7 @@ public:
//Create data space in memory
hid_t mem_dataspace_id = H5Screate_simple(1, mdim, NULL);
/*
if (mpi_rank == 0)
{
hssize_t size;
......@@ -1879,10 +1877,10 @@ public:
size = H5Sget_select_npoints (file_dataspace_id);
printf ("dataspace_id size: %llu\n", size);
}
*/
// Read the dataset.
H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace_id, file_dataspace_id, plist_id, metadata_out);
/*
if (mpi_rank == 0)
{
std::cout << "Metadata_out[]: ";
......@@ -1892,7 +1890,7 @@ public:
}
std::cout << " " << std::endl;
}
*/
//Open dataset
hid_t dataset_2 = H5Dopen (file, "grid_dist", H5P_DEFAULT);
......@@ -1958,7 +1956,7 @@ public:
//hsize_t stride[1] = {1};
hsize_t count[1] = {1};
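// block[0]/offset[0] select the run of whole old blocks assigned to this rank;
// block_add[0]/offset_add[0] select the extra old block (if any) it receives when the old and new processor counts differ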
std::cout << "LOAD: MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << ", Offset: " << offset[0] << ", Offset_add: " << offset_add[0] << ", Block: " << block[0] << ", Block_add: " << block_add[0] << std::endl;
//std::cout << "LOAD: MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << ", Offset: " << offset[0] << ", Offset_add: " << offset_add[0] << ", Block: " << block[0] << ", Block_add: " << block_add[0] << std::endl;
//Select file dataspace
......@@ -1977,7 +1975,7 @@ public:
//Create data space in memory
hid_t mem_dataspace_id_2 = H5Screate_simple(1, mdim_2, NULL);
hid_t mem_dataspace_id_3 = H5Screate_simple(1, mdim_3, NULL);
/*
if (mpi_rank == 0)
{
hssize_t size2;
......@@ -1997,7 +1995,7 @@ public:
size2 = H5Sget_select_npoints (file_dataspace_id_3);
printf ("LOAD: dataspace_id_3 size: %llu\n", size2);
}
*/
size_t sum = 0;
for (int i = 0; i < mpi_size_old; i++)
......@@ -2006,36 +2004,64 @@ public:
}
std::cout << "LOAD: sum: " << sum << std::endl;
//std::cout << "LOAD: sum: " << sum << std::endl;
// allocate the memory
HeapMemory pmem;
HeapMemory pmem2;
//pmem.allocate(req);
ExtPreAlloc<HeapMemory> & mem = *(new ExtPreAlloc<HeapMemory>(block[0]+block_add[0],pmem));
ExtPreAlloc<HeapMemory> & mem = *(new ExtPreAlloc<HeapMemory>(block[0],pmem));
mem.incRef();
ExtPreAlloc<HeapMemory> & mem2 = *(new ExtPreAlloc<HeapMemory>(block_add[0],pmem2));
mem2.incRef();
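// Two separate reads: mem receives the run of whole old blocks (file_dataspace_id_2),
// mem2 receives the extra old block (file_dataspace_id_3)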
// Read the dataset.
H5Dread(dataset_2, H5T_NATIVE_CHAR, mem_dataspace_id_2, file_dataspace_id_2, plist_id, (char *)mem.getPointer());
// Read the dataset.
H5Dread(dataset_2, H5T_NATIVE_CHAR, mem_dataspace_id_3, file_dataspace_id_3, plist_id, (char *)mem.getPointer());
H5Dread(dataset_2, H5T_NATIVE_CHAR, mem_dataspace_id_3, file_dataspace_id_3, plist_id, (char *)mem2.getPointer());
mem.allocate(pmem.size());
std::cout << "Mem.size(): " << mem.size() << " = " << block[0]+block_add[0] << std::endl;
mem2.allocate(pmem2.size());
//std::cout << "Mem+mem2.size(): " << mem.size() + mem2.size() << " = " << block[0]+block_add[0] << std::endl;
Unpack_stat ps;
Unpacker<decltype(loc_grid_old),HeapMemory>::unpack(mem,loc_grid_old,ps);
Unpacker<decltype(gdb_ext_old),HeapMemory>::unpack(mem,gdb_ext_old,ps);
Unpack_stat ps2;
openfpm::vector<device_grid> loc_grid_old_unp;
openfpm::vector<GBoxes<device_grid::dims>> gdb_ext_old_unp;
Unpacker<decltype(loc_grid_old),HeapMemory>::unpack(mem2,loc_grid_old_unp,ps2);
Unpacker<decltype(gdb_ext_old),HeapMemory>::unpack(mem2,gdb_ext_old_unp,ps2);
/*
std::cout << "Loc_grid_old.size() before merge: " << loc_grid_old.size() << std::endl;
std::cout << "Gdb_ext_old.size() before merge: " << gdb_ext_old.size() << std::endl;
std::cout << "Loc_grid_old_unp.size() before merge: " << loc_grid_old_unp.size() << std::endl;
std::cout << "Gdb_ext_old_unp.size() before merge: " << gdb_ext_old_unp.size() << std::endl;
*/
for (size_t i = 0; i < loc_grid_old_unp.size(); i++)
loc_grid_old.add(loc_grid_old_unp.get(i));
for (size_t i = 0; i < gdb_ext_old_unp.size(); i++)
gdb_ext_old.add(gdb_ext_old_unp.get(i));
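// loc_grid_old and gdb_ext_old now hold every old sub-grid this rank read from the file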
/*
std::cout << "Loc_grid_old.size() after merge: " << loc_grid_old.size() << std::endl;
std::cout << "Gdb_ext_old.size() after merge: " << gdb_ext_old.size() << std::endl;
std::cout << "*********************************" << std::endl;
*/
// Close the dataset.
H5Dclose(dataset);
H5Dclose(dataset_2);
// Close the file.
H5Fclose(file);
H5Pclose(plist_id);
Unpack_stat ps;
Unpacker<decltype(loc_grid_old),HeapMemory>::unpack(mem,loc_grid_old,ps);
Unpacker<decltype(gdb_ext_old),HeapMemory>::unpack(mem,gdb_ext_old,ps);
mem.decRef();
delete &mem;
......
......@@ -16,9 +16,9 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_save_test )
{
// Input data
size_t k = 10;
size_t k = 1000;
size_t ghost_part = 0.01;
double ghost_part = 0.02;
// Domain
Box<2,float> domain({0.0,0.0},{1.0,1.0});
......@@ -30,7 +30,7 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_save_test )
return;
if (v_cl.getProcessUnitID() == 0)
std::cout << "Testing 2D grid HDF5 save" << std::endl;
std::cout << "Saving Distributed 2D Grid..." << std::endl;
// grid size
size_t sz[2];
......@@ -50,17 +50,22 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_save_test )
bool val = dec.check_consistency();
BOOST_REQUIRE_EQUAL(val,true);
// Save the vector
timer t;
t.start();
// Save the grid
g_dist.save("grid_dist_id.h5");
t.stop();
std::cout << "Saving time: " << t.getwct() << std::endl;
}
BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_load_test )
{
// Input data
size_t k = 100;
size_t k = 1000;
size_t ghost_part = 0.01;
double ghost_part = 0.02;
// Domain
Box<2,float> domain({0.0,0.0},{1.0,1.0});
......@@ -72,7 +77,7 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_load_test )
return;
if (v_cl.getProcessUnitID() == 0)
std::cout << "Testing 2D grid HDF5 save/load" << std::endl;
std::cout << "Loading Distributed 2D Grid..." << std::endl;
// grid size
size_t sz[2];
......@@ -85,38 +90,29 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_load_test )
// Distributed grid with id decomposition
grid_dist_id<2, float, scalar<float>, CartDecomposition<2,float>> g_dist(sz,domain,g);
timer t;
t.start();
// Load the grid
g_dist.load("grid_dist_id.h5");
/*
auto NN = vd.getCellList(0.5);
auto it_v = vd.getDomainIterator();
t.stop();
while (it_v.isNext())
{
//key
vect_dist_key_dx key = it_v.get();
std::cout << "Loading time: " << t.getwct() << std::endl;
size_t count = 0;
auto it = g_dist.getDomainIterator();
// Get the position of the particles
Point<dim,float> p = vd.getPos(key);
size_t count = 0;
// Get the neighborhood of the particle
auto cell_it = NN.template getNNIterator<NO_CHECK>(NN.getCell(p));
while(cell_it.isNext())
{
//Next particle in a cell
++cell_it;
count++;
}
while (it.isNext())
{
//key
grid_dist_key_dx<2> key = it.get();
std::cout << "Count: " << count << std::endl;
//g_dist.get(key);
//Next grid point
++it_v;
++it;
count++;
}
*/
v_cl.sum(count);
v_cl.execute();
BOOST_REQUIRE_EQUAL(count, (size_t)1000*1000);
}
BOOST_AUTO_TEST_SUITE_END()
......
......@@ -46,113 +46,23 @@ class grid_dist_id_comm
public:
//! It processes one box
template<typename T1, typename T2, typename T3, typename T4> inline void process_map_box(size_t i, long int & end, long int & id_end, T1 & m_pos, T2 & m_prp, T3 & v_pos, T4 & v_prp, openfpm::vector<size_t> & cnt)
{
/*
long int prc_id = m_oBox.template get<1>(i);
size_t id = m_oBox.template get<0>(i);
if (prc_id >= 0)
{
size_t lbl = p_map_req.get(prc_id);
m_pos.get(lbl).set(cnt.get(lbl), v_pos.get(id));
cnt.get(lbl)++;
// swap the particle
long int id_valid = get_end_valid(end,id_end);
if (id_valid > 0 && (long int)id < id_valid)
{
v_pos.set(id,v_pos.get(id_valid));
v_prp.set(id,v_prp.get(id_valid));
}
}
else
{
// swap the particle
long int id_valid = get_end_valid(end,id_end);
if (id_valid > 0 && (long int)id < id_valid)
{
v_pos.set(id,v_pos.get(id_valid));
v_prp.set(id,v_prp.get(id_valid));
}
}
*/
}
/*! \brief Allocates and fills the send buffer for the map function
*
/*! \brief Reconstruct the local grids
*
* \param m_oGrid_recv Vector of labeled grids to combine into a local grid
*/
void fill_send_map_buf_(openfpm::vector<device_grid> & loc_grid, openfpm::vector<size_t> & prc_sz_r, openfpm::vector<openfpm::vector<SpaceBox<dim,St>>> & m_box)
inline void grids_reconstruct(openfpm::vector<openfpm::vector<device_grid>> & m_oGrid_recv, openfpm::vector<device_grid> & loc_grid)
{
m_box.resize(prc_sz_r.size());
openfpm::vector<size_t> cnt(prc_sz_r.size());
for (size_t i = 0; i < prc_sz_r.size(); i++)
{
// set the size and allocate; using mem guarantees that pos and prp are contiguous
m_box.get(i).resize(prc_sz_r.get(i));
cnt.get(i) = 0;
}
/*
// end vector point
long int id_end = v_pos.size();
// end opart point
long int end = m_opart.size()-1;
// Run through all the particles and fill the sending buffer
for (size_t i = 0; i < m_opart.size(); i++)
{
process_map_particle<proc_with_prp<prp_object,prp...>>(i,end,id_end,m_pos,m_prp,v_pos,v_prp,cnt);
}
v_pos.resize(v_pos.size() - m_opart.size());
*/
}
/*! \brief Allocates and fills the send buffer for the map function
*
*
*/
void fill_send_map_buf(openfpm::vector<device_grid> & loc_grid, openfpm::vector<size_t> & prc_sz_r, openfpm::vector<openfpm::vector<SpaceBox<dim,St>>> & m_box)
{
m_box.resize(prc_sz_r.size());
openfpm::vector<size_t> cnt(prc_sz_r.size());
for (size_t i = 0; i < prc_sz_r.size(); i++)
{
// set the size and allocate; using mem guarantees that pos and prp are contiguous
m_box.get(i).resize(prc_sz_r.get(i));
cnt.get(i) = 0;
}
/*
// end vector point
long int id_end = v_pos.size();
// end opart point
long int end = m_opart.size()-1;
// Run through all the particles and fill the sending buffer
for (size_t i = 0; i < m_opart.size(); i++)
{
process_map_particle<proc_with_prp<prp_object,prp...>>(i,end,id_end,m_pos,m_prp,v_pos,v_prp,cnt);
}
v_pos.resize(v_pos.size() - m_opart.size());
*/
}
/*! \brief Label intersection grids for mappings
*
* \param prc_sz For each processor the number of grids to send
*/
void labelIntersectionGridsProcessor(Box<dim,St> domain, Decomposition & dec, CellDecomposer_sm<dim,St,shift<dim,St>> & cd_sm, openfpm::vector<device_grid> & loc_grid_old, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext_old, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext_global, openfpm::vector<openfpm::vector<device_grid>> & lbl_b, openfpm::vector<size_t> & prc_sz)
inline void labelIntersectionGridsProcessor(Box<dim,St> domain, Decomposition & dec, CellDecomposer_sm<dim,St,shift<dim,St>> & cd_sm, openfpm::vector<device_grid> & loc_grid_old, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext_old, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext_global, openfpm::vector<openfpm::vector<device_grid>> & lbl_b, openfpm::vector<size_t> & prc_sz)
{
// resize the label buffer
lbl_b.resize(v_cl.getProcessingUnits());
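// lbl_b.get(p) collects the sub-grids to send to processor p; prc_sz.get(p) counts them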
......@@ -259,7 +169,7 @@ public:
* \param g_m ghost marker
*
*/
void map_(Box<dim,St> domain, Decomposition & dec, CellDecomposer_sm<dim,St,shift<dim,St>> & cd_sm, openfpm::vector<device_grid> & loc_grid_old, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext_old, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext_global)
void map_(Box<dim,St> domain, Decomposition & dec, CellDecomposer_sm<dim,St,shift<dim,St>> & cd_sm, openfpm::vector<device_grid> & loc_grid, openfpm::vector<device_grid> & loc_grid_old, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext_old, openfpm::vector<GBoxes<device_grid::dims>> & gdb_ext_global)
{
// Processor communication size
openfpm::vector<size_t> prc_sz(v_cl.getProcessingUnits());
......@@ -286,11 +196,7 @@ public:
}
}
//! Boxes vector
//openfpm::vector<openfpm::vector<device_grid>> m_box;
//fill_send_map_buf(loc_grid_global, prc_sz_r, m_box);
// Vector for receiving of intersection grids
openfpm::vector<openfpm::vector<device_grid>> m_oGrid_recv;
m_oGrid_recv.resize(m_oGrid.size());
......@@ -299,9 +205,15 @@ public:
m_oGrid_recv.get(i).resize(m_oGrid.get(i).size());
}
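// m_oGrid.get(p) holds the pieces of the old local grids labeled for processor p;
// exchange them so every processor receives the pieces of its new local grids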
// Send and receive intersection grids
v_cl.SSendRecv(m_oGrid,m_oGrid_recv,prc_r,prc_recv_map,recv_sz_map);
std::cout << "m_oGrid.size(): " << m_oGrid.size() << std::endl;
std::cout << "m_oGrid_recv.size(): " << m_oGrid_recv.size() << std::endl;
// Reconstruct the new local grids
grids_reconstruct(m_oGrid_recv,loc_grid);
}
/*! \brief Constructor
......
......@@ -977,6 +977,8 @@ public:
//Pack_request vector
size_t req = 0;
//std::cout << "V_pos.size() before save: " << v_pos.size() << std::endl;
//Pack request
Packer<decltype(v_pos),HeapMemory>::packRequest(v_pos,req);
Packer<decltype(v_prp),HeapMemory>::packRequest(v_prp,req);
......@@ -1005,9 +1007,6 @@ public:
int mpi_rank = v_cl.getProcessUnitID();
int mpi_size = v_cl.getProcessingUnits();
if (mpi_rank == 0)
std::cout << "Saving" << std::endl;
MPI_Comm comm = v_cl.getMPIComm();
MPI_Info info = MPI_INFO_NULL;
/*
......@@ -1025,7 +1024,7 @@ public:
H5Pclose(plist_id);
size_t sz = pmem.size();
std::cout << "Pmem.size: " << pmem.size() << std::endl;
//std::cout << "Pmem.size: " << pmem.size() << std::endl;
openfpm::vector<size_t> sz_others;
v_cl.allGather(sz,sz_others);
v_cl.execute();
......@@ -1053,7 +1052,8 @@ public:
//Create data space in memory
hid_t mem_dataspace_id = H5Screate_simple(1, mdim, NULL);
std::cout << "Sum: " << sum << std::endl;
if (mpi_rank == 0)
std::cout << "Total object size: " << sum << std::endl;
//Create data set in file
hid_t file_dataset = H5Dcreate (file, "vector_dist", H5T_NATIVE_CHAR, file_dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
......@@ -1124,9 +1124,6 @@ public:
int mpi_rank = v_cl.getProcessUnitID();
int mpi_size = v_cl.getProcessingUnits();
if (mpi_rank == 0)
std::cout << "Loading" << std::endl;
// Set up file access property list with parallel I/O access
hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
H5Pset_fapl_mpio(plist_id, comm, info);
......@@ -1170,9 +1167,9 @@ public:
hssize_t size;
size = H5Sget_select_npoints (mem_dataspace_id);
printf ("\nmemspace_id size: %llu\n", size);
printf ("LOAD: memspace_id size: %llu\n", size);
size = H5Sget_select_npoints (file_dataspace_id);
printf ("dataspace_id size: %llu\n", size);
printf ("LOAD: dataspace_id size: %llu\n", size);
}
// Read the dataset.
......@@ -1197,7 +1194,17 @@ public:
hsize_t block[1] = {0};
hsize_t block_add[1] = {0};
/*
openfpm::vector<openfpm::vector<hsize_t>> block0;
openfpm::vector<openfpm::vector<hsize_t>> block_add0;
openfpm::vector<openfpm::vector<hsize_t>> offset0;
openfpm::vector<openfpm::vector<hsize_t>> offset_add0;
block0.resize(mpi_size);
offset0.resize(mpi_size);
block_add0.resize(mpi_size);
offset_add0.resize(mpi_size);
*/
if (mpi_size >= mpi_size_old)
{
if (mpi_rank >= mpi_size_old)
......@@ -1211,12 +1218,16 @@ public:
int shift = mpi_rank*x;
for (int i = 0; i < x; i++)
{
//block0.get(mpi_rank).add(metadata_out[shift]);
block[0] += metadata_out[shift];
shift++;
}
int y = mpi_size_old%mpi_size;
if (mpi_rank < y)
{
block_add[0] += metadata_out[mpi_size*x+mpi_rank];
//block_add0.get(mpi_rank).add(metadata_out[mpi_size*x+mpi_rank]);
}
}
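// Re-partition of the old blocks when the file was written by more ranks than are now reading it:
// each rank takes x = mpi_size_old/mpi_size whole old blocks (block[0]) and the first
// mpi_size_old%mpi_size ranks take one extra old block (block_add[0]).
// Illustrative example: mpi_size_old = 5, mpi_size = 2 -> x = 2, y = 1;
// rank 0 reads blocks 0,1 plus extra block 4, rank 1 reads blocks 2,3.
// The matching byte offsets (offset[0], offset_add[0]) are accumulated below in the same way.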
hsize_t offset[1] = {0};
......@@ -1240,13 +1251,17 @@ public:
for (int i = 0; i < shift; i++)
{
offset[0] += metadata_out[i];
//offset0.get(mpi_rank).add(metadata_out[i]);
}
int y = mpi_size_old%mpi_size;
if (mpi_rank < y)
{
for (int i = 0; i < mpi_size*x + mpi_rank; i++)
{
offset_add[0] += metadata_out[i];
//offset_add0.get(mpi_rank).add(metadata_out[i]);
}
}
}
......@@ -1254,7 +1269,17 @@ public:
hsize_t count[1] = {1};
std::cout << "LOAD: MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << ", Offset: " << offset[0] << ", Offset_add: " << offset_add[0] << ", Block: " << block[0] << ", Block_add: " << block_add[0] << std::endl;
/*
std::cout << "LOAD: MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << std::endl;
for (size_t i = 0; i < offset0.get(mpi_rank).size(); i++)
std::cout << ", Offset: " << offset0.get(mpi_rank).get(i) << std::endl;
for (size_t i = 0; i < offset_add0.get(mpi_rank).size(); i++)
std::cout << ", Offset_add: " << offset_add0.get(mpi_rank).get(i) << std::endl;
for (size_t i = 0; i < block0.get(mpi_rank).size(); i++)
std::cout << ", Block: " << block0.get(mpi_rank).get(i) << std::endl;
for (size_t i = 0; i < block_add0.get(mpi_rank).size(); i++)
std::cout << ", Block_add: " << block_add0.get(mpi_rank).get(i) << std::endl;
*/
//Select file dataspace
hid_t file_dataspace_id_2 = H5Dget_space(dataset_2);
......@@ -1313,18 +1338,36 @@ public:
// allocate the memory
HeapMemory pmem;
HeapMemory pmem2;
//pmem.allocate(req);
ExtPreAlloc<HeapMemory> & mem = *(new ExtPreAlloc<HeapMemory>(block[0]+block_add[0],pmem));
ExtPreAlloc<HeapMemory> & mem = *(new ExtPreAlloc<HeapMemory>(block[0],pmem));
mem.incRef();
ExtPreAlloc<HeapMemory> & mem2 = *(new ExtPreAlloc<HeapMemory>(block_add[0],pmem2));
mem2.incRef();
// Read the dataset.
H5Dread(dataset_2, H5T_NATIVE_CHAR, mem_dataspace_id_2, file_dataspace_id_2, plist_id, (char *)mem.getPointer());
// Read the dataset.
H5Dread(dataset_2, H5T_NATIVE_CHAR, mem_dataspace_id_3, file_dataspace_id_3, plist_id, (char *)mem.getPointer());
H5Dread(dataset_2, H5T_NATIVE_CHAR, mem_dataspace_id_3, file_dataspace_id_3, plist_id, (char *)mem2.getPointer());
mem.allocate(pmem.size());
std::cout << "Mem.size(): " << mem.size() << " = " << block[0]+block_add[0] << std::endl;
mem2.allocate(pmem2.size());
std::cout << "Mem+mem2.size(): " << mem.size() + mem2.size() << " = " << block[0]+block_add[0] << std::endl;
Unpack_stat ps;
Unpacker<decltype(v_pos),HeapMemory>::unpack(mem,v_pos,ps);
Unpacker<decltype(v_prp),HeapMemory>::unpack(mem,v_prp,ps);
Unpack_stat ps2;
openfpm::vector<Point<dim, St>> v_pos_unp;
openfpm::vector<prop> v_prp_unp;
Unpacker<decltype(v_pos),HeapMemory>::unpack(mem2,v_pos_unp,ps2);
Unpacker<decltype(v_prp),HeapMemory>::unpack(mem2,v_prp_unp,ps2);
// Close the dataset.
H5Dclose(dataset);
......@@ -1333,18 +1376,21 @@ public:
H5Fclose(file);
H5Pclose(plist_id);
Unpack_stat ps;
Unpacker<decltype(v_pos),HeapMemory>::unpack(mem,v_pos,ps);
Unpacker<decltype(v_prp),HeapMemory>::unpack(mem,v_prp,ps);
std::cout << "V_pos.size(): " << v_pos.size() << std::endl;
std::cout << "V_pos_unp.size(): " << v_pos_unp.size() << std::endl;
mem.decRef();
delete &mem;
for (size_t i = 0; i < v_pos_unp.size(); i++)
v_pos.add(v_pos_unp.get(i));
for (size_t i = 0; i < v_prp_unp.size(); i++)
v_prp.add(v_prp_unp.get(i));
g_m = v_pos.size();
std::cout << "V_pos.size() after merge: " << v_pos.size() << std::endl;
map();
std::cout << "V_pos.size() after merge and map: " << v_pos.size() << std::endl;
}
/*! \brief Output particle position and properties
......
......@@ -19,20 +19,20 @@
BOOST_AUTO_TEST_SUITE( vd_hdf5_chckpnt_rstrt_test )
// Input data
//Number of particles
size_t k = 1000;
// Dimensionality
const size_t dim = 3;
BOOST_AUTO_TEST_CASE( vector_dist_hdf5_save_test )
{
// Input data
//Number of particles
size_t k = 100;
// Dimensionality
const size_t dim = 3;
/////////////////
Vcluster & v_cl = create_vcluster();
if (create_vcluster().getProcessUnitID() == 0)
std::cout << "Saving distributed vector" << std::endl;
if (v_cl.getProcessUnitID() == 0)
std::cout << "Saving Distributed 3D Vector..." << std::endl;
Box<dim,float> box;
......@@ -49,32 +49,44 @@ BOOST_AUTO_TEST_CASE( vector_dist_hdf5_save_test )
bc[i] = NON_PERIODIC;
// ghost
Ghost<3,float> ghost(0.5);
Ghost<dim,float> ghost(0.1);
vector_dist<dim,float, aggregate<float[dim]>, CartDecomposition<dim,float> > vd(k,box,bc,ghost);
// Initialize a dist vector
//vd_initialize<dim>(vd, v_cl, k);
auto it = vd.getIterator();
auto it = vd.getDomainIterator();
std::default_random_engine eg(v_cl.getProcessUnitID()*4313);
std::uniform_real_distribution<float> ud(0.0f, 1.0f);
while (it.isNext())
{
auto key = it.get();
for (size_t i = 0; i < dim; i++)
vd.getPos(key)[i] = 0.45123;