Commit 1c09878b authored by incardon

All tests working

parent e310f6cc
openfpm_data @ 6c8ba9db
Subproject commit e9c615ab034051ea1bb666930189c9b0ee745fc4
Subproject commit 6c8ba9db530c53c09c9212dff5098a5b4890fb23
@@ -1044,19 +1044,18 @@ public:
v_cl.execute();
size_t size_r;
size_t size = gdb_ext_global.size();
if (v_cl.getProcessUnitID() == 0)
{
size_t size = gdb_ext_global.size();
for (size_t i = 0; i < v_cl.getProcessingUnits(); i++)
{
for (size_t i = 1; i < v_cl.getProcessingUnits(); i++)
v_cl.send(i,0,&size,sizeof(size_t));
}
size_r = size;
}
else
{
v_cl.recv(0,0,&size_r,sizeof(size_t));
}
v_cl.execute();
gdb_ext_global.resize(size_r);
@@ -1077,52 +1076,6 @@ public:
v_cl.execute();
}
/*! \brief It gathers the local grids for all of the processors
*
*
*
*/
void getGlobalGrids(openfpm::vector<openfpm::vector<device_grid>> & loc_grid_global) const
{
#ifdef SE_CLASS2
check_valid(this,8);
#endif
v_cl.SGather(loc_grid,loc_grid_global,0);
v_cl.execute();
size_t size_r;
if (v_cl.getProcessUnitID() == 0)
{
size_t size = loc_grid_global.size();
for (size_t i = 0; i < v_cl.getProcessingUnits(); i++)
{
v_cl.send(i,0,&size,sizeof(size_t));
}
}
else
{
v_cl.recv(0,0,&size_r,sizeof(size_t));
}
v_cl.execute();
loc_grid_global.resize(size_r);
if (v_cl.getProcessUnitID() == 0)
{
for (size_t i = 0; i < v_cl.getProcessingUnits(); i++)
{
v_cl.send(i,0,loc_grid_global);
}
}
else
{
v_cl.recv(0,0,loc_grid_global);
}
v_cl.execute();
}
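A minimal usage sketch of the gather above (hypothetical: g_dist is an instance of this distributed-grid class and device_grid stands for its local grid type; not part of this commit):
// Every rank ends up with the local grids of all processors,
// indexed by the owning processor id.
openfpm::vector<openfpm::vector<device_grid>> loc_grid_global;
g_dist.getGlobalGrids(loc_grid_global);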
/*! \brief It returns an iterator that spans the full grid domain (each processor spans its local domain)
*
@@ -1898,7 +1851,12 @@ public:
H5Fclose(file);
}
void load_block(long int bid, hssize_t mpi_size_old, int * metadata_out, openfpm::vector<size_t> metadata_accum, hid_t plist_id, hid_t dataset_2)
void load_block(long int bid,
hssize_t mpi_size_old,
int * metadata_out,
openfpm::vector<size_t> & metadata_accum,
hid_t plist_id,
hid_t dataset_2)
{
/* if (mpi_size >= mpi_size_old)
{
@@ -2037,12 +1995,12 @@ public:
printf ("LOAD: dataspace_id_3 size: %llu\n", size2);
}
*/
size_t sum = 0;
/* size_t sum = 0;
for (int i = 0; i < mpi_size_old; i++)
{
sum += metadata_out[i];
}
}*/
// std::cout << "LOAD: sum: " << sum << std::endl;
@@ -29,9 +29,6 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_save_test )
if (v_cl.getProcessingUnits() >= 32)
return;
if (v_cl.getProcessUnitID() == 0)
std::cout << "Saving Distributed 2D Grid..." << std::endl;
// grid size
size_t sz[2];
sz[0] = k;
@@ -67,8 +64,6 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_save_test )
count++;
}
std::cout << "Count: " << count << std::endl;
openfpm::vector<size_t> count_total;
v_cl.allGather(count,count_total);
v_cl.execute();
@@ -78,15 +73,11 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_save_test )
for (size_t i = 0; i < count_total.size(); i++)
sum += count_total.get(i);
std::cout << "Sum: " << sum << std::endl;
timer t;
t.start();
// Save the grid
g_dist.save("grid_dist_id.h5");
t.stop();
std::cout << "Saving time: " << t.getwct() << std::endl;
}
BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_load_test )
@@ -106,9 +97,6 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_load_test )
if (v_cl.getProcessingUnits() >= 32)
return;
if (v_cl.getProcessUnitID() == 0)
std::cout << "Loading Distributed 2D Grid..." << std::endl;
// grid size
size_t sz[2];
sz[0] = k;
@@ -132,8 +120,6 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_load_test )
g_dist.write("Loaded_grid");
g_dist.getDecomposition().write("Loaded_grid_decomposition");
std::cout << "Loading time: " << t.getwct() << std::endl;
auto it = g_dist.getDomainIterator();
size_t count = 0;
@@ -154,8 +140,6 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_load_test )
count++;
}
std::cout << "COOOOOOUNT: " << count << std::endl;
openfpm::vector<size_t> count_total;
v_cl.allGather(count,count_total);
v_cl.execute();
@@ -10,7 +10,7 @@
#include "../../openfpm_data/src/NN/CellList/CellListFast_gen.hpp"
#include "../../openfpm_data/src/NN/CellList/CellListFast_gen.hpp"
#include "HDF5_XdmfWriter/HDF5_XdmfWriter.hpp"
#include "HDF5_wr/HDF5_wr.hpp"
#include "VCluster/VCluster.hpp"
#include "Space/Shape/Point.hpp"
#include "Vector/Iterators/vector_dist_iterator.hpp"
@@ -745,21 +745,6 @@ public:
}
/*! \brief Construct a symmetric cell list based on a cut-off radius
*
* \tparam CellL CellList type to construct
*
* \param r_cut interaction radius, or size of each cell
*
* \return the Cell list
*
*/
template<typename CellL = CellList<dim, St, FAST, shift<dim, St> > > CellL getCellListSymNoBind(St r_cut)
{
return getCellList(r_cut);
}
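Since getCellListSymNoBind above only forwards to getCellList, both calls yield the same cell list for a given cut-off radius. A hedged usage sketch (vd and r_cut are assumed names, not part of this commit):
// Build a cell list over the particles of an existing vector_dist vd.
float r_cut = 0.1f;
auto NN = vd.getCellList(r_cut); // equivalent to vd.getCellListSymNoBind(r_cut)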
/*! \brief Construct a cell list starting from the stored particles
*
* \tparam CellL CellList type to construct
@@ -1613,513 +1598,16 @@ public:
inline void save(const std::string & filename) const
{
//Pack_request vector
size_t req = 0;
//std::cout << "V_pos.size() before save: " << v_pos.size() << std::endl;
//Pack request
Packer<decltype(v_pos),HeapMemory>::packRequest(v_pos,req);
Packer<decltype(v_prp),HeapMemory>::packRequest(v_prp,req);
std::cout << "Req: " << req << std::endl;
// allocate the memory
HeapMemory pmem;
//pmem.allocate(req);
ExtPreAlloc<HeapMemory> & mem = *(new ExtPreAlloc<HeapMemory>(req,pmem));
mem.incRef();
//Packing
Pack_stat sts;
Packer<decltype(v_pos),HeapMemory>::pack(mem,v_pos,sts);
Packer<decltype(v_prp),HeapMemory>::pack(mem,v_prp,sts);
/*****************************************************************
* Create a new file with default creation and access properties.*
* Then create a dataset and write data to it and close the file *
* and dataset. *
*****************************************************************/
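// The code below follows the usual collective parallel-HDF5 write pattern:
// a file-access property list with H5Pset_fapl_mpio on the cluster communicator,
// H5Fcreate of the target file, one file dataspace sized to the sum of all
// packed buffers, a per-rank hyperslab selection, and H5Dwrite with an
// H5FD_MPIO_COLLECTIVE transfer property list.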
int mpi_rank = v_cl.getProcessUnitID();
int mpi_size = v_cl.getProcessingUnits();
MPI_Comm comm = v_cl.getMPIComm();
MPI_Info info = MPI_INFO_NULL;
/*
//Initialize MPI
MPI_Comm_size(comm, &mpi_size);
MPI_Comm_rank(comm, &mpi_rank);
*/
// Set up file access property list with parallel I/O access
hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
H5Pset_fapl_mpio(plist_id, comm, info);
// Create a new file collectively and release property list identifier.
hid_t file = H5Fcreate (filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
H5Pclose(plist_id);
size_t sz = pmem.size();
//std::cout << "Pmem.size: " << pmem.size() << std::endl;
openfpm::vector<size_t> sz_others;
v_cl.allGather(sz,sz_others);
v_cl.execute();
size_t sum = 0;
for (size_t i = 0; i < sz_others.size(); i++)
sum += sz_others.get(i);
//Size for data space in file
hsize_t fdim[1] = {sum};
//Size for data space in file
hsize_t fdim2[1] = {(size_t)mpi_size};
//Create data space in file
hid_t file_dataspace_id = H5Screate_simple(1, fdim, NULL);
//Create data space in file
hid_t file_dataspace_id_2 = H5Screate_simple(1, fdim2, NULL);
//Size for data space in memory
hsize_t mdim[1] = {pmem.size()};
//Create data space in memory
hid_t mem_dataspace_id = H5Screate_simple(1, mdim, NULL);
if (mpi_rank == 0)
std::cout << "Total object size: " << sum << std::endl;
//Create data set in file
hid_t file_dataset = H5Dcreate (file, "vector_dist", H5T_NATIVE_CHAR, file_dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
//Create data set 2 in file
hid_t file_dataset_2 = H5Dcreate (file, "metadata", H5T_NATIVE_INT, file_dataspace_id_2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
//H5Pclose(plist_id);
H5Sclose(file_dataspace_id);
H5Sclose(file_dataspace_id_2);
hsize_t block[1] = {pmem.size()};
HDF5_writer<VECTOR_DIST> h5s;
//hsize_t stride[1] = {1};
hsize_t count[1] = {1};
hsize_t offset[1] = {0};
for (int i = 0; i < mpi_rank; i++)
{
if (mpi_rank == 0)
offset[0] = 0;
else
offset[0] += sz_others.get(i);
}
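// The loop above amounts to an exclusive prefix sum: rank r starts writing at
// the sum of the packed-buffer sizes of ranks 0..r-1 (on rank 0 the loop body
// never runs, so its offset stays 0 and the mpi_rank == 0 branch is dead code).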
std::cout << "MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << ", Offset: " << offset[0] << ", Block: " << block[0] << std::endl;
int metadata[mpi_size];
for (int i = 0; i < mpi_size; i++)
metadata[i] = sz_others.get(i);
//Select hyperslab in the file.
file_dataspace_id = H5Dget_space(file_dataset);
H5Sselect_hyperslab(file_dataspace_id, H5S_SELECT_SET, offset, NULL, count, block);
file_dataspace_id_2 = H5Dget_space(file_dataset_2);
//Create property list for collective dataset write.
plist_id = H5Pcreate(H5P_DATASET_XFER);
H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
//Write a data set to a file
H5Dwrite(file_dataset, H5T_NATIVE_CHAR, mem_dataspace_id, file_dataspace_id, plist_id, (const char *)pmem.getPointer());
//Write a data set 2 to a file
H5Dwrite(file_dataset_2, H5T_NATIVE_INT, H5S_ALL, file_dataspace_id_2, plist_id, metadata);
//Close/release resources.
H5Dclose(file_dataset);
H5Sclose(file_dataspace_id);
H5Dclose(file_dataset_2);
H5Sclose(file_dataspace_id_2);
H5Sclose(mem_dataspace_id);
H5Pclose(plist_id);
H5Fclose(file);
}
void load_block(long int bid, hssize_t mpi_size_old, int * metadata_out, openfpm::vector<size_t> metadata_accum, hid_t plist_id, hid_t dataset_2)
{
/* if (mpi_size >= mpi_size_old)
{
if (mpi_rank >= mpi_size_old)
block[0] = 0;
else
block[0] = {(size_t)metadata_out[mpi_rank]};
}
else
{
int x = mpi_size_old/mpi_size;
int shift = mpi_rank*x;
for (int i = 0; i < x; i++)
{
//block0.get(mpi_rank).add(metadata_out[shift]);
block[0] += metadata_out[shift];
shift++;
}
int y = mpi_size_old%mpi_size;
if (mpi_rank < y)
{
block_add[0] += metadata_out[mpi_size*x+mpi_rank];
//block_add0.get(mpi_rank).add(metadata_out[mpi_size*x+mpi_rank]);
}
}*/
// std::cout << "BID: " << bid << std::endl;
hsize_t offset[1];
hsize_t block[1];
if (bid < mpi_size_old && bid != -1)
{
offset[0] = metadata_accum.get(bid);
block[0] = metadata_out[bid];
}
else
{
offset[0] = 0;
block[0] = 0;
}
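// bid addresses one block written by the previous run: metadata_out[bid] is the
// packed size of that old rank's buffer and metadata_accum.get(bid) its starting
// offset in the dataset; an out-of-range bid (or -1) selects an empty read.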
// std::cout << "Offset: " << offset[0] << "; Block: " << block[0]<< std::endl;
// hsize_t offset_add[1] = {0};
/* if (mpi_size >= mpi_size_old)
{
if (mpi_rank >= mpi_size_old)
offset[0] = 0;
else
{
for (int i = 0; i < mpi_rank; i++)
offset[0] += metadata_out[i];
}
}
else
{
int x = mpi_size_old/mpi_size;
int shift = mpi_rank*x;
for (int i = 0; i < shift; i++)
{
offset[0] += metadata_out[i];
//offset0.get(mpi_rank).add(metadata_out[i]);
}
int y = mpi_size_old%mpi_size;
if (mpi_rank < y)
{
for (int i = 0; i < mpi_size*x + mpi_rank; i++)
{
offset_add[0] += metadata_out[i];
//offset_add0.get(mpi_rank).add(metadata_out[i]);
}
}
}*/
//hsize_t stride[1] = {1};
hsize_t count[1] = {1};
//std::cout << "LOAD: MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << ", Offset: " << offset[0] << ", Offset_add: " << offset_add[0] << ", Block: " << block[0] << ", Block_add: " << block_add[0] << std::endl;
/*
std::cout << "LOAD: MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << std::endl;
for (size_t i = 0; i < offset0.get(mpi_rank).size(); i++)
std::cout << ", Offset: " << offset0.get(mpi_rank).get(i) << std::endl;
for (size_t i = 0; i < offset_add0.get(mpi_rank).size(); i++)
std::cout << ", Offset_add: " << offset_add0.get(mpi_rank).get(i) << std::endl;
for (size_t i = 0; i < block0.get(mpi_rank).size(); i++)
std::cout << ", Block: " << block0.get(mpi_rank).get(i) << std::endl;
for (size_t i = 0; i < block_add0.get(mpi_rank).size(); i++)
std::cout << ", Block_add: " << block_add0.get(mpi_rank).get(i) << std::endl;
*/
//Select file dataspace
hid_t file_dataspace_id_2 = H5Dget_space(dataset_2);
H5Sselect_hyperslab(file_dataspace_id_2, H5S_SELECT_SET, offset, NULL, count, block);
//Select file dataspace
/* hid_t file_dataspace_id_3 = H5Dget_space(dataset_2);
H5Sselect_hyperslab(file_dataspace_id_3, H5S_SELECT_SET, offset_add, NULL, count, block_add);*/
hsize_t mdim_2[1] = {block[0]};
// hsize_t mdim_3[1] = {block_add[0]};
//Size for data space in memory
/*if (mpi_rank >= mpi_size_old)
mdim_2[0] = 0;
else
mdim_2[0] = metadata_out[mpi_rank];*/
//Create data space in memory
hid_t mem_dataspace_id_2 = H5Screate_simple(1, mdim_2, NULL);
// hid_t mem_dataspace_id_3 = H5Screate_simple(1, mdim_3, NULL);
//if (mpi_rank == 0)
/* {
hssize_t size2;
size2 = H5Sget_select_npoints (mem_dataspace_id_2);
printf ("\nLOAD: memspace_id_2 size: %llu\n", size2);
size2 = H5Sget_select_npoints (file_dataspace_id_2);
printf ("LOAD: dataspace_id_2 size: %llu\n", size2);
}*/
/*
if (mpi_rank == 0)
{
hssize_t size2;
size2 = H5Sget_select_npoints (mem_dataspace_id_3);
printf ("\nLOAD: memspace_id_3 size: %llu\n", size2);
size2 = H5Sget_select_npoints (file_dataspace_id_3);
printf ("LOAD: dataspace_id_3 size: %llu\n", size2);
}
*/
size_t sum = 0;
for (int i = 0; i < mpi_size_old; i++)
{
sum += metadata_out[i];
}
// std::cout << "LOAD: sum: " << sum << std::endl;
// allocate the memory
HeapMemory pmem;
// HeapMemory pmem2;
//pmem.allocate(req);
ExtPreAlloc<HeapMemory> & mem = *(new ExtPreAlloc<HeapMemory>(block[0],pmem));
mem.incRef();
// ExtPreAlloc<HeapMemory> & mem2 = *(new ExtPreAlloc<HeapMemory>(block_add[0],pmem2));
// mem2.incRef();
// Read the dataset.
H5Dread(dataset_2, H5T_NATIVE_CHAR, mem_dataspace_id_2, file_dataspace_id_2, plist_id, (char *)mem.getPointer());
// Read the dataset.
// H5Dread(dataset_2, H5T_NATIVE_CHAR, mem_dataspace_id_3, file_dataspace_id_3, plist_id, (char *)mem2.getPointer());
mem.allocate(pmem.size());
// mem2.allocate(pmem2.size());
// std::cout << "Mem.size(): " << mem.size() << " = " << block[0] << std::endl;
Unpack_stat ps;
openfpm::vector<Point<dim, St>> v_pos_unp;
openfpm::vector<prop> v_prp_unp;
Unpacker<decltype(v_pos_unp),HeapMemory>::unpack(mem,v_pos_unp,ps,1);
Unpacker<decltype(v_prp_unp),HeapMemory>::unpack(mem,v_prp_unp,ps,1);
// Unpack_stat ps2;
// Unpacker<decltype(v_pos),HeapMemory>::unpack(mem2,v_pos_unp,ps2,1);
// Unpacker<decltype(v_prp),HeapMemory>::unpack(mem2,v_prp_unp,ps2,1);
// std::cout << "V_pos.size(): " << v_pos.size() << std::endl;
// std::cout << "V_pos_unp.size(): " << v_pos_unp.size() << std::endl;
mem.decRef();
delete &mem;
for (size_t i = 0; i < v_pos_unp.size(); i++)
v_pos.add(v_pos_unp.get(i));
for (size_t i = 0; i < v_prp_unp.size(); i++)
v_prp.add(v_prp_unp.get(i));
g_m = v_pos.size();
h5s.save(filename,v_pos,v_prp);
}
inline void load(const std::string & filename)
{
v_pos.clear();
v_prp.clear();
g_m = 0;
MPI_Comm comm = v_cl.getMPIComm();
MPI_Info info = MPI_INFO_NULL;
int mpi_rank = v_cl.getProcessUnitID();
//int mpi_size = v_cl.getProcessingUnits();
// Set up file access property list with parallel I/O access
hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
H5Pset_fapl_mpio(plist_id, comm, info);
//Open a file
hid_t file = H5Fopen (filename.c_str(), H5F_ACC_RDONLY, plist_id);
H5Pclose(plist_id);
//Open dataset
hid_t dataset = H5Dopen (file, "metadata", H5P_DEFAULT);
//Create property list for collective dataset read
plist_id = H5Pcreate(H5P_DATASET_XFER);
H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
//Select file dataspace
hid_t file_dataspace_id = H5Dget_space(dataset);
hssize_t mpi_size_old = H5Sget_select_npoints (file_dataspace_id);
/*
if (mpi_rank == 0)
printf ("\nOld MPI size: %llu\n", mpi_size_old);
*/
//Where to read metadata
int metadata_out[mpi_size_old];
for (int i = 0; i < mpi_size_old; i++)
{
metadata_out[i] = 0;
}
//Size for data space in memory
hsize_t mdim[1] = {(size_t)mpi_size_old};
//Create data space in memory
hid_t mem_dataspace_id = H5Screate_simple(1, mdim, NULL);
/*
if (mpi_rank == 0)
{
hssize_t size;
size = H5Sget_select_npoints (mem_dataspace_id);
printf ("LOAD: memspace_id size: %llu\n", size);
size = H5Sget_select_npoints (file_dataspace_id);
printf ("LOAD: dataspace_id size: %llu\n", size);
}
*/
// Read the dataset.
H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace_id, file_dataspace_id, plist_id, metadata_out);
/*
if (mpi_rank == 0)
{
std::cout << "Metadata_out[]: ";
for (int i = 0; i < mpi_size_old; i++)
{
std::cout << metadata_out[i] << " ";
}
std::cout << " " << std::endl;
}
*/
openfpm::vector<size_t> metadata_accum;