diff --git a/openfpm_data b/openfpm_data
index e9c615ab034051ea1bb666930189c9b0ee745fc4..6c8ba9db530c53c09c9212dff5098a5b4890fb23 160000
--- a/openfpm_data
+++ b/openfpm_data
@@ -1 +1 @@
-Subproject commit e9c615ab034051ea1bb666930189c9b0ee745fc4
+Subproject commit 6c8ba9db530c53c09c9212dff5098a5b4890fb23
diff --git a/src/Grid/grid_dist_id.hpp b/src/Grid/grid_dist_id.hpp
index 09efc279832eb640fc9ef986ad13a25617ace20d..e974dce4f28e85e755ce7f91eacc73e28695a622 100644
--- a/src/Grid/grid_dist_id.hpp
+++ b/src/Grid/grid_dist_id.hpp
@@ -1044,19 +1044,18 @@ public:
 		v_cl.execute();
 
 		size_t size_r;
+		size_t size = gdb_ext_global.size();
 
 		if (v_cl.getProcessUnitID() == 0)
 		{
-			size_t size = gdb_ext_global.size();
-			for (size_t i = 0; i < v_cl.getProcessingUnits(); i++)
-			{
+			for (size_t i = 1; i < v_cl.getProcessingUnits(); i++)
 				v_cl.send(i,0,&size,sizeof(size_t));
-			}
+
+			size_r = size;
 		}
 		else
-		{
 			v_cl.recv(0,0,&size_r,sizeof(size_t));
-		}
+
 		v_cl.execute();
 
 		gdb_ext_global.resize(size_r);
@@ -1077,52 +1076,6 @@ public:
 		v_cl.execute();
 	}
 
-	/*! \brief It gathers the local grids for all of the processors
-	 *
-	 *
-	 *
-	 */
-	void getGlobalGrids(openfpm::vector<openfpm::vector<device_grid>> & loc_grid_global) const
-	{
-#ifdef SE_CLASS2
-		check_valid(this,8);
-#endif
-		v_cl.SGather(loc_grid,loc_grid_global,0);
-		v_cl.execute();
-
-		size_t size_r;
-
-		if (v_cl.getProcessUnitID() == 0)
-		{
-			size_t size = loc_grid_global.size();
-			for (size_t i = 0; i < v_cl.getProcessingUnits(); i++)
-			{
-				v_cl.send(i,0,&size,sizeof(size_t));
-			}
-		}
-		else
-		{
-			v_cl.recv(0,0,&size_r,sizeof(size_t));
-		}
-		v_cl.execute();
-
-		loc_grid_global.resize(size_r);
-
-
-		if (v_cl.getProcessUnitID() == 0)
-		{
-			for (size_t i = 0; i < v_cl.getProcessingUnits(); i++)
-			{
-				v_cl.send(i,0,loc_grid_global);
-			}
-		}
-		else
-		{
-			v_cl.recv(0,0,loc_grid_global);
-		}
-
-		v_cl.execute();
-	}
 
 	/*! \brief It return an iterator that span the full grid domain (each processor span its local domain)
 	 *
@@ -1898,7 +1851,12 @@ public:
 		H5Fclose(file);
 	}
 
-	void load_block(long int bid, hssize_t mpi_size_old, int * metadata_out, openfpm::vector<size_t> metadata_accum, hid_t plist_id, hid_t dataset_2)
+	void load_block(long int bid,
+			hssize_t mpi_size_old,
+			int * metadata_out,
+			openfpm::vector<size_t> & metadata_accum,
+			hid_t plist_id,
+			hid_t dataset_2)
 	{
 /*		if (mpi_size >= mpi_size_old)
 		{
@@ -2037,12 +1995,12 @@ public:
 			printf ("LOAD: dataspace_id_3 size: %llu\n", size2);
 		}
 */
-		size_t sum = 0;
+/*		size_t sum = 0;
 
 		for (int i = 0; i < mpi_size_old; i++)
 		{
 			sum += metadata_out[i];
-		}
+		}*/
 
 //		std::cout << "LOAD: sum: " << sum << std::endl;
 
diff --git a/src/Grid/grid_dist_id_HDF5_chckpnt_restart_test.hpp b/src/Grid/grid_dist_id_HDF5_chckpnt_restart_test.hpp
index cfb08eee597377cd3b7f9fd82ed8f9ac7f17b3ea..fd63918ace57d8f4a9f4f65bda40651e83fec643 100644
--- a/src/Grid/grid_dist_id_HDF5_chckpnt_restart_test.hpp
+++ b/src/Grid/grid_dist_id_HDF5_chckpnt_restart_test.hpp
@@ -29,9 +29,6 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_save_test )
 	if (v_cl.getProcessingUnits() >= 32)
 		return;
 
-	if (v_cl.getProcessUnitID() == 0)
-		std::cout << "Saving Distributed 2D Grid..." << std::endl;
-
 	// grid size
 	size_t sz[2];
 	sz[0] = k;
@@ -67,8 +64,6 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_save_test )
 		count++;
 	}
 
-	std::cout << "Count: " << count << std::endl;
-
 	openfpm::vector<size_t> count_total;
 	v_cl.allGather(count,count_total);
 	v_cl.execute();
@@ -78,15 +73,11 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_save_test )
 	for (size_t i = 0; i < count_total.size(); i++)
 		sum += count_total.get(i);
 
-	std::cout << "Sum: " << sum << std::endl;
-
 	timer t;
 	t.start();
 	// Save the grid
 	g_dist.save("grid_dist_id.h5");
 	t.stop();
-
-	std::cout << "Saving time: " << t.getwct() << std::endl;
 }
 
 BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_load_test )
@@ -106,9 +97,6 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_load_test )
 	if (v_cl.getProcessingUnits() >= 32)
 		return;
 
-	if (v_cl.getProcessUnitID() == 0)
-		std::cout << "Loading Distributed 2D Grid..." << std::endl;
-
 	// grid size
 	size_t sz[2];
 	sz[0] = k;
@@ -132,8 +120,6 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_load_test )
 	g_dist.write("Loaded_grid");
 	g_dist.getDecomposition().write("Loaded_grid_decomposition");
 
-	std::cout << "Loading time: " << t.getwct() << std::endl;
-
 	auto it = g_dist.getDomainIterator();
 
 	size_t count = 0;
@@ -154,8 +140,6 @@ BOOST_AUTO_TEST_CASE( grid_dist_id_hdf5_load_test )
 		count++;
 	}
 
-	std::cout << "COOOOOOUNT: " << count << std::endl;
-
 	openfpm::vector<size_t> count_total;
 	v_cl.allGather(count,count_total);
 	v_cl.execute();
diff --git a/src/Vector/vector_dist.hpp b/src/Vector/vector_dist.hpp
index f9ad21357004b694985bf088fffa5beb9900dbbc..781e12bd978988a7be54e7d5ed26d05437c1b42e 100644
--- a/src/Vector/vector_dist.hpp
+++ b/src/Vector/vector_dist.hpp
@@ -10,7 +10,7 @@
 #include "../../openfpm_data/src/NN/CellList/CellListFast_gen.hpp"
 #include "../../openfpm_data/src/NN/CellList/CellListFast_gen.hpp"
 
-#include "HDF5_XdmfWriter/HDF5_XdmfWriter.hpp"
+#include "HDF5_wr/HDF5_wr.hpp"
 #include "VCluster/VCluster.hpp"
 #include "Space/Shape/Point.hpp"
 #include "Vector/Iterators/vector_dist_iterator.hpp"
@@ -745,21 +745,6 @@ public:
 	}
 
-	/*! \brief Construct a cell list symmetric based on a cut of radius
-	 *
-	 * \tparam CellL CellList type to construct
-	 *
-	 * \param r_cut interation radius, or size of each cell
-	 *
-	 * \return the Cell list
-	 *
-	 */
-	template<typename CellL = CellList<dim, St, FAST, shift<dim, St> > > CellL getCellListSymNoBind(St r_cut)
-	{
-		return getCellList(r_cut);
-	}
-
-
 	/*! \brief Construct a cell list starting from the stored particles
 	 *
 	 * \tparam CellL CellList type to construct
@@ -1613,513 +1598,16 @@ public:
 	inline void save(const std::string & filename) const
 	{
-		//Pack_request vector
-		size_t req = 0;
-
-		//std::cout << "V_pos.size() before save: " << v_pos.size() << std::endl;
-
-		//Pack request
-		Packer<decltype(v_pos),HeapMemory>::packRequest(v_pos,req);
-		Packer<decltype(v_prp),HeapMemory>::packRequest(v_prp,req);
-
-		std::cout << "Req: " << req << std::endl;
-
-		// allocate the memory
-		HeapMemory pmem;
-		//pmem.allocate(req);
-		ExtPreAlloc<HeapMemory> & mem = *(new ExtPreAlloc<HeapMemory>(req,pmem));
-		mem.incRef();
-
-		//Packing
-
-		Pack_stat sts;
-
-		Packer<decltype(v_pos),HeapMemory>::pack(mem,v_pos,sts);
-		Packer<decltype(v_prp),HeapMemory>::pack(mem,v_prp,sts);
-
-		/*****************************************************************
-		 * Create a new file with default creation and access properties.*
-		 * Then create a dataset and write data to it and close the file *
-		 * and dataset.                                                  *
-		 *****************************************************************/
-
-		int mpi_rank = v_cl.getProcessUnitID();
-		int mpi_size = v_cl.getProcessingUnits();
-
-		MPI_Comm comm = v_cl.getMPIComm();
-		MPI_Info info = MPI_INFO_NULL;
-/*
-		//Initialize MPI
-		MPI_Comm_size(comm, &mpi_size);
-		MPI_Comm_rank(comm, &mpi_rank);
-*/
-		// Set up file access property list with parallel I/O access
-
-		hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
-		H5Pset_fapl_mpio(plist_id, comm, info);
-
-		// Create a new file collectively and release property list identifier.
-		hid_t file = H5Fcreate (filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
-		H5Pclose(plist_id);
-
-		size_t sz = pmem.size();
-		//std::cout << "Pmem.size: " << pmem.size() << std::endl;
-		openfpm::vector<size_t> sz_others;
-		v_cl.allGather(sz,sz_others);
-		v_cl.execute();
-
-		size_t sum = 0;
-
-		for (size_t i = 0; i < sz_others.size(); i++)
-			sum += sz_others.get(i);
-
-		//Size for data space in file
-		hsize_t fdim[1] = {sum};
-
-		//Size for data space in file
-		hsize_t fdim2[1] = {(size_t)mpi_size};
-
-		//Create data space in file
-		hid_t file_dataspace_id = H5Screate_simple(1, fdim, NULL);
-
-		//Create data space in file
-		hid_t file_dataspace_id_2 = H5Screate_simple(1, fdim2, NULL);
-
-		//Size for data space in memory
-		hsize_t mdim[1] = {pmem.size()};
-
-		//Create data space in memory
-		hid_t mem_dataspace_id = H5Screate_simple(1, mdim, NULL);
-
-		if (mpi_rank == 0)
-			std::cout << "Total object size: " << sum << std::endl;
-
-		//Create data set in file
-		hid_t file_dataset = H5Dcreate (file, "vector_dist", H5T_NATIVE_CHAR, file_dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
-
-		//Create data set 2 in file
-		hid_t file_dataset_2 = H5Dcreate (file, "metadata", H5T_NATIVE_INT, file_dataspace_id_2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
-
-		//H5Pclose(plist_id);
-		H5Sclose(file_dataspace_id);
-		H5Sclose(file_dataspace_id_2);
-
-		hsize_t block[1] = {pmem.size()};
+		HDF5_writer<VECTOR_DIST> h5s;
 
-		//hsize_t stride[1] = {1};
-
-		hsize_t count[1] = {1};
-
-		hsize_t offset[1] = {0};
-
-		for (int i = 0; i < mpi_rank; i++)
-		{
-			if (mpi_rank == 0)
-				offset[0] = 0;
-			else
-				offset[0] += sz_others.get(i);
-		}
-
-		std::cout << "MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << ", Offset: " << offset[0] << ", Block: " << block[0] << std::endl;
-
-		int metadata[mpi_size];
-
-		for (int i = 0; i < mpi_size; i++)
-			metadata[i] = sz_others.get(i);
-
-		//Select hyperslab in the file.
-		file_dataspace_id = H5Dget_space(file_dataset);
-		H5Sselect_hyperslab(file_dataspace_id, H5S_SELECT_SET, offset, NULL, count, block);
-
-		file_dataspace_id_2 = H5Dget_space(file_dataset_2);
-
-
-		//Create property list for collective dataset write.
-		plist_id = H5Pcreate(H5P_DATASET_XFER);
-		H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
-
-		//Write a data set to a file
-		H5Dwrite(file_dataset, H5T_NATIVE_CHAR, mem_dataspace_id, file_dataspace_id, plist_id, (const char *)pmem.getPointer());
-
-		//Write a data set 2 to a file
-		H5Dwrite(file_dataset_2, H5T_NATIVE_INT, H5S_ALL, file_dataspace_id_2, plist_id, metadata);
-
-
-		//Close/release resources.
-		H5Dclose(file_dataset);
-		H5Sclose(file_dataspace_id);
-		H5Dclose(file_dataset_2);
-		H5Sclose(file_dataspace_id_2);
-		H5Sclose(mem_dataspace_id);
-		H5Pclose(plist_id);
-		H5Fclose(file);
-	}
-
-	void load_block(long int bid, hssize_t mpi_size_old, int * metadata_out, openfpm::vector<size_t> metadata_accum, hid_t plist_id, hid_t dataset_2)
-	{
-/*		if (mpi_size >= mpi_size_old)
-		{
-			if (mpi_rank >= mpi_size_old)
-				block[0] = 0;
-			else
-				block[0] = {(size_t)metadata_out[mpi_rank]};
-		}
-		else
-		{
-			int x = mpi_size_old/mpi_size;
-			int shift = mpi_rank*x;
-			for (int i = 0; i < x; i++)
-			{
-				//block0.get(mpi_rank).add(metadata_out[shift]);
-				block[0] += metadata_out[shift];
-				shift++;
-			}
-			int y = mpi_size_old%mpi_size;
-			if (mpi_rank < y)
-			{
-				block_add[0] += metadata_out[mpi_size*x+mpi_rank];
-				//block_add0.get(mpi_rank).add(metadata_out[mpi_size*x+mpi_rank]);
-			}
-		}*/
-
-//		std::cout << "BID: " << bid << std::endl;
-
-		hsize_t offset[1];
-		hsize_t block[1];
-
-		if (bid < mpi_size_old && bid != -1)
-		{
-			offset[0] = metadata_accum.get(bid);
-			block[0] = metadata_out[bid];
-		}
-		else
-		{
-			offset[0] = 0;
-			block[0] = 0;
-		}
-
-//		std::cout << "Offset: " << offset[0] << "; Block: " << block[0]<< std::endl;
-//		hsize_t offset_add[1] = {0};
-
-/*		if (mpi_size >= mpi_size_old)
-		{
-			if (mpi_rank >= mpi_size_old)
-				offset[0] = 0;
-			else
-			{
-				for (int i = 0; i < mpi_rank; i++)
-					offset[0] += metadata_out[i];
-			}
-		}
-		else
-		{
-			int x = mpi_size_old/mpi_size;
-			int shift = mpi_rank*x;
-
-			for (int i = 0; i < shift; i++)
-			{
-				offset[0] += metadata_out[i];
-				//offset0.get(mpi_rank).add(metadata_out[i]);
-			}
-
-			int y = mpi_size_old%mpi_size;
-			if (mpi_rank < y)
-			{
-				for (int i = 0; i < mpi_size*x + mpi_rank; i++)
-				{
-					offset_add[0] += metadata_out[i];
-					//offset_add0.get(mpi_rank).add(metadata_out[i]);
-				}
-			}
-		}*/
-
-		//hsize_t stride[1] = {1};
-		hsize_t count[1] = {1};
-
-		//std::cout << "LOAD: MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << ", Offset: " << offset[0] << ", Offset_add: " << offset_add[0] << ", Block: " << block[0] << ", Block_add: " << block_add[0] << std::endl;
-/*
-		std::cout << "LOAD: MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << std::endl;
-		for (size_t i = 0; i < offset0.get(mpi_rank).size(); i++)
-			std::cout << ", Offset: " << offset0.get(mpi_rank).get(i) << std::endl;
-		for (size_t i = 0; i < offset_add0.get(mpi_rank).size(); i++)
-			std::cout << ", Offset_add: " << offset_add0.get(mpi_rank).get(i) << std::endl;
-		for (size_t i = 0; i < block0.get(mpi_rank).size(); i++)
-			std::cout << ", Block: " << block0.get(mpi_rank).get(i) << std::endl;
-		for (size_t i = 0; i < block_add0.get(mpi_rank).size(); i++)
-			std::cout << ", Block_add: " << block_add0.get(mpi_rank).get(i) << std::endl;
-*/
-
-		//Select file dataspace
-		hid_t file_dataspace_id_2 = H5Dget_space(dataset_2);
-
-		H5Sselect_hyperslab(file_dataspace_id_2, H5S_SELECT_SET, offset, NULL, count, block);
-
-		//Select file dataspace
-/*		hid_t file_dataspace_id_3 = H5Dget_space(dataset_2);
-
-		H5Sselect_hyperslab(file_dataspace_id_3, H5S_SELECT_SET, offset_add, NULL, count, block_add);*/
-
-		hsize_t mdim_2[1] = {block[0]};
-//		hsize_t mdim_3[1] = {block_add[0]};
-
-
-		//Size for data space in memory
-
-		/*if (mpi_rank >= mpi_size_old)
-			mdim_2[0] = 0;
-		else
-			mdim_2[0] = metadata_out[mpi_rank];*/
-
-		//Create data space in memory
-		hid_t mem_dataspace_id_2 = H5Screate_simple(1, mdim_2, NULL);
-//		hid_t mem_dataspace_id_3 = H5Screate_simple(1, mdim_3, NULL);
-
-		//if (mpi_rank == 0)
-/*		{
-			hssize_t size2;
-
-			size2 = H5Sget_select_npoints (mem_dataspace_id_2);
-			printf ("\nLOAD: memspace_id_2 size: %llu\n", size2);
-			size2 = H5Sget_select_npoints (file_dataspace_id_2);
-			printf ("LOAD: dataspace_id_2 size: %llu\n", size2);
-		}*/
-/*
-		if (mpi_rank == 0)
-		{
-			hssize_t size2;
-
-			size2 = H5Sget_select_npoints (mem_dataspace_id_3);
-			printf ("\nLOAD: memspace_id_3 size: %llu\n", size2);
-			size2 = H5Sget_select_npoints (file_dataspace_id_3);
-			printf ("LOAD: dataspace_id_3 size: %llu\n", size2);
-		}
-*/
-		size_t sum = 0;
-
-		for (int i = 0; i < mpi_size_old; i++)
-		{
-			sum += metadata_out[i];
-		}
-
-//		std::cout << "LOAD: sum: " << sum << std::endl;
-
-		// allocate the memory
-		HeapMemory pmem;
-//		HeapMemory pmem2;
-		//pmem.allocate(req);
-		ExtPreAlloc<HeapMemory> & mem = *(new ExtPreAlloc<HeapMemory>(block[0],pmem));
-		mem.incRef();
-//		ExtPreAlloc<HeapMemory> & mem2 = *(new ExtPreAlloc<HeapMemory>(block_add[0],pmem2));
-//		mem2.incRef();
-
-		// Read the dataset.
-		H5Dread(dataset_2, H5T_NATIVE_CHAR, mem_dataspace_id_2, file_dataspace_id_2, plist_id, (char *)mem.getPointer());
-
-		// Read the dataset.
-//		H5Dread(dataset_2, H5T_NATIVE_CHAR, mem_dataspace_id_3, file_dataspace_id_3, plist_id, (char *)mem2.getPointer());
-
-		mem.allocate(pmem.size());
-//		mem2.allocate(pmem2.size());
-//		std::cout << "Mem.size(): " << mem.size() << " = " << block[0] << std::endl;
-
-		Unpack_stat ps;
-
-		openfpm::vector<Point<dim, St>> v_pos_unp;
-
-		openfpm::vector<prop> v_prp_unp;
-
-		Unpacker<decltype(v_pos_unp),HeapMemory>::unpack(mem,v_pos_unp,ps,1);
-		Unpacker<decltype(v_prp_unp),HeapMemory>::unpack(mem,v_prp_unp,ps,1);
-
-//		Unpack_stat ps2;
-
-
-//		Unpacker<decltype(v_pos),HeapMemory>::unpack(mem2,v_pos_unp,ps2,1);
-//		Unpacker<decltype(v_prp),HeapMemory>::unpack(mem2,v_prp_unp,ps2,1);
-
-//		std::cout << "V_pos.size(): " << v_pos.size() << std::endl;
-//		std::cout << "V_pos_unp.size(): " << v_pos_unp.size() << std::endl;
-
-		mem.decRef();
-		delete &mem;
-
-		for (size_t i = 0; i < v_pos_unp.size(); i++)
-			v_pos.add(v_pos_unp.get(i));
-
-		for (size_t i = 0; i < v_prp_unp.size(); i++)
-			v_prp.add(v_prp_unp.get(i));
-
-		g_m = v_pos.size();
+		h5s.save(filename,v_pos,v_prp);
 	}
 
 	inline void load(const std::string & filename)
 	{
-		v_pos.clear();
-		v_prp.clear();
-
-		g_m = 0;
-
-		MPI_Comm comm = v_cl.getMPIComm();
-		MPI_Info info = MPI_INFO_NULL;
-
-		int mpi_rank = v_cl.getProcessUnitID();
-		//int mpi_size = v_cl.getProcessingUnits();
-
-		// Set up file access property list with parallel I/O access
-		hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
-		H5Pset_fapl_mpio(plist_id, comm, info);
-
-		//Open a file
-		hid_t file = H5Fopen (filename.c_str(), H5F_ACC_RDONLY, plist_id);
-		H5Pclose(plist_id);
-
-		//Open dataset
-		hid_t dataset = H5Dopen (file, "metadata", H5P_DEFAULT);
-
-		//Create property list for collective dataset read
-		plist_id = H5Pcreate(H5P_DATASET_XFER);
-		H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
-
-		//Select file dataspace
-		hid_t file_dataspace_id = H5Dget_space(dataset);
-
-		hssize_t mpi_size_old = H5Sget_select_npoints (file_dataspace_id);
-/*
-		if (mpi_rank == 0)
-			printf ("\nOld MPI size: %llu\n", mpi_size_old);
-*/
-		//Where to read metadata
-		int metadata_out[mpi_size_old];
-
-		for (int i = 0; i < mpi_size_old; i++)
-		{
-			metadata_out[i] = 0;
-		}
-
-		//Size for data space in memory
-		hsize_t mdim[1] = {(size_t)mpi_size_old};
-
-		//Create data space in memory
-		hid_t mem_dataspace_id = H5Screate_simple(1, mdim, NULL);
-/*
-		if (mpi_rank == 0)
-		{
-			hssize_t size;
-
-			size = H5Sget_select_npoints (mem_dataspace_id);
-			printf ("LOAD: memspace_id size: %llu\n", size);
-			size = H5Sget_select_npoints (file_dataspace_id);
-			printf ("LOAD: dataspace_id size: %llu\n", size);
-		}
-*/
-		// Read the dataset.
-		H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace_id, file_dataspace_id, plist_id, metadata_out);
-/*
-		if (mpi_rank == 0)
-		{
-			std::cout << "Metadata_out[]: ";
-			for (int i = 0; i < mpi_size_old; i++)
-			{
-				std::cout << metadata_out[i] << " ";
-			}
-			std::cout << " " << std::endl;
-		}
-*/
-		openfpm::vector<size_t> metadata_accum;
-		metadata_accum.resize(mpi_size_old);
-
-		metadata_accum.get(0) = 0;
-		for (int i = 1 ; i < mpi_size_old ; i++)
-			metadata_accum.get(i) = metadata_accum.get(i-1) + metadata_out[i-1];
-
-		//Open dataset
-		hid_t dataset_2 = H5Dopen (file, "vector_dist", H5P_DEFAULT);
-
-		//Create property list for collective dataset read
-		plist_id = H5Pcreate(H5P_DATASET_XFER);
-		H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
-
-/*
-		openfpm::vector<openfpm::vector<hsize_t>> block0;
-		openfpm::vector<openfpm::vector<hsize_t>> block_add0;
-		openfpm::vector<openfpm::vector<hsize_t>> offset0;
-		openfpm::vector<openfpm::vector<hsize_t>> offset_add0;
-
-		block0.resize(mpi_size);
-		offset0.resize(mpi_size);
-		block_add0.resize(mpi_size);
-		offset_add0.resize(mpi_size);
-*/
-		openfpm::vector<size_t> n_block;
-		n_block.resize(v_cl.getProcessingUnits());
-
-
-		for(size_t i = 0 ; i < n_block.size() ; i++)
-			n_block.get(i) = mpi_size_old / v_cl.getProcessingUnits();
-
-		size_t rest_block = mpi_size_old % v_cl.getProcessingUnits();
-
-		//std::cout << "MPI size old: " << mpi_size_old << std::endl;
-		//std::cout << "MPI size: " << v_cl.getProcessingUnits() << std::endl;
-
-
-//		std::cout << "Rest block: " << rest_block << std::endl;
-
-		size_t max_block;
-
-		if (rest_block != 0)
-			max_block = n_block.get(0) + 1;
-		else
-			max_block = n_block.get(0);
-
-		//for(size_t i = 0 ; i < n_block.size() ; i++)
-		for(size_t i = 0 ; i < rest_block ; i++)
-			n_block.get(i) += 1;
-
-
-		//for(size_t i = 0 ; i < n_block.size() ; i++)
-			//std::cout << "n_block.get(i): " << n_block.get(i) << std::endl;
-
-		size_t start_block = 0;
-		size_t stop_block = 0;
-
-
-		if (v_cl.getProcessUnitID() != 0)
-		{
-			for(size_t i = 0 ; i < v_cl.getProcessUnitID() ; i++)
-				start_block += n_block.get(i);
-		}
-
-		stop_block = start_block + n_block.get(v_cl.getProcessUnitID());
-
-		//std::cout << "ID: " << v_cl.getProcessUnitID() << "; Start block: " << start_block << "; " << "Stop block: " << stop_block << std::endl;
-
-		if (mpi_rank >= mpi_size_old)
-			load_block(start_block,mpi_size_old,metadata_out,metadata_accum,plist_id,dataset_2);
-		else
-		{
-			size_t n_bl = 0;
-			size_t lb = start_block;
-			for ( ; lb < stop_block ; lb++, n_bl++)
-				load_block(lb,mpi_size_old,metadata_out,metadata_accum,plist_id,dataset_2);
-
-			if (n_bl < max_block)
-				load_block(-1,mpi_size_old,metadata_out,metadata_accum,plist_id,dataset_2);
-		}
-
-		// Close the dataset.
-		H5Dclose(dataset);
-		H5Dclose(dataset_2);
-		// Close the file.
-		H5Fclose(file);
-		H5Pclose(plist_id);
-
-		//std::cout << "V_pos.size() after merge: " << v_pos.size() << std::endl;
-
-		// Map particles
-		//map();
+		HDF5_reader<VECTOR_DIST> h5l;
 
-		//std::cout << "V_pos.size() after merge and map: " << v_pos.size() << std::endl;
+		h5l.load(filename,v_pos,v_prp,g_m);
 	}
 
 	/*! \brief Output particle position and properties
diff --git a/src/Vector/vector_dist_HDF5_chckpnt_restart_test.hpp b/src/Vector/vector_dist_HDF5_chckpnt_restart_test.hpp
index 0c5da9f9b057b66e4833294420ddac61f1540a28..1ed0477ec6a402a514a00508d72e55fb96446187 100644
--- a/src/Vector/vector_dist_HDF5_chckpnt_restart_test.hpp
+++ b/src/Vector/vector_dist_HDF5_chckpnt_restart_test.hpp
@@ -19,21 +19,11 @@
 
 BOOST_AUTO_TEST_SUITE( vd_hdf5_chckpnt_rstrt_test )
 
-// Input data
-
-// Number of particles
-size_t k = 1000000;
-
 // Dimensionality
 const size_t dim = 3;
 
 BOOST_AUTO_TEST_CASE( vector_dist_hdf5_save_test )
 {
-	Vcluster & v_cl = create_vcluster();
-
-	if (v_cl.getProcessUnitID() == 0)
-		std::cout << "Saving distributed vector" << std::endl;
-
 	Box<dim,float> box;
 
 	for (size_t i = 0; i < dim; i++)
@@ -45,7 +35,7 @@ BOOST_AUTO_TEST_CASE( vector_dist_hdf5_save_test )
 	// Boundary conditions
 	size_t bc[dim];
 
-	const size_t Ng = cbrt(k);
+	const size_t Ng = 32;
 
 	// we create a Grid iterator
 	size_t sz[dim] = {Ng,Ng,Ng};
@@ -75,10 +65,6 @@ BOOST_AUTO_TEST_CASE( vector_dist_hdf5_save_test )
 		++it;
 	}
 
-	//BOOST_REQUIRE_EQUAL(it.getSpacing(0),1.0f/(Ng-1));
-	//BOOST_REQUIRE_EQUAL(it.getSpacing(1),1.0f/(Ng-1));
-	//BOOST_REQUIRE_EQUAL(it.getSpacing(2),1.0f/(Ng-1));
-
 	vd.map();
 
 	// Put forces
@@ -96,13 +82,37 @@ BOOST_AUTO_TEST_CASE( vector_dist_hdf5_save_test )
 		++it2;
 	}
 
-	timer t;
-	t.start();
 	// Save the vector
 	vd.save("vector_dist.h5");
-	t.stop();
 
-	std::cout << "Saving time: " << t.getwct() << std::endl;
+	vector_dist<dim,float, aggregate<float[dim]>, CartDecomposition<dim,float> > vd2(0,box,bc,ghost);
+
+	vd2.load("vector_dist.h5");
+
+	// Check that vd and vd2 match
+
+	auto it3 = vd.getDomainIterator();
+
+	BOOST_REQUIRE_EQUAL(vd.size_local(),vd2.size_local());
+
+	bool check = true;
+	while (it3.isNext())
+	{
+		auto p = it3.get();
+
+		Point<3,float> p1 = vd.getPos(p);
+		Point<3,float> p2 = vd2.getPos(p);
+
+		check &= (p1 == p2);
+
+		check &= (vd.template getProp<0>(p)[0] == vd2.template getProp<0>(p)[0]);
+		check &= (vd.template getProp<0>(p)[1] == vd2.template getProp<0>(p)[1]);
+		check &= (vd.template getProp<0>(p)[2] == vd2.template getProp<0>(p)[2]);
+
+		++it3;
+	}
+
+	BOOST_REQUIRE_EQUAL(check,true);
 }
 
 
@@ -111,9 +121,6 @@ BOOST_AUTO_TEST_CASE( vector_dist_hdf5_load_test )
 {
 	Vcluster & v_cl = create_vcluster();
 
-	if (v_cl.getProcessUnitID() == 0)
-		std::cout << "Loading distributed vector" << std::endl;
-
 	Box<dim,float> box;
 
 	for (size_t i = 0; i < dim; i++)
@@ -129,58 +136,24 @@ BOOST_AUTO_TEST_CASE( vector_dist_hdf5_load_test )
 		bc[i] = NON_PERIODIC;
 
-	const size_t Ng = cbrt(k);
-
-	// we create a Grid iterator
-	size_t sz[3] = {Ng,Ng,Ng};
+	const size_t Ng = 32;
 
 	// ghost
 	Ghost<dim,float> ghost(1.0/(Ng-2));
 
 	vector_dist<dim,float, aggregate<float[dim]>, CartDecomposition<dim,float> > vd(0,box,bc,ghost);
 
-	vd.load("vector_dist.h5");
-
-	timer t;
-	t.start();
-	// Save the vector
-	vd.load("vector_dist.h5");
-	t.stop();
-
-	std::cout << "Loading time: " << t.getwct() << std::endl;
+	// Load the vector
+	vd.load("test_data/vector_dist_24.h5");
 
 	/////////////////// Checking data ///////////////////////
 
 	// Check total number of particles
 	size_t n_part = vd.size_local();
-	openfpm::vector<size_t> tot_n_part;
-	v_cl.allGather(n_part,tot_n_part);
+	v_cl.sum(n_part);
 	v_cl.execute();
 
-	size_t sum = 0;
-
-	for (size_t i = 0; i < tot_n_part.size(); i++)
-		sum += tot_n_part.get(i);
-
-	BOOST_REQUIRE_EQUAL(sum,k);
-
-	//std::cout << "Sum: " << sum << std::endl;
-
-	// Check spacing (positions)
-
-	auto it = vd.getGridIterator(sz);
-
-	while (it.isNext())
-	{
-		//auto key = it.get();
-
-		++it;
-	}
-
-	BOOST_REQUIRE_EQUAL(it.getSpacing(0),1.0f/(Ng-1));
-	BOOST_REQUIRE_EQUAL(it.getSpacing(1),1.0f/(Ng-1));
-	BOOST_REQUIRE_EQUAL(it.getSpacing(2),1.0f/(Ng-1));
-
+	BOOST_REQUIRE_EQUAL(n_part,Ng*Ng*Ng);
 
 	// Check properties
 
@@ -190,9 +163,9 @@ BOOST_AUTO_TEST_CASE( vector_dist_hdf5_load_test )
 	{
 		auto key = it2.get();
 
-		//Put the forces
+		// Check the properties
 		for (size_t i = 0; i < dim; i++)
-			BOOST_CHECK_CLOSE(vd.template getProp<0>(key)[i],0.51234 + vd.getPos(key)[0] + vd.getPos(key)[1]+ vd.getPos(key)[2],0.0001);
+			BOOST_REQUIRE_EQUAL(vd.template getProp<0>(key)[i],(float)(0.51234 + vd.getPos(key)[0] + vd.getPos(key)[1]+ vd.getPos(key)[2]));
 
 		++it2;
 	}
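Usage note: with this patch, vector_dist::save()/load() delegate to the new HDF5_wr
writer/reader (HDF5_writer<VECTOR_DIST> / HDF5_reader<VECTOR_DIST>) instead of the
inline parallel-HDF5 code, and load() also restores g_m, the real-particle count.
A minimal checkpoint/restart sketch mirroring the updated test; the domain,
boundary conditions, ghost size, and file name below are illustrative assumptions,
not part of the patch:

    #include "Vector/vector_dist.hpp"

    int main(int argc, char* argv[])
    {
        // save()/load() are collective, so the VCluster runtime must be up
        openfpm_init(&argc,&argv);
        {
            Box<3,float> box({0.0,0.0,0.0},{1.0,1.0,1.0});            // illustrative domain
            size_t bc[3] = {NON_PERIODIC,NON_PERIODIC,NON_PERIODIC};  // illustrative BCs
            Ghost<3,float> ghost(0.05);                               // illustrative ghost

            vector_dist<3,float, aggregate<float[3]>, CartDecomposition<3,float> > vd(0,box,bc,ghost);

            // ... add particles and fill property 0 here ...

            // Checkpoint: serializes v_pos/v_prp through HDF5_writer<VECTOR_DIST>
            vd.save("vector_dist.h5");

            // Restart into a fresh vector: HDF5_reader<VECTOR_DIST> repopulates
            // positions, properties, and g_m, redistributing across the current ranks
            vector_dist<3,float, aggregate<float[3]>, CartDecomposition<3,float> > vd2(0,box,bc,ghost);
            vd2.load("vector_dist.h5");
        }
        openfpm_finalize();
        return 0;
    }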