From 354eba80cdce01521a09a7b2c6a19cecc4e3e1c0 Mon Sep 17 00:00:00 2001
From: Pietro Incardona <incardon@mpi-cbg.de>
Date: Tue, 2 May 2017 01:26:25 +0200
Subject: [PATCH] Fixing missing files

---
 src/HDF5_wr/HDF5_reader.hpp            |  24 +++
 src/HDF5_wr/HDF5_reader_vd.hpp         | 218 +++++++++++++++++++++++++
 src/HDF5_wr/HDF5_wr.hpp                |  18 ++
 src/HDF5_wr/HDF5_writer.hpp            |  28 ++++
 src/HDF5_wr/HDF5_writer_unit_tests.hpp | 115 +++++++++++++
 src/HDF5_wr/HDF5_writer_vd.hpp         | 161 ++++++++++++++++++
 6 files changed, 564 insertions(+)
 create mode 100644 src/HDF5_wr/HDF5_reader.hpp
 create mode 100644 src/HDF5_wr/HDF5_reader_vd.hpp
 create mode 100644 src/HDF5_wr/HDF5_wr.hpp
 create mode 100644 src/HDF5_wr/HDF5_writer.hpp
 create mode 100644 src/HDF5_wr/HDF5_writer_unit_tests.hpp
 create mode 100644 src/HDF5_wr/HDF5_writer_vd.hpp

diff --git a/src/HDF5_wr/HDF5_reader.hpp b/src/HDF5_wr/HDF5_reader.hpp
new file mode 100644
index 0000000..9f8aa12
--- /dev/null
+++ b/src/HDF5_wr/HDF5_reader.hpp
@@ -0,0 +1,24 @@
+/*
+ * HDF5_reader.hpp
+ *
+ * Created on: May 1, 2017
+ * Author: i-bird
+ */
+
+#ifndef OPENFPM_IO_SRC_HDF5_WR_HDF5_READER_HPP_
+#define OPENFPM_IO_SRC_HDF5_WR_HDF5_READER_HPP_
+
+#include "VCluster/VCluster.hpp"
+
+template <unsigned int type>
+class HDF5_reader
+{
+	void load()
+	{
+		std::cerr << __FILE__ << ":" << __LINE__ << " Error: we do not know how to read this type of data" << std::endl;
+	}
+};
+
+#include "HDF5_reader_vd.hpp"
+
+#endif /* OPENFPM_IO_SRC_HDF5_WR_HDF5_READER_HPP_ */
diff --git a/src/HDF5_wr/HDF5_reader_vd.hpp b/src/HDF5_wr/HDF5_reader_vd.hpp
new file mode 100644
index 0000000..36041cc
--- /dev/null
+++ b/src/HDF5_wr/HDF5_reader_vd.hpp
@@ -0,0 +1,218 @@
+/*
+ * HDF5_reader_vd.hpp
+ *
+ * Created on: May 1, 2017
+ * Author: i-bird
+ */
+
+#ifndef OPENFPM_IO_SRC_HDF5_WR_HDF5_READER_VD_HPP_
+#define OPENFPM_IO_SRC_HDF5_WR_HDF5_READER_VD_HPP_
+
+
+template <>
+class HDF5_reader<VECTOR_DIST>
+{
+private:
+
+	template<unsigned int dim, typename St, typename prp>
+	void load_block(long int bid,
+	                hssize_t mpi_size_old,
+	                int * metadata_out,
+	                openfpm::vector<size_t> metadata_accum,
+	                hid_t plist_id,
+	                hid_t dataset_2,
+	                size_t & g_m,
+	                openfpm::vector<Point<dim,St>> & v_pos,
+	                openfpm::vector<prp> & v_prp)
+	{
+		hsize_t offset[1];
+		hsize_t block[1];
+
+		if (bid < mpi_size_old && bid != -1)
+		{
+			offset[0] = metadata_accum.get(bid);
+			block[0] = metadata_out[bid];
+		}
+		else
+		{
+			offset[0] = 0;
+			block[0] = 0;
+		}
+
+		hsize_t count[1] = {1};
+
+
+		//Select file dataspace
+		hid_t file_dataspace_id_2 = H5Dget_space(dataset_2);
+
+		H5Sselect_hyperslab(file_dataspace_id_2, H5S_SELECT_SET, offset, NULL, count, block);
+
+		hsize_t mdim_2[1] = {block[0]};
+
+
+		//Create data space in memory
+		hid_t mem_dataspace_id_2 = H5Screate_simple(1, mdim_2, NULL);
+
+		size_t sum = 0;
+
+		for (int i = 0; i < mpi_size_old; i++)
+			sum += metadata_out[i];
+
+		// allocate the memory
+		HeapMemory pmem;
+
+		ExtPreAlloc<HeapMemory> & mem = *(new ExtPreAlloc<HeapMemory>(block[0],pmem));
+		mem.incRef();
+
+		// Read the dataset.
+		H5Dread(dataset_2, H5T_NATIVE_CHAR, mem_dataspace_id_2, file_dataspace_id_2, plist_id, (char *)mem.getPointer());
+
+		mem.allocate(pmem.size());
+
+		Unpack_stat ps;
+
+		openfpm::vector<Point<dim, St>> v_pos_unp;
+
+		openfpm::vector<prp> v_prp_unp;
+
+		Unpacker<decltype(v_pos_unp),HeapMemory>::unpack(mem,v_pos_unp,ps,1);
+		Unpacker<decltype(v_prp_unp),HeapMemory>::unpack(mem,v_prp_unp,ps,1);
+
+		mem.decRef();
+		delete &mem;
+
+		for (size_t i = 0; i < v_pos_unp.size(); i++)
+			v_pos.add(v_pos_unp.get(i));
+
+		for (size_t i = 0; i < v_prp_unp.size(); i++)
+			v_prp.add(v_prp_unp.get(i));
+
+		g_m = v_pos.size();
+	}
+
+public:
+
+	template<unsigned int dim, typename St, typename prp> inline void load(const std::string & filename,
+	                                                                       openfpm::vector<Point<dim,St>> & v_pos,
+	                                                                       openfpm::vector<prp> & v_prp,
+	                                                                       size_t & g_m)
+	{
+		Vcluster & v_cl = create_vcluster();
+
+		v_pos.clear();
+		v_prp.clear();
+
+		g_m = 0;
+
+		MPI_Comm comm = v_cl.getMPIComm();
+		MPI_Info info = MPI_INFO_NULL;
+
+		int mpi_rank = v_cl.getProcessUnitID();
+
+		// Set up file access property list with parallel I/O access
+		hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
+		H5Pset_fapl_mpio(plist_id, comm, info);
+
+		//Open a file
+		hid_t file = H5Fopen (filename.c_str(), H5F_ACC_RDONLY, plist_id);
+		H5Pclose(plist_id);
+
+		//Open dataset
+		hid_t dataset = H5Dopen (file, "metadata", H5P_DEFAULT);
+
+		//Create property list for collective dataset read
+		plist_id = H5Pcreate(H5P_DATASET_XFER);
+		H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
+
+		//Select file dataspace
+		hid_t file_dataspace_id = H5Dget_space(dataset);
+
+		hssize_t mpi_size_old = H5Sget_select_npoints (file_dataspace_id);
+
+		//Where to read metadata
+		int metadata_out[mpi_size_old];
+
+		for (int i = 0; i < mpi_size_old; i++)
+		{
+			metadata_out[i] = 0;
+		}
+
+		//Size for data space in memory
+		hsize_t mdim[1] = {(size_t)mpi_size_old};
+
+		//Create data space in memory
+		hid_t mem_dataspace_id = H5Screate_simple(1, mdim, NULL);
+
+		// Read the dataset.
+		H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace_id, file_dataspace_id, plist_id, metadata_out);
+
+		openfpm::vector<size_t> metadata_accum;
+		metadata_accum.resize(mpi_size_old);
+
+		metadata_accum.get(0) = 0;
+		for (int i = 1 ; i < mpi_size_old ; i++)
+			metadata_accum.get(i) = metadata_accum.get(i-1) + metadata_out[i-1];
+
+		//Open dataset
+		hid_t dataset_2 = H5Dopen (file, "vector_dist", H5P_DEFAULT);
+
+		//Create property list for collective dataset read
+		plist_id = H5Pcreate(H5P_DATASET_XFER);
+		H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
+
+		openfpm::vector<size_t> n_block;
+		n_block.resize(v_cl.getProcessingUnits());
+
+
+		for(size_t i = 0 ; i < n_block.size() ; i++)
+			n_block.get(i) = mpi_size_old / v_cl.getProcessingUnits();
+
+		size_t rest_block = mpi_size_old % v_cl.getProcessingUnits();
+
+		size_t max_block;
+
+		if (rest_block != 0)
+			max_block = n_block.get(0) + 1;
+		else
+			max_block = n_block.get(0);
+
+		//for(size_t i = 0 ; i < n_block.size() ; i++)
+		for(size_t i = 0 ; i < rest_block ; i++)
+			n_block.get(i) += 1;
+
+		size_t start_block = 0;
+		size_t stop_block = 0;
+
+
+		if (v_cl.getProcessUnitID() != 0)
+		{
+			for(size_t i = 0 ; i < v_cl.getProcessUnitID() ; i++)
+				start_block += n_block.get(i);
+		}
+
+		stop_block = start_block + n_block.get(v_cl.getProcessUnitID());
+
+		if (mpi_rank >= mpi_size_old)
+			load_block(start_block,mpi_size_old,metadata_out,metadata_accum,plist_id,dataset_2,g_m,v_pos,v_prp);
+		else
+		{
+			size_t n_bl = 0;
+			size_t lb = start_block;
+			for ( ; lb < stop_block ; lb++, n_bl++)
+				load_block(lb,mpi_size_old,metadata_out,metadata_accum,plist_id,dataset_2,g_m,v_pos,v_prp);
+
+			if (n_bl < max_block)
+				load_block(-1,mpi_size_old,metadata_out,metadata_accum,plist_id,dataset_2,g_m,v_pos,v_prp);
+		}
+
+		// Close the dataset.
+		H5Dclose(dataset);
+		H5Dclose(dataset_2);
+		// Close the file.
+		H5Fclose(file);
+		H5Pclose(plist_id);
+	}
+};
+
+
+#endif /* OPENFPM_IO_SRC_HDF5_WR_HDF5_READER_VD_HPP_ */
diff --git a/src/HDF5_wr/HDF5_wr.hpp b/src/HDF5_wr/HDF5_wr.hpp
new file mode 100644
index 0000000..f90c60a
--- /dev/null
+++ b/src/HDF5_wr/HDF5_wr.hpp
@@ -0,0 +1,18 @@
+/*
+ * HDF5_wr.hpp
+ *
+ * Created on: May 1, 2017
+ * Author: i-bird
+ */
+
+#ifndef OPENFPM_IO_SRC_HDF5_WR_HDF5_WR_HPP_
+#define OPENFPM_IO_SRC_HDF5_WR_HDF5_WR_HPP_
+
+
+#define VECTOR_DIST 1
+
+#include "HDF5_writer.hpp"
+#include "HDF5_reader.hpp"
+
+
+#endif /* OPENFPM_IO_SRC_HDF5_WR_HDF5_WR_HPP_ */
diff --git a/src/HDF5_wr/HDF5_writer.hpp b/src/HDF5_wr/HDF5_writer.hpp
new file mode 100644
index 0000000..b0ac16c
--- /dev/null
+++ b/src/HDF5_wr/HDF5_writer.hpp
@@ -0,0 +1,28 @@
+/*
+ * HDF5_writer.hpp
+ *
+ * Created on: May 1, 2017
+ * Author: i-bird
+ */
+
+#ifndef OPENFPM_IO_SRC_HDF5_WR_HDF5_WRITER_HPP_
+#define OPENFPM_IO_SRC_HDF5_WR_HDF5_WRITER_HPP_
+
+
+#include "VCluster/VCluster.hpp"
+#include <hdf5.h>
+
+template <unsigned int type>
+class HDF5_writer
+{
+
+	void save()
+	{
+		std::cerr << __FILE__ << ":" << __LINE__ << " Error: we do not know how to write this type of data" << std::endl;
+	}
+
+};
+
+#include "../HDF5_wr/HDF5_writer_vd.hpp"
+
+#endif /* OPENFPM_IO_SRC_HDF5_WR_HDF5_WRITER_HPP_ */
diff --git a/src/HDF5_wr/HDF5_writer_unit_tests.hpp b/src/HDF5_wr/HDF5_writer_unit_tests.hpp
new file mode 100644
index 0000000..0fc72b1
--- /dev/null
+++ b/src/HDF5_wr/HDF5_writer_unit_tests.hpp
@@ -0,0 +1,115 @@
+/*
+ * HDF5_writer_unit_tests.hpp
+ *
+ * Created on: May 1, 2017
+ * Author: i-bird
+ */
+
+#ifndef OPENFPM_IO_SRC_HDF5_WR_HDF5_WRITER_UNIT_TESTS_HPP_
+#define OPENFPM_IO_SRC_HDF5_WR_HDF5_WRITER_UNIT_TESTS_HPP_
+
+#include "HDF5_wr.hpp"
+
+#include "hdf5.h"
+
+BOOST_AUTO_TEST_SUITE( vd_hdf5_chckpnt_rstrt_test )
+
+// Dimensionality
+const size_t dim = 3;
+
+BOOST_AUTO_TEST_CASE( vector_dist_hdf5_save_test )
+{
+	openfpm::vector<Point<3,float>> vpos;
+	openfpm::vector<aggregate<float[dim]>> vprp;
+
+	// Put forces
+
+	for (size_t i = 0 ; i < 1024 ; i++)
+	{
+		Point<3,float> p;
+
+		p.get(0) = i;
+		p.get(1) = i+13;
+		p.get(2) = i+17;
+
+		vpos.add(p);
+
+		vprp.add();
+		vprp.template get<0>(vprp.size()-1)[0] = p.get(0) + 100.0;
+		vprp.template get<0>(vprp.size()-1)[1] = p.get(1) + 200.0;
+		vprp.template get<0>(vprp.size()-1)[2] = p.get(2) + 300.0;
+	}
+
+	HDF5_writer<VECTOR_DIST> h5;
+
+	// Save the vector
+	h5.save("vector_dist.h5",vpos,vprp);
+
+	HDF5_reader<VECTOR_DIST> h5r;
+
+	openfpm::vector<Point<3,float>> vpos2;
+	openfpm::vector<aggregate<float[dim]>> vprp2;
+
+	size_t g_m = 0;
+	h5r.load("vector_dist.h5",vpos2,vprp2,g_m);
+
+	BOOST_REQUIRE_EQUAL(1024ul,vpos2.size());
+	BOOST_REQUIRE_EQUAL(1024ul,vprp2.size());
+
+	BOOST_REQUIRE_EQUAL(1024ul,g_m);
+
+	// Check that vpos == vpos2 and vprp == vprp2
+
+	bool check = true;
+	for (size_t i = 0 ; i < vpos.size() ; i++)
+	{
+		check &= (vpos.get(i) == vpos2.get(i));
+		check &= (vprp.get_o(i) == vprp2.get_o(i));
+	}
+
+	BOOST_REQUIRE_EQUAL(check,true);
+}
+
+
+
+BOOST_AUTO_TEST_CASE( vector_dist_hdf5_load_test )
+{
+	Vcluster & v_cl = create_vcluster();
+
+	openfpm::vector<Point<3,float>> vpos;
+	openfpm::vector<aggregate<float[dim]>> vprp;
+
+	HDF5_reader<VECTOR_DIST> h5;
+
+	size_t g_m = 0;
+
+	// Load the vector
+	h5.load("test_data/vector_dist_24.h5",vpos,vprp,g_m);
+
+	/////////////////// Checking data ///////////////////////
+
+	// Check total number of particles
+	size_t n_part = vpos.size();
+	v_cl.sum(n_part);
+	v_cl.execute();
+
+	BOOST_REQUIRE_EQUAL(n_part,1024ul*24ul);
+
+	BOOST_REQUIRE_EQUAL(vpos.size(),vprp.size());
+
+	bool check = true;
+
+	for (size_t i = 0 ; i < vpos.size() ; i++)
+	{
+		check &= (vprp.template get<0>(i)[0] == vpos.template get<0>(i)[0] + 100.0);
+		check &= (vprp.template get<0>(i)[1] == vpos.template get<0>(i)[1] + 200.0);
+		check &= (vprp.template get<0>(i)[2] == vpos.template get<0>(i)[2] + 300.0);
+	}
+
+	BOOST_REQUIRE_EQUAL(check,true);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
+
+
+#endif /* OPENFPM_IO_SRC_HDF5_WR_HDF5_WRITER_UNIT_TESTS_HPP_ */
diff --git a/src/HDF5_wr/HDF5_writer_vd.hpp b/src/HDF5_wr/HDF5_writer_vd.hpp
new file mode 100644
index 0000000..d413962
--- /dev/null
+++ b/src/HDF5_wr/HDF5_writer_vd.hpp
@@ -0,0 +1,161 @@
+/*
+ * HDF5_writer_vd.hpp
+ *
+ * Created on: May 1, 2017
+ * Author: i-bird
+ */
+
+#ifndef OPENFPM_IO_SRC_HDF5_WR_HDF5_WRITER_VD_HPP_
+#define OPENFPM_IO_SRC_HDF5_WR_HDF5_WRITER_VD_HPP_
+
+#include "Packer_Unpacker/Pack_selector.hpp"
+#include "Packer_Unpacker/Packer.hpp"
+#include "Packer_Unpacker/Unpacker.hpp"
+
+template <>
+class HDF5_writer<VECTOR_DIST>
+{
+public:
+
+	template<unsigned int dim, typename St, typename prp>
+	inline void save(const std::string & filename,
+	                 const openfpm::vector<Point<dim,St>> & v_pos,
+	                 const openfpm::vector<prp> & v_prp) const
+	{
+		Vcluster & v_cl = create_vcluster();
+
+		//Pack_request vector
+		size_t req = 0;
+
+		//std::cout << "V_pos.size() before save: " << v_pos.size() << std::endl;
+
+		//Pack request
+		Packer<typename std::remove_reference<decltype(v_pos)>::type,HeapMemory>::packRequest(v_pos,req);
+		Packer<typename std::remove_reference<decltype(v_prp)>::type,HeapMemory>::packRequest(v_prp,req);
+
+		// allocate the memory
+		HeapMemory pmem;
+		//pmem.allocate(req);
+		ExtPreAlloc<HeapMemory> & mem = *(new ExtPreAlloc<HeapMemory>(req,pmem));
+		mem.incRef();
+
+		//Packing
+
+		Pack_stat sts;
+
+		Packer<typename std::remove_reference<decltype(v_pos)>::type,HeapMemory>::pack(mem,v_pos,sts);
+		Packer<typename std::remove_reference<decltype(v_prp)>::type,HeapMemory>::pack(mem,v_prp,sts);
+
+		/*****************************************************************
+		 * Create a new file with default creation and access properties.*
+		 * Then create a dataset and write data to it and close the file *
+		 * and dataset.                                                  *
+		 *****************************************************************/
+
+		int mpi_rank = v_cl.getProcessUnitID();
+		int mpi_size = v_cl.getProcessingUnits();
+
+		MPI_Comm comm = v_cl.getMPIComm();
+		MPI_Info info = MPI_INFO_NULL;
+
+		// Set up file access property list with parallel I/O access
+
+		hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
+		H5Pset_fapl_mpio(plist_id, comm, info);
+
+		// Create a new file collectively and release property list identifier.
+		hid_t file = H5Fcreate (filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
+		H5Pclose(plist_id);
+
+		size_t sz = pmem.size();
+		//std::cout << "Pmem.size: " << pmem.size() << std::endl;
+		openfpm::vector<size_t> sz_others;
+		v_cl.allGather(sz,sz_others);
+		v_cl.execute();
+
+		size_t sum = 0;
+
+		for (size_t i = 0; i < sz_others.size(); i++)
+			sum += sz_others.get(i);
+
+		//Size for data space in file
+		hsize_t fdim[1] = {sum};
+
+		//Size for data space in file
+		hsize_t fdim2[1] = {(size_t)mpi_size};
+
+		//Create data space in file
+		hid_t file_dataspace_id = H5Screate_simple(1, fdim, NULL);
+
+		//Create data space in file
+		hid_t file_dataspace_id_2 = H5Screate_simple(1, fdim2, NULL);
+
+		//Size for data space in memory
+		hsize_t mdim[1] = {pmem.size()};
+
+		//Create data space in memory
+		hid_t mem_dataspace_id = H5Screate_simple(1, mdim, NULL);
+
+		//Create data set in file
+		hid_t file_dataset = H5Dcreate (file, "vector_dist", H5T_NATIVE_CHAR, file_dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+
+		//Create data set 2 in file
+		hid_t file_dataset_2 = H5Dcreate (file, "metadata", H5T_NATIVE_INT, file_dataspace_id_2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+
+		//H5Pclose(plist_id);
+		H5Sclose(file_dataspace_id);
+		H5Sclose(file_dataspace_id_2);
+
+		hsize_t block[1] = {pmem.size()};
+
+		//hsize_t stride[1] = {1};
+
+		hsize_t count[1] = {1};
+
+		hsize_t offset[1] = {0};
+
+		for (int i = 0; i < mpi_rank; i++)
+		{
+			if (mpi_rank == 0)
+				offset[0] = 0;
+			else
+				offset[0] += sz_others.get(i);
+		}
+
+		int metadata[mpi_size];
+
+		for (int i = 0; i < mpi_size; i++)
+			metadata[i] = sz_others.get(i);
+
+		//Select hyperslab in the file.
+		file_dataspace_id = H5Dget_space(file_dataset);
+		H5Sselect_hyperslab(file_dataspace_id, H5S_SELECT_SET, offset, NULL, count, block);
+
+		file_dataspace_id_2 = H5Dget_space(file_dataset_2);
+
+
+		//Create property list for collective dataset write.
+		plist_id = H5Pcreate(H5P_DATASET_XFER);
+		H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
+
+		//Write a data set to a file
+		H5Dwrite(file_dataset, H5T_NATIVE_CHAR, mem_dataspace_id, file_dataspace_id, plist_id, (const char *)pmem.getPointer());
+
+		//Write a data set 2 to a file
+		H5Dwrite(file_dataset_2, H5T_NATIVE_INT, H5S_ALL, file_dataspace_id_2, plist_id, metadata);
+
+
+		//Close/release resources.
+		H5Dclose(file_dataset);
+		H5Sclose(file_dataspace_id);
+		H5Dclose(file_dataset_2);
+		H5Sclose(file_dataspace_id_2);
+		H5Sclose(mem_dataspace_id);
+		H5Pclose(plist_id);
+		H5Fclose(file);
+	}
+
+};
+
+
+#endif /* OPENFPM_IO_SRC_HDF5_WR_HDF5_WRITER_VD_HPP_ */
-- 
GitLab
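For reference (not part of the patch itself): a minimal checkpoint/restart round-trip with the classes added here might look like the sketch below. It mirrors the shipped unit test; openfpm_init/openfpm_finalize, the container types, and the file name are assumptions taken from the surrounding OpenFPM code base, not something this patch introduces.

	#include "HDF5_wr/HDF5_wr.hpp"

	int main(int argc, char* argv[])
	{
		openfpm_init(&argc,&argv);   // assumed OpenFPM runtime setup

		openfpm::vector<Point<3,float>> pos;
		openfpm::vector<aggregate<float[3]>> prp;

		// ... fill pos/prp with the local particles of this process ...

		// Collective save: each process contributes its packed block,
		// and the per-rank block sizes go into the "metadata" dataset
		HDF5_writer<VECTOR_DIST> writer;
		writer.save("particles.h5",pos,prp);

		// Collective load: the blocks written by the old communicator
		// are redistributed over the current processes
		openfpm::vector<Point<3,float>> pos2;
		openfpm::vector<aggregate<float[3]>> prp2;
		size_t g_m = 0;

		HDF5_reader<VECTOR_DIST> reader;
		reader.load("particles.h5",pos2,prp2,g_m);

		openfpm_finalize();          // assumed OpenFPM runtime teardown
		return 0;
	}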