Commit cc6cdca7 authored by incardon

Several fixes: 2GB HDF5 + NO_POSITION wrong on GPU + CSV Writer + Adding DETERMINISTIC cell list

parent bb27fd90
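The headline change here is the 2GB HDF5 fix: every H5Dread/H5Dwrite in the reader and writer hunks below is split into hyperslab chunks of at most 0x7FFFFFFF bytes, because a single collective HDF5/MPI-IO transfer is limited to roughly 2GB. The following is a minimal sketch of the write side only, assuming the dataset, file dataspace and transfer property list already exist; the function name and parameters are illustrative, not part of the patch.

#include <hdf5.h>
#include <algorithm>
#include <cstddef>

// Sketch: write `total` bytes of `buf` into a 1-D char dataset, starting at file
// offset `base_offset`, never passing more than 0x7FFFFFFF bytes in a single call.
static void write_chunked(hid_t dataset, hid_t file_space, hid_t xfer_plist,
                          const char * buf, size_t total, hsize_t base_offset)
{
	const size_t max_chunk = 0x7FFFFFFF;     // stay below the ~2GB per-call limit
	size_t to_write = total;
	size_t coffset = 0;                      // progress inside buf and the file block
	hsize_t count[1] = {1};

	while (to_write)
	{
		hsize_t block_c[1] = {(hsize_t)std::min(to_write,max_chunk)};
		hsize_t offset_c[1] = {base_offset + (hsize_t)coffset};

		// memory dataspace sized to the current chunk
		hid_t mem_space = H5Screate_simple(1, block_c, NULL);

		// select the matching slab of the file dataspace
		H5Sselect_hyperslab(file_space, H5S_SELECT_SET, offset_c, NULL, count, block_c);

		// transfer this chunk, advancing into the source buffer
		H5Dwrite(dataset, H5T_NATIVE_CHAR, mem_space, file_space, xfer_plist, buf + coffset);

		coffset += (size_t)block_c[0];
		to_write -= (size_t)block_c[0];

		H5Sclose(mem_space);
	}
}

The read path in the patch mirrors this loop, except that it offsets the destination pointer (mem.getPointer() + coffset) instead of the source.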
......@@ -39,9 +39,12 @@ if(ENABLE_GPU)
elseif ( CUDA_VERSION_MAJOR EQUAL 10 AND CUDA_VERSION_MINOR EQUAL 1 )
message("CUDA is compatible")
set(WARNING_SUPPRESSION_AND_OPTION_NVCC -Xcudafe "--display_error_number --diag_suppress=2915 --diag_suppress=2912 --diag_suppress=2913 --diag_suppress=111 --diag_suppress=186 --diag_suppress=611 --diag_suppress=2928 --diag_suppress=2931 --diag_suppress=2929 --diag_suppress=2930" --expt-extended-lambda)
elseif ( CUDA_VERSION_MAJOR EQUAL 10 AND CUDA_VERSION_MINOR EQUAL 2 )
message("CUDA is compatible")
set(WARNING_SUPPRESSION_AND_OPTION_NVCC -Xcudafe "--display_error_number --diag_suppress=2976 --diag_suppress=2977 --diag_suppress=2978 --diag_suppress=2979 --diag_suppress=1835 --diag_suppress=611 --diag_suppress=186 --diag_suppress=128" --expt-extended-lambda)
set(WARNING_SUPPRESSION_AND_OPTION_NVCC_TEXT "-Xcudafe \"--display_error_number --diag_suppress=2976 --diag_suppress=2977 --diag_suppress=2978 --diag_suppress=2979 --diag_suppress=1835 --diag_suppress=611 --diag_suppress=186 --diag_suppress=128\" --expt-extended-lambda")
else()
message(FATAL_ERROR "CUDA is incompatible, version 9.2 is only supported")
message(FATAL_ERROR "CUDA is incompatible, version 9.2 10.1 and 10.2 is only supported")
endif()
endif()
......
......@@ -14,7 +14,7 @@
#include <fstream>
#include "util/common.hpp"
#include <boost/mpl/range_c.hpp>
#include "util/for_each_ref_host.hpp"
#include <boost/mpl/for_each.hpp>
#include "csv_multiarray.hpp"
#include "util/util.hpp"
#include "is_csv_writable.hpp"
......@@ -57,7 +57,7 @@ struct csv_prp
void operator()(T& t)
{
// This is the type of the csv column
typedef decltype(obj.template get<T::value>()) col_type;
typedef typename boost::mpl::at<typename Tobj::type,T>::type col_type;
// Remove the reference from the column type
typedef typename boost::remove_reference<col_type>::type col_rtype;
......@@ -183,7 +183,7 @@ class CSVWriter
csv_col<typename v_prp::value_type,has_attributes<typename v_prp::value_type>::value> col(str);
// Iterate through all the vertex and create the vertex list
boost::mpl::for_each_ref_host< boost::mpl::range_c<int,0,v_prp::value_type::max_prop> >(col);
boost::mpl::for_each< boost::mpl::range_c<int,0,v_prp::value_type::max_prop> >(col);
str << "\n";
......@@ -225,7 +225,7 @@ class CSVWriter
csv_prp<decltype(obj)> c_prp(str,obj);
// write the properties to the stream string
boost::mpl::for_each_ref_host< boost::mpl::range_c<int,0,v_prp::value_type::max_prop> >(c_prp);
boost::mpl::for_each< boost::mpl::range_c<int,0,v_prp::value_type::max_prop> >(c_prp);
str << "\n";
}
......
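In the CSVWriter hunks above, the custom for_each_ref_host loop is replaced by the stock boost::mpl::for_each over a range_c, so the functor's templated operator() is invoked once per property index with an integral-constant wrapper. A tiny self-contained illustration of that pattern (the functor name and the range bound are made up):

#include <boost/mpl/for_each.hpp>
#include <boost/mpl/range_c.hpp>
#include <iostream>

// Illustrative functor: T is an integral-constant type, T::value is the compile-time index
struct print_index
{
	template<typename T>
	void operator()(T) const
	{
		std::cout << "column " << T::value << "\n";
	}
};

int main()
{
	// Calls print_index for indices 0,1,2, the same way CSVWriter visits
	// the max_prop properties of each record.
	boost::mpl::for_each< boost::mpl::range_c<int,0,3> >(print_index());
}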
......@@ -137,7 +137,8 @@ struct csv_value_str
template<typename T,size_t N1, bool is_writable>
struct csv_value_str<T[N1], is_writable>
{
inline csv_value_str(const T v[N1], std::stringstream & str)
template<typename ArrObject>
inline csv_value_str(const ArrObject v, std::stringstream & str)
{
for (size_t i = 0 ; i < N1 ; i++)
str << "," << v[i];
......@@ -148,7 +149,8 @@ struct csv_value_str<T[N1], is_writable>
template<typename T,size_t N1,size_t N2, bool is_writable>
struct csv_value_str<T[N1][N2], is_writable>
{
inline csv_value_str(const T v[N1][N2], std::stringstream & str)
template<typename ArrObject>
inline csv_value_str(const ArrObject v, std::stringstream & str)
{
for (size_t i1 = 0 ; i1 < N1 ; i1++)
{
......@@ -164,7 +166,8 @@ struct csv_value_str<T[N1][N2], is_writable>
template<typename T,size_t N1,size_t N2,size_t N3, bool is_writable>
struct csv_value_str<T[N1][N2][N3], is_writable>
{
inline csv_value_str(const T v[N1][N2][N3], std::stringstream & str)
template<typename ArrObject>
inline csv_value_str(const ArrObject v, std::stringstream & str)
{
for (size_t i1 = 0 ; i1 < N1 ; i1++)
{
......@@ -183,7 +186,8 @@ struct csv_value_str<T[N1][N2][N3], is_writable>
template<typename T,size_t N1,size_t N2,size_t N3,size_t N4, bool is_writable>
struct csv_value_str<T[N1][N2][N3][N4],is_writable>
{
inline csv_value_str(const T v[N1][N2][N3][N4], std::stringstream & str)
template<typename ArrObject>
inline csv_value_str(const ArrObject v, std::stringstream & str)
{
for (size_t i1 = 0 ; i1 < N1 ; i1++)
{
......
......@@ -141,5 +141,37 @@ struct is_csv_writable<bool>
};
};
//! Indicate if the property T is writable in CSV
template<typename T, unsigned int N1>
struct is_csv_writable<T[N1]>
{
//! bool is writable
enum
{
value = true
};
};
//! Indicate if the property T is writable in CSV
template<typename T, unsigned int N1, unsigned int N2>
struct is_csv_writable<T[N1][N2]>
{
//! bool is writable
enum
{
value = true
};
};
//! Indicate if the property T is writable in CSV
template<typename T, unsigned int N1, unsigned int N2, unsigned int N3>
struct is_csv_writable<T[N1][N2][N3]>
{
//! bool is writable
enum
{
value = true
};
};
#endif /* OPENFPM_IO_SRC_CSVWRITER_IS_CSV_WRITABLE_HPP_ */
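The block above adds is_csv_writable specializations for fixed-size C arrays up to rank 3; the CSV writer consults this trait when deciding whether to emit a property. A quick compile-time check, assuming the header is on the include path (the element types and extents are arbitrary):

#include "is_csv_writable.hpp"

// Sketch: the new array specializations report true at compile time
static_assert(is_csv_writable<float[3]>::value, "1-D arrays are writable");
static_assert(is_csv_writable<double[3][3]>::value, "2-D arrays are writable");
static_assert(is_csv_writable<int[2][2][2]>::value, "3-D arrays are writable");

int main() { return 0; }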
......@@ -19,7 +19,7 @@ class HDF5_reader<GRID_DIST>
{
template<typename device_grid> void load_block(long int bid,
hssize_t mpi_size_old,
int * metadata_out,
long int * metadata_out,
openfpm::vector<size_t> & metadata_accum,
hid_t plist_id,
hid_t dataset_2,
......@@ -42,24 +42,37 @@ class HDF5_reader<GRID_DIST>
hsize_t count[1] = {1};
// allocate the memory
HeapMemory pmem;
//pmem.allocate(req);
ExtPreAlloc<HeapMemory> & mem = *(new ExtPreAlloc<HeapMemory>(block[0],pmem));
mem.incRef();
//Select file dataspace
hid_t file_dataspace_id_2 = H5Dget_space(dataset_2);
H5Sselect_hyperslab(file_dataspace_id_2, H5S_SELECT_SET, offset, NULL, count, block);
size_t to_read = block[0];
size_t coffset = 0;
hsize_t mdim_2[1] = {block[0]};
while (to_read)
{
hsize_t block_c[1];
block_c[0] = std::min((size_t)(to_read),(size_t)0x7FFFFFFF);
//Create data space in memory
hid_t mem_dataspace_id_2 = H5Screate_simple(1, mdim_2, NULL);
hsize_t offset_c[1] = {offset[0] + coffset};
H5Sselect_hyperslab(file_dataspace_id_2, H5S_SELECT_SET, offset_c, NULL, count, block_c);
// allocate the memory
HeapMemory pmem;
//pmem.allocate(req);
ExtPreAlloc<HeapMemory> & mem = *(new ExtPreAlloc<HeapMemory>(block[0],pmem));
mem.incRef();
hsize_t mdim_2[1] = {block_c[0]};
// Read the dataset.
H5Dread(dataset_2, H5T_NATIVE_CHAR, mem_dataspace_id_2, file_dataspace_id_2, plist_id, (char *)mem.getPointer());
//Create data space in memory
hid_t mem_dataspace_id_2 = H5Screate_simple(1, mdim_2, NULL);
// Read the dataset.
H5Dread(dataset_2, H5T_NATIVE_CHAR, mem_dataspace_id_2, file_dataspace_id_2, plist_id, (char *)mem.getPointer() + coffset);
coffset += std::min((size_t)(to_read),(size_t)0x7FFFFFFF);
to_read -= std::min((size_t)(to_read),(size_t)0x7FFFFFFF);
}
mem.allocate(pmem.size());
......@@ -72,7 +85,10 @@ class HDF5_reader<GRID_DIST>
Unpacker<typename std::remove_reference<decltype(gdb_ext_old)>::type,HeapMemory>::unpack(mem,gdb_ext_old_unp,ps,1);
for (size_t i = 0; i < loc_grid_old_unp.size(); i++)
loc_grid_old.add(loc_grid_old_unp.get(i));
{
loc_grid_old.add();
loc_grid_old.last().swap(loc_grid_old_unp.get(i));
}
for (size_t i = 0; i < gdb_ext_old_unp.size(); i++)
gdb_ext_old.add(gdb_ext_old_unp.get(i));
......@@ -120,7 +136,7 @@ public:
//printf ("\nOld MPI size: %llu\n", mpi_size_old);
//Where to read metadata
int metadata_out[mpi_size_old];
long int metadata_out[mpi_size_old];
for (int i = 0; i < mpi_size_old; i++)
{
......@@ -133,30 +149,9 @@ public:
//Create data space in memory
hid_t mem_dataspace_id = H5Screate_simple(1, mdim, NULL);
/*
if (mpi_rank == 0)
{
hssize_t size;
size = H5Sget_select_npoints (mem_dataspace_id);
printf ("\nmemspace_id size: %llu\n", size);
size = H5Sget_select_npoints (file_dataspace_id);
printf ("dataspace_id size: %llu\n", size);
}
*/
// Read the dataset.
H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace_id, file_dataspace_id, plist_id, metadata_out);
/*
if (mpi_rank == 0)
{
std::cout << "Metadata_out[]: ";
for (int i = 0; i < mpi_size_old; i++)
{
std::cout << metadata_out[i] << " ";
}
std::cout << " " << std::endl;
}
*/
H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace_id, file_dataspace_id, plist_id, metadata_out);
openfpm::vector<size_t> metadata_accum;
metadata_accum.resize(mpi_size_old);
......@@ -183,12 +178,6 @@ public:
size_t rest_block = mpi_size_old % v_cl.getProcessingUnits();
// std::cout << "MPI size old: " << mpi_size_old << std::endl;
//std::cout << "MPI size: " << v_cl.getProcessingUnits() << std::endl;
// std::cout << "Rest block: " << rest_block << std::endl;
size_t max_block;
if (rest_block != 0)
......
......@@ -92,12 +92,6 @@ public:
//Create data space in file
hid_t file_dataspace_id_2 = H5Screate_simple(1, fdim2, NULL);
//Size for data space in memory
hsize_t mdim[1] = {pmem.size()};
//Create data space in memory
hid_t mem_dataspace_id = H5Screate_simple(1, mdim, NULL);
//if (mpi_rank == 0)
//std::cout << "Total object size: " << sum << std::endl;
......@@ -105,7 +99,7 @@ public:
hid_t file_dataset = H5Dcreate (file, "grid_dist", H5T_NATIVE_CHAR, file_dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
//Create data set 2 in file
hid_t file_dataset_2 = H5Dcreate (file, "metadata", H5T_NATIVE_INT, file_dataspace_id_2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
hid_t file_dataset_2 = H5Dcreate (file, "metadata", H5T_NATIVE_LLONG, file_dataspace_id_2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
//H5Pclose(plist_id);
H5Sclose(file_dataspace_id);
......@@ -132,45 +126,51 @@ public:
// std::cout << "MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << ", Offset: " << offset[0] << ", Block: " << block[0] << std::endl;
int metadata[mpi_size];
long int metadata[mpi_size];
for (int i = 0; i < mpi_size; i++)
metadata[i] = sz_others.get(i);
//Select hyperslab in the file.
file_dataspace_id = H5Dget_space(file_dataset);
H5Sselect_hyperslab(file_dataspace_id, H5S_SELECT_SET, offset, NULL, count, block);
file_dataspace_id_2 = H5Dget_space(file_dataset_2);
//Create property list for collective dataset write.
plist_id = H5Pcreate(H5P_DATASET_XFER);
H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
// We split the write into chunks of 2GB maximum
size_t to_write = block[0];
size_t coffset = 0;
while (to_write)
{
hsize_t block_c[1];
block_c[0] = std::min((size_t)(to_write),(size_t)0x7FFFFFFF);
//Create property list for collective dataset write.
plist_id = H5Pcreate(H5P_DATASET_XFER);
H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
//Create data space in memory
hid_t mem_dataspace_id = H5Screate_simple(1, block_c, NULL);
//Write a data set to a file
H5Dwrite(file_dataset, H5T_NATIVE_CHAR, mem_dataspace_id, file_dataspace_id, plist_id, (const char *)pmem.getPointer());
hsize_t offset_c[1] = {offset[0] + coffset};
H5Sselect_hyperslab(file_dataspace_id, H5S_SELECT_SET, offset_c, NULL, count, block_c);
//Write a data set 2 to a file
H5Dwrite(file_dataset_2, H5T_NATIVE_INT, H5S_ALL, file_dataspace_id_2, plist_id, metadata);
/*
for (size_t i = 0; i < gdb_ext.size(); i++)
{
Box<dim,long int> box = gdb_ext.get(i).Dbox;
std::cout << "Dboxes saved: (" << box.getLow(0) << "; " << box.getLow(1) << "); (" << box.getHigh(0) << "; " << box.getHigh(1) << ")" << std::endl;
}
for (size_t i = 0; i < loc_grid.size(); i++)
{
std::cout << "loc_grids saved: (" << loc_grid.get(i).getGrid().getBox().getLow(0) << "; " << loc_grid.get(i).getGrid().getBox().getLow(1) << "); (" << loc_grid.get(i).getGrid().getBox().getHigh(0) << "; " << loc_grid.get(i).getGrid().getBox().getHigh(1) << ")" << std::endl;
}
*/
//Write a data set to a file
H5Dwrite(file_dataset, H5T_NATIVE_CHAR, mem_dataspace_id, file_dataspace_id, plist_id, (const char *)pmem.getPointer() + coffset);
coffset += std::min((size_t)(to_write),(size_t)0x7FFFFFFF);
to_write -= std::min((size_t)(to_write),(size_t)0x7FFFFFFF);
H5Sclose(mem_dataspace_id);
}
file_dataspace_id_2 = H5Dget_space(file_dataset_2);
//Write a data set 2 to a file
H5Dwrite(file_dataset_2, H5T_NATIVE_LLONG, H5S_ALL, file_dataspace_id_2, plist_id, metadata);
//Close/release resources.
H5Dclose(file_dataset);
H5Sclose(file_dataspace_id);
H5Dclose(file_dataset_2);
H5Sclose(file_dataspace_id_2);
H5Sclose(mem_dataspace_id);
H5Pclose(plist_id);
H5Fclose(file);
}
......