Commit 5252630c, authored May 02, 2017 by incardon

Refactoring HDF5 load

parent 0382f18f

1 changed file: src/Grid/grid_dist_id.hpp
...
@@ -1663,42 +1663,11 @@ public:
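//! Map the grids loaded from file onto the current decomposition:
//! loc_grid_old / gdb_ext_old hold the grids as they were saved (old
//! decomposition), map_ redistributes their data into loc_grid / gdb_ext,
//! and the old buffers are then cleared.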
void map()
{
    getGlobalGridsInfo(gdb_ext_global);

/*  std::cout << "Global size: " << gdb_ext_global.size() << std::endl;

    for (size_t i = 0; i < gdb_ext_global.size(); i++)
    {
        std::cout << "(" << gdb_ext_global.get(i).Dbox.getLow(0) << "; " << gdb_ext_global.get(i).Dbox.getLow(1) << "); (" << gdb_ext_global.get(i).Dbox.getHigh(0) << "; " << gdb_ext_global.get(i).Dbox.getHigh(1) << ")" << std::endl;
        std::cout << "I = " << i << ", Origin is (" << gdb_ext_global.get(i).origin.get(0) << "; " << gdb_ext_global.get(i).origin.get(1) << ")" << std::endl;
    }

    if (v_cl.getProcessUnitID() == 0)
    {
        for (size_t i = 0; i < gdb_ext.size(); i++)
        {
            Box<dim,long int> box = gdb_ext.get(i).Dbox;
            box += gdb_ext.get(i).origin;
            std::cout << "(" << box.getLow(0) << "; " << box.getLow(1) << "); (" << box.getHigh(0) << "; " << box.getHigh(1) << ")" << std::endl;
        }
    }

    if (v_cl.getProcessUnitID() == 0)
    {
        for (size_t i = 0; i < loc_grid_old.size(); i++)
        {
            Point<dim,St> p1;
            Point<dim,St> p2;
            for (size_t n = 0; n < dim; n++)
            {
                p1.get(n) = loc_grid_old.get(i).getGrid().getBox().getLow(n);
                p2.get(n) = loc_grid_old.get(i).getGrid().getBox().getHigh(n);
            }

            std::cout << "Loc_grid_old: (" << p1.get(0) << "; " << p1.get(1) << "); (" << p2.get(0) << "; " << p2.get(1) << "); " << "Gdb_ext_old: (" << gdb_ext_old.get(i).Dbox.getLow(0) << "; " << gdb_ext_old.get(i).Dbox.getLow(1) << "); (" << gdb_ext_old.get(i).Dbox.getHigh(0) << "; " << gdb_ext_old.get(i).Dbox.getHigh(1) << ")" << std::endl;
        }
    }
*/
    this->template map_(dec,cd_sm,loc_grid,loc_grid_old,gdb_ext,gdb_ext_old,gdb_ext_global);

    loc_grid_old.clear();
    gdb_ext_old.clear();
}
inline void save(const std::string & filename) const
...
@@ -1708,368 +1677,14 @@ public:
    h5s.save(filename,loc_grid,gdb_ext);
}
void load_block(long int bid,
                hssize_t mpi_size_old,
                int * metadata_out,
                openfpm::vector<size_t> & metadata_accum,
                hid_t plist_id,
                hid_t dataset_2)
{
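    // bid is the index of the old-rank block to read; bid == -1 means
    // "participate in the collective read with an empty selection".
    // metadata_out[i] is the size in bytes of old rank i's block and
    // metadata_accum.get(i) its starting offset inside "grid_dist".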
/*  if (mpi_size >= mpi_size_old)
    {
        if (mpi_rank >= mpi_size_old)
            block[0] = 0;
        else
            block[0] = {(size_t)metadata_out[mpi_rank]};
    }
    else
    {
        int x = mpi_size_old/mpi_size;
        int shift = mpi_rank*x;
        for (int i = 0; i < x; i++)
        {
            //block0.get(mpi_rank).add(metadata_out[shift]);
            block[0] += metadata_out[shift];
            shift++;
        }
        int y = mpi_size_old%mpi_size;
        if (mpi_rank < y)
        {
            block_add[0] += metadata_out[mpi_size*x+mpi_rank];
            //block_add0.get(mpi_rank).add(metadata_out[mpi_size*x+mpi_rank]);
        }
    }*/

//  std::cout << "BID: " << bid << std::endl;

    hsize_t offset[1];
    hsize_t block[1];

    if (bid < mpi_size_old && bid != -1)
    {
        offset[0] = metadata_accum.get(bid);
        block[0] = metadata_out[bid];
    }
    else
    {
        offset[0] = 0;
        block[0] = 0;
    }
// std::cout << "Offset: " << offset[0] << "; Block: " << block[0]<< std::endl;
// hsize_t offset_add[1] = {0};
/* if (mpi_size >= mpi_size_old)
{
if (mpi_rank >= mpi_size_old)
offset[0] = 0;
else
{
for (int i = 0; i < mpi_rank; i++)
offset[0] += metadata_out[i];
}
}
else
{
int x = mpi_size_old/mpi_size;
int shift = mpi_rank*x;
for (int i = 0; i < shift; i++)
{
offset[0] += metadata_out[i];
//offset0.get(mpi_rank).add(metadata_out[i]);
}
int y = mpi_size_old%mpi_size;
if (mpi_rank < y)
{
for (int i = 0; i < mpi_size*x + mpi_rank; i++)
{
offset_add[0] += metadata_out[i];
//offset_add0.get(mpi_rank).add(metadata_out[i]);
}
}
}*/
//hsize_t stride[1] = {1};
hsize_t
count
[
1
]
=
{
1
};
//std::cout << "LOAD: MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << ", Offset: " << offset[0] << ", Offset_add: " << offset_add[0] << ", Block: " << block[0] << ", Block_add: " << block_add[0] << std::endl;
/*
std::cout << "LOAD: MPI rank: " << mpi_rank << ", MPI size: " << mpi_size << std::endl;
for (size_t i = 0; i < offset0.get(mpi_rank).size(); i++)
std::cout << ", Offset: " << offset0.get(mpi_rank).get(i) << std::endl;
for (size_t i = 0; i < offset_add0.get(mpi_rank).size(); i++)
std::cout << ", Offset_add: " << offset_add0.get(mpi_rank).get(i) << std::endl;
for (size_t i = 0; i < block0.get(mpi_rank).size(); i++)
std::cout << ", Block: " << block0.get(mpi_rank).get(i) << std::endl;
for (size_t i = 0; i < block_add0.get(mpi_rank).size(); i++)
std::cout << ", Block_add: " << block_add0.get(mpi_rank).get(i) << std::endl;
*/
//Select file dataspace
hid_t
file_dataspace_id_2
=
H5Dget_space
(
dataset_2
);
H5Sselect_hyperslab
(
file_dataspace_id_2
,
H5S_SELECT_SET
,
offset
,
NULL
,
count
,
block
);
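    // With count = {1} and a NULL stride, the selection is one contiguous
    // run of block[0] chars starting at offset[0]; when block[0] == 0 the
    // selection is empty but the rank still joins the collective read.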
    //Select file dataspace
/*  hid_t file_dataspace_id_3 = H5Dget_space(dataset_2);
    H5Sselect_hyperslab(file_dataspace_id_3, H5S_SELECT_SET, offset_add, NULL, count, block_add);*/

    hsize_t mdim_2[1] = {block[0]};
//  hsize_t mdim_3[1] = {block_add[0]};

    //Size for data space in memory
    /*if (mpi_rank >= mpi_size_old)
        mdim_2[0] = 0;
    else
        mdim_2[0] = metadata_out[mpi_rank];*/

    //Create data space in memory
    hid_t mem_dataspace_id_2 = H5Screate_simple(1, mdim_2, NULL);
//  hid_t mem_dataspace_id_3 = H5Screate_simple(1, mdim_3, NULL);

    //if (mpi_rank == 0)
/*  {
        hssize_t size2;
        size2 = H5Sget_select_npoints (mem_dataspace_id_2);
        printf ("\nLOAD: memspace_id_2 size: %llu\n", size2);
        size2 = H5Sget_select_npoints (file_dataspace_id_2);
        printf ("LOAD: dataspace_id_2 size: %llu\n", size2);
    }*/
/*
    if (mpi_rank == 0)
    {
        hssize_t size2;
        size2 = H5Sget_select_npoints (mem_dataspace_id_3);
        printf ("\nLOAD: memspace_id_3 size: %llu\n", size2);
        size2 = H5Sget_select_npoints (file_dataspace_id_3);
        printf ("LOAD: dataspace_id_3 size: %llu\n", size2);
    }
*/
/*  size_t sum = 0;
    for (int i = 0; i < mpi_size_old; i++)
    {
        sum += metadata_out[i];
    }*/

//  std::cout << "LOAD: sum: " << sum << std::endl;

    // allocate the memory
    HeapMemory pmem;
//  HeapMemory pmem2;
    //pmem.allocate(req);
    ExtPreAlloc<HeapMemory> & mem = *(new ExtPreAlloc<HeapMemory>(block[0],pmem));
    mem.incRef();
//  ExtPreAlloc<HeapMemory> & mem2 = *(new ExtPreAlloc<HeapMemory>(block_add[0],pmem2));
//  mem2.incRef();

    // Read the dataset.
    H5Dread(dataset_2, H5T_NATIVE_CHAR, mem_dataspace_id_2, file_dataspace_id_2, plist_id, (char *)mem.getPointer());

    // Read the dataset.
//  H5Dread(dataset_2, H5T_NATIVE_CHAR, mem_dataspace_id_3, file_dataspace_id_3, plist_id, (char *)mem2.getPointer());

    mem.allocate(pmem.size());
//  mem2.allocate(pmem2.size());

//  std::cout << "Mem.size(): " << mem.size() << " = " << block[0] << std::endl;

    Unpack_stat ps;

    openfpm::vector<device_grid> loc_grid_old_unp;
    openfpm::vector<GBoxes<device_grid::dims>> gdb_ext_old_unp;

    Unpacker<decltype(loc_grid_old),HeapMemory>::unpack(mem,loc_grid_old_unp,ps,1);
    Unpacker<decltype(gdb_ext_old),HeapMemory>::unpack(mem,gdb_ext_old_unp,ps,1);
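    // The packed buffer contains loc_grid_old followed by gdb_ext_old;
    // the shared Unpack_stat ps carries the read cursor from the first
    // unpack into the second.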
/*
    std::cout << "Loc_grid_old.size() before merge: " << loc_grid_old.size() << std::endl;
    std::cout << "Gdb_ext_old.size() before merge: " << gdb_ext_old.size() << std::endl;
    std::cout << "Loc_grid_old_unp.size() before merge: " << loc_grid_old_unp.size() << std::endl;
    std::cout << "Gdb_ext_old_unp.size() before merge: " << gdb_ext_old_unp.size() << std::endl;
*/
    for (size_t i = 0; i < loc_grid_old_unp.size(); i++)
        loc_grid_old.add(loc_grid_old_unp.get(i));

    for (size_t i = 0; i < gdb_ext_old_unp.size(); i++)
        gdb_ext_old.add(gdb_ext_old_unp.get(i));

//  std::cout << "Loc_grid_old.size() after merge: " << loc_grid_old.size() << std::endl;
//  std::cout << "Gdb_ext_old.size() after merge: " << gdb_ext_old.size() << std::endl;
//  std::cout << "*********************************" << std::endl;

    mem.decRef();
    delete &mem;
}
inline void load(const std::string & filename)
{
    MPI_Comm comm = v_cl.getMPIComm();
    MPI_Info info = MPI_INFO_NULL;

    int mpi_rank = v_cl.getProcessUnitID();
    //int mpi_size = v_cl.getProcessingUnits();

    // Set up file access property list with parallel I/O access
    hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(plist_id, comm, info);

    //Open a file
    hid_t file = H5Fopen (filename.c_str(), H5F_ACC_RDONLY, plist_id);
    H5Pclose(plist_id);

    //Open dataset
    hid_t dataset = H5Dopen (file, "metadata", H5P_DEFAULT);

    //Create property list for collective dataset read
    plist_id = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);

    //Select file dataspace
    hid_t file_dataspace_id = H5Dget_space(dataset);

    hssize_t mpi_size_old = H5Sget_select_npoints (file_dataspace_id);
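    // "metadata" stores one entry per writing process, so the number of
    // points in its dataspace is the number of ranks the file was saved with.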
    //if (mpi_rank == 0)
        //printf ("\nOld MPI size: %llu\n", mpi_size_old);

    //Where to read metadata
    int metadata_out[mpi_size_old];

    for (int i = 0; i < mpi_size_old; i++)
    {
        metadata_out[i] = 0;
    }

    //Size for data space in memory
    hsize_t mdim[1] = {(size_t)mpi_size_old};

    //Create data space in memory
    hid_t mem_dataspace_id = H5Screate_simple(1, mdim, NULL);

/*
    if (mpi_rank == 0)
    {
        hssize_t size;
        size = H5Sget_select_npoints (mem_dataspace_id);
        printf ("\nmemspace_id size: %llu\n", size);
        size = H5Sget_select_npoints (file_dataspace_id);
        printf ("dataspace_id size: %llu\n", size);
    }
*/
    // Read the dataset.
    H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace_id, file_dataspace_id, plist_id, metadata_out);

/*
    if (mpi_rank == 0)
    {
        std::cout << "Metadata_out[]: ";
        for (int i = 0; i < mpi_size_old; i++)
        {
            std::cout << metadata_out[i] << " ";
        }
        std::cout << " " << std::endl;
    }
*/
    openfpm::vector<size_t> metadata_accum;
    metadata_accum.resize(mpi_size_old);

    metadata_accum.get(0) = 0;
    for (int i = 1 ; i < mpi_size_old ; i++)
        metadata_accum.get(i) = metadata_accum.get(i-1) + metadata_out[i-1];
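    // metadata_accum is the exclusive prefix sum of metadata_out, e.g.
    // metadata_out = {4, 7, 5} gives metadata_accum = {0, 4, 11}: the byte
    // offset where each old rank's block starts inside "grid_dist".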
    //Open dataset
    hid_t dataset_2 = H5Dopen (file, "grid_dist", H5P_DEFAULT);

    HDF5_reader<GRID_DIST> h5l;

    //Create property list for collective dataset read
    plist_id = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);

    /////////////////////////////////////

    openfpm::vector<size_t> n_block;
    n_block.resize(v_cl.getProcessingUnits());

    for (size_t i = 0 ; i < n_block.size() ; i++)
        n_block.get(i) = mpi_size_old / v_cl.getProcessingUnits();

    size_t rest_block = mpi_size_old % v_cl.getProcessingUnits();

//  std::cout << "MPI size old: " << mpi_size_old << std::endl;
//  std::cout << "MPI size: " << v_cl.getProcessingUnits() << std::endl;
//  std::cout << "Rest block: " << rest_block << std::endl;

    size_t max_block;

    if (rest_block != 0)
        max_block = n_block.get(0) + 1;
    else
        max_block = n_block.get(0);

    //for(size_t i = 0 ; i < n_block.size() ; i++)
    for (size_t i = 0 ; i < rest_block ; i++)
        n_block.get(i) += 1;
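    // Every current rank gets mpi_size_old / P whole blocks and the first
    // mpi_size_old % P ranks take one extra, e.g. 10 old blocks over
    // P = 4 ranks gives n_block = {3, 3, 2, 2}.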
    //for(size_t i = 0 ; i < n_block.size() ; i++)
        //std::cout << "n_block.get(i): " << n_block.get(i) << std::endl;

    size_t start_block = 0;
    size_t stop_block = 0;

    if (v_cl.getProcessUnitID() != 0)
    {
        for (size_t i = 0 ; i < v_cl.getProcessUnitID() ; i++)
            start_block += n_block.get(i);
    }

    stop_block = start_block + n_block.get(v_cl.getProcessUnitID());

//  std::cout << "ID: " << v_cl.getProcessUnitID() << "; Start block: " << start_block << "; " << "Stop block: " << stop_block << std::endl;

    if (mpi_rank >= mpi_size_old)
        load_block(start_block,mpi_size_old,metadata_out,metadata_accum,plist_id,dataset_2);
    else
    {
        size_t n_bl = 0;
        size_t lb = start_block;
        for ( ; lb < stop_block ; lb++, n_bl++)
            load_block(lb,mpi_size_old,metadata_out,metadata_accum,plist_id,dataset_2);

        if (n_bl < max_block)
            load_block(-1,mpi_size_old,metadata_out,metadata_accum,plist_id,dataset_2);
    }
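    // The transfer property list is H5FD_MPIO_COLLECTIVE, so every rank
    // must take part in every H5Dread. Per-rank block counts differ by at
    // most one, so a rank that owns fewer than max_block blocks pads with
    // a single load_block(-1, ...), i.e. a read with an empty selection.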
    ////////////////////////////////////

    //std::cout << "LOAD: sum: " << sum << std::endl;

    // Close the dataset.
    H5Dclose(dataset);
    H5Dclose(dataset_2);

    // Close the file.
    H5Fclose(file);
    H5Pclose(plist_id);

    h5l.load<device_grid>(filename,loc_grid_old,gdb_ext_old);

    // Map the distributed grid
    map();

/*  for (size_t i = 0; i < loc_grid.size(); i++)
    {
        std::cout << "loc_grids loaded: (" << loc_grid.get(i).getGrid().getBox().getLow(0) << "; " << loc_grid.get(i).getGrid().getBox().getLow(1) << "); (" << loc_grid.get(i).getGrid().getBox().getHigh(0) << "; " << loc_grid.get(i).getGrid().getBox().getHigh(1) << ")" << std::endl;
    }*/
}
//! Define friend classes
...
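Note: the load path above is a standard parallel-HDF5 pattern: read a per-rank size table collectively, prefix-sum it into byte offsets, then have each rank pull its contiguous slice with a collective hyperslab read. Below is a minimal, self-contained sketch of that pattern; the file name grid.h5, the omitted error checking, and the plain std::vector buffers are illustrative assumptions, not part of this commit.

    #include <mpi.h>
    #include <hdf5.h>
    #include <vector>

    int main(int argc, char ** argv)
    {
        MPI_Init(&argc, &argv);
        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        // File access property list: parallel read through MPI-IO
        hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
        H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
        hid_t file = H5Fopen("grid.h5", H5F_ACC_RDONLY, fapl);   // hypothetical file
        H5Pclose(fapl);

        // Transfer property list: collective reads
        hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);
        H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);

        // Read the whole per-rank size table on every rank
        hid_t dset_md   = H5Dopen(file, "metadata", H5P_DEFAULT);
        hid_t fspace_md = H5Dget_space(dset_md);
        hssize_t nranks_old = H5Sget_select_npoints(fspace_md);

        std::vector<int> sizes(nranks_old);
        H5Dread(dset_md, H5T_NATIVE_INT, H5S_ALL, fspace_md, dxpl, sizes.data());

        // Exclusive prefix sum: byte offset of each old rank's block
        std::vector<hsize_t> accum(nranks_old, 0);
        for (hssize_t i = 1; i < nranks_old; i++)
            accum[i] = accum[i - 1] + sizes[i - 1];

        // This rank's contiguous slice (empty if it has no old block)
        bool has_block = (hssize_t)rank < nranks_old;
        hsize_t offset[1] = { has_block ? accum[rank] : 0 };
        hsize_t block[1]  = { has_block ? (hsize_t)sizes[rank] : 0 };
        hsize_t count[1]  = { 1 };

        hid_t dset   = H5Dopen(file, "grid_dist", H5P_DEFAULT);
        hid_t fspace = H5Dget_space(dset);
        H5Sselect_hyperslab(fspace, H5S_SELECT_SET, offset, NULL, count, block);

        hid_t mspace = H5Screate_simple(1, block, NULL);
        std::vector<char> buf(block[0] > 0 ? block[0] : 1);
        H5Dread(dset, H5T_NATIVE_CHAR, mspace, fspace, dxpl, buf.data());

        // ... unpack buf into local grids here ...

        H5Sclose(mspace); H5Sclose(fspace); H5Dclose(dset);
        H5Sclose(fspace_md); H5Dclose(dset_md);
        H5Pclose(dxpl); H5Fclose(file);
        MPI_Finalize();
        return 0;
    }

In the commit itself the raw bytes are not kept as a char buffer but handed to Unpacker<...>::unpack, which rebuilds loc_grid_old / gdb_ext_old before map() redistributes them.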