Commit a8b2bfa3 authored by incardon

Small changes for ie_ghost

parent 46ea1553
@@ -62,6 +62,9 @@ public:
//! type of the box
typedef T btype;
//! Indicates that this is a Box
typedef int yes_is_box;
//! It stores the two points bounding the box
type data;
@@ -1259,4 +1262,17 @@ public:
}
};
template<typename T, typename Sfinae = void>
struct is_Box: std::false_type {};
/*! \brief Check if a type T is a Box
*
* return true if T is a Box
*
*/
template<typename T>
struct is_Box<T, typename Void< typename T::yes_is_box>::type> : std::true_type
{};
#endif
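The hunk above adds a nested tag typedef (yes_is_box) and an is_Box trait that detects it through SFINAE. Below is a minimal, self-contained sketch of that detection idiom; the local Void_sketch and fake_box types are illustrative stand-ins, not part of the library headers, and the real Void metafunction is assumed to live elsewhere in the OpenFPM sources.

#include <type_traits>

// Illustrative stand-in for the Void metafunction used by the trait above:
// it maps any well-formed type to void, so the partial specialization below
// is viable exactly when T exposes the yes_is_box tag.
template<typename T> struct Void_sketch { typedef void type; };

// Toy type carrying the tag, mimicking what this commit adds to Box
struct fake_box { typedef int yes_is_box; };

template<typename T, typename Sfinae = void>
struct is_Box_sketch : std::false_type {};

template<typename T>
struct is_Box_sketch<T, typename Void_sketch<typename T::yes_is_box>::type> : std::true_type {};

static_assert(is_Box_sketch<fake_box>::value, "tag present -> detected as a Box");
static_assert(!is_Box_sketch<int>::value, "no tag -> not a Box");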
@@ -32,6 +32,9 @@ template<unsigned int dim ,typename T> class Point
//! boost fusion vector that stores the point
typedef boost::fusion::vector<T[dim]> type;
//! Indicates that this is a Point
typedef int yes_is_point;
//! structure that stores the data of the point
type data;
@@ -674,4 +677,17 @@ template <unsigned int N, typename T, typename Mem> std::string toPointString(co
template<unsigned int dim, typename T> using VectorS = Point<dim,T>;
template<typename T, typename Sfinae = void>
struct is_Point: std::false_type {};
/*! \brief Check if a type T is a Point
*
* return true if T is a Point
*
*/
template<typename T>
struct is_Point<T, typename Void< typename T::yes_is_point>::type> : std::true_type
{};
#endif
@@ -57,62 +57,7 @@ __global__ void vv_test_data_get(vector_vector_type vvt, vector_out_type out, in
BOOST_AUTO_TEST_SUITE( vector_cuda_tests )
//BOOST_AUTO_TEST_CASE (test_size_of_vector_and_vector_gpu_ker)
//{
// typedef openfpm::vector<Box<3,float>,CudaMemory,typename memory_traits_inte<Box<3,float>>::type,memory_traits_inte> proc_boxes;
// typedef openfpm::vector_gpu_ker<Box<3,float>,memory_traits_inte> proc_boxes_ker;
//
///* BOOST_REQUIRE_EQUAL(sizeof(openfpm::vector_gpu_ker<aggregate<int>,memory_traits_inte>),
// sizeof(openfpm::vector<aggregate<int>,CudaMemory,typename memory_traits_inte<aggregate<int>>::type,memory_traits_inte>));
//
//
// BOOST_REQUIRE_EQUAL(sizeof(openfpm::vector_gpu_ker<aggregate<int,int>,memory_traits_inte>),
// sizeof(openfpm::vector<aggregate<int,int>,CudaMemory,typename memory_traits_inte<aggregate<int,int>>::type,memory_traits_inte>));
//
// BOOST_REQUIRE_EQUAL(sizeof(openfpm::vector_gpu_ker<aggregate<int,int,float>,memory_traits_inte>),
// sizeof(openfpm::vector<aggregate<int,int,float>,CudaMemory,typename memory_traits_inte<aggregate<int,int,float>>::type,memory_traits_inte>));
//
//
// BOOST_REQUIRE_EQUAL(sizeof(openfpm::vector_gpu_ker<aggregate<proc_boxes_ker>,memory_traits_inte>),
// sizeof(openfpm::vector<aggregate<proc_boxes>,CudaMemory,typename memory_traits_inte<aggregate<proc_boxes>>::type,memory_traits_inte>));*/
//
//
//
//// BOOST_REQUIRE_EQUAL(sizeof(proc_boxes),sizeof(proc_boxes_ker));
//
// openfpm::vector<aggregate<proc_boxes>,CudaMemory,typename memory_traits_inte<aggregate<proc_boxes>>::type,memory_traits_inte> v_test;
//
// v_test.print_size();
//
// auto v_test_ker = v_test.toKernel();
//
// std::cout << std::endl << std::endl << std::endl;
//
// v_test_ker.print_size();
//
///* v_test.resize_no_device(5);
//
// for (size_t i = 0 ; i< v_test.size() ; i++)
// {
// v_test.template get<0>(i).resize(7);
// }
//
// auto v_test_ker = v_test.toKernel();
//
// std::cout << "SIZE: " << sizeof(proc_boxes) << " " << sizeof(proc_boxes_ker) << std::endl;
//
// size_t base = reinterpret_cast<size_t>(v_test.template get<0>(1).internal_get_size_pointer()) - reinterpret_cast<size_t>(v_test.template get<0>(0).internal_get_size_pointer());
// std::cout << std::hex << "BASE: " << base << " " << reinterpret_cast<size_t>(v_test.template get<0>(0).internal_get_size_pointer()) << std::endl;
//
// base = reinterpret_cast<size_t>(v_test_ker.template get<0>(1).internal_get_size_pointer()) - reinterpret_cast<size_t>(v_test_ker.template get<0>(0).internal_get_size_pointer());
// std::cout << "BASE: " << base << std::endl;
//
// base = reinterpret_cast<size_t>(v_test.template get<0>(2).internal_get_size_pointer()) - reinterpret_cast<size_t>(v_test.template get<0>(0).internal_get_size_pointer());
// std::cout << "BASE: " << base << std::endl;
//
// base = reinterpret_cast<size_t>(v_test_ker.template get<0>(2).internal_get_size_pointer()) - reinterpret_cast<size_t>(v_test_ker.template get<0>(0).internal_get_size_pointer());
// std::cout << "BASE: " << base << std::endl;*/
//}
BOOST_AUTO_TEST_CASE ( test_vector_of_vector_gpu )
{
@@ -842,9 +842,15 @@ BOOST_AUTO_TEST_CASE( vector_cuda_to_kernel_recursive )
{
typedef openfpm::vector_gpu<aggregate<int,long int>> test1_type;
typedef openfpm::vector_gpu<aggregate<int,openfpm::vector_gpu<aggregate<long int>>>> test2_type;
typedef openfpm::vector_gpu<aggregate<int,openfpm::vector_gpu<aggregate<Box<2,float>>>>> test3_type;
typedef openfpm::vector<Box<3,float>,CudaMemory,typename memory_traits_inte<Box<3,float>>::type,memory_traits_inte> test4_type;
typedef openfpm::vector_gpu<aggregate<int,openfpm::vector_gpu<Box<2,float>>>> test5_type;
typedef typename toKernel_transform<memory_traits_inte,test1_type>::type tker1;
typedef typename toKernel_transform<memory_traits_inte,test2_type>::type tker2;
typedef typename toKernel_transform<memory_traits_inte,test3_type>::type tker3;
typedef typename toKernel_transform<memory_traits_inte,test4_type>::type tker4;
typedef typename toKernel_transform<memory_traits_inte,test5_type>::type tker5;
bool test = std::is_same<tker1,openfpm::vector_gpu_ker<aggregate<int, long>, memory_traits_inte>>::value;
@@ -853,6 +859,18 @@ BOOST_AUTO_TEST_CASE( vector_cuda_to_kernel_recursive )
test = std::is_same<tker2,openfpm::vector_gpu_ker<aggregate<int, openfpm::vector_gpu_ker<aggregate<long>, memory_traits_inte> >, memory_traits_inte>>::value;
BOOST_REQUIRE_EQUAL(test,true);
test = std::is_same<tker3,openfpm::vector_gpu_ker<aggregate<int, openfpm::vector_gpu_ker<aggregate<Box<2,float>>, memory_traits_inte> >, memory_traits_inte>>::value;
BOOST_REQUIRE_EQUAL(test,true);
test = std::is_same<tker4,openfpm::vector_gpu_ker<Box<3,float>,memory_traits_inte>>::value;
BOOST_REQUIRE_EQUAL(test,true);
test = std::is_same<tker5,openfpm::vector_gpu_ker<aggregate<int, openfpm::vector_gpu_ker<Box<2,float>, memory_traits_inte> >, memory_traits_inte>>::value;
BOOST_REQUIRE_EQUAL(test,true);
}
BOOST_AUTO_TEST_SUITE_END()
@@ -30,6 +30,8 @@ struct aggregate
typedef boost::fusion::vector<list... , SE3_ADD_PROP(sizeof...(list))> type;
typedef boost::fusion::vector<list... > type_real;
typedef int yes_is_aggregate;
type data;
/*! \brief get the properties i
@@ -67,6 +69,8 @@ struct aggregate
static const unsigned int max_prop_real = boost::mpl::size<type>::type::value + SE3_SUB_MAX_PROP;
};
#else
@@ -86,6 +90,8 @@ struct aggregate
//! real internal type containing the data
typedef boost::fusion::vector<list...> type_real;
typedef int yes_is_aggregate;
//! the data
type data;
@@ -126,4 +132,18 @@ struct aggregate
#endif
template<typename T, typename Sfinae = void>
struct is_aggregate: std::false_type {};
/*! \brief Check if a type T is an aggregate
*
* return true if T is an aggregate
*
*/
template<typename T>
struct is_aggregate<T, typename Void< typename T::yes_is_aggregate>::type> : std::true_type
{};
#endif /* OPENFPM_DATA_SRC_UTIL_AGGREGATE_HPP_ */
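The same tag idiom is applied to aggregate here, and the three traits together feed the dispatch index computed in the transformation header shown next (impl = is_aggregate + is_Box * 2 + is_Point * 4). Below is a hedged sketch of the values that index takes for representative types; the Box and Point include paths are assumptions, only "data_type/aggregate.hpp" appears literally in this commit.

#include "data_type/aggregate.hpp" // path taken from the header below
#include "Space/Shape/Box.hpp"     // assumed include path
#include "Space/Shape/Point.hpp"   // assumed include path

// Same encoding as the impl constant in apply_trasform_impl further below:
// 1 = aggregate, 2 = Box, 4 = Point, 0 = any other type.
template<typename T>
constexpr int dispatch_value_sketch()
{
    return is_aggregate<T>::value + is_Box<T>::value * 2 + is_Point<T>::value * 4;
}

static_assert(dispatch_value_sketch<aggregate<int,float>>() == 1, "aggregate");
static_assert(dispatch_value_sketch<Box<3,float>>() == 2, "Box");
static_assert(dispatch_value_sketch<Point<3,float>>() == 4, "Point");
static_assert(dispatch_value_sketch<int>() == 0, "plain type");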
@@ -10,6 +10,50 @@
#include "data_type/aggregate.hpp"
/*! \brief This set of meta-functions traverses the tree-structure of types at compile time in depth-first order
* and transforms every node of type vector into vector_gpu_ker
*
* Consider
*
* vector_gpu<aggregate<int,vector_gpu<aggregate<int,float>>>>
*
* is a tree in this form
*
* \verbatim
*
* * vector_gpu<aggregate<...>>
* / \
* / \
* / \
* int * vector_gpu<aggregate<...>>
* / \
* / \
* / \
* int float
*
* \endverbatim
*
* At compile-time the vector is transformed into a tree of this form
*
*
* \verbatim
*
* * vector_gpu_ker<aggregate<...>>
* / \
* / \
* / \
* int * vector_gpu_ker<aggregate<...>>
* / \
* / \
* / \
* int float
*
* \endverbatim
*
*
*/
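The vector_cuda_to_kernel_recursive test earlier in this commit exercises exactly this transformation. A condensed sketch of one of its checks, assuming the vector, aggregate and transform headers are available:

#include <type_traits>

// Nested host vector type (test2_type in the test above) ...
typedef openfpm::vector_gpu<aggregate<int, openfpm::vector_gpu<aggregate<long int>>>> host_nested;

// ... maps to the corresponding nested kernel type (tker2 in the test)
typedef toKernel_transform<memory_traits_inte, host_nested>::type kernel_nested;

static_assert(std::is_same<kernel_nested,
        openfpm::vector_gpu_ker<aggregate<int,
            openfpm::vector_gpu_ker<aggregate<long>, memory_traits_inte>>,
        memory_traits_inte>>::value,
    "a nested vector_gpu becomes a nested vector_gpu_ker");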
namespace openfpm
{
@@ -23,25 +67,37 @@ namespace openfpm
struct vector_gpu_ker;
}
// Forward declaration of the Box class
template<unsigned int dim , typename T> class Box;
template<template <typename> class layout_base, typename T, bool = is_vector_native<T>::value>
struct toKernel_transform;
template<template <typename> class layout_base, typename ... args>
template<template <typename> class layout_base, typename T, typename ... args>
struct apply_trasform_impl
{
typedef void type;
};
template<template <typename> class layout_base, typename ... args>
struct apply_trasform_impl<layout_base,boost::fusion::vector<args...>>
template<template <typename> class layout_base, typename T, int impl, typename ... args>
struct aggregate_or_known_type
{
typedef aggregate<typename toKernel_transform<layout_base,args>::type ... > type;
};
template<template <typename> class layout_base, typename T, typename ... args>
struct apply_trasform_impl<layout_base,T,boost::fusion::vector<args...>>
{
static const int impl = is_aggregate<T>::value + is_Box<T>::value * 2 + is_Point<T>::value * 4;
typedef typename aggregate_or_known_type<layout_base,T,impl,args...>::type type;
};
template<template <typename> class layout_base,typename T>
struct apply_transform
{
typedef typename apply_trasform_impl<layout_base,typename T::type>::type type;
typedef typename apply_trasform_impl<layout_base,T,typename T::type>::type type;
};
template<template <typename> class layout_base, typename T, bool >
@@ -59,4 +115,18 @@ struct toKernel_transform<layout_base,T,true>
typedef openfpm::vector_gpu_ker<aggr,layout_base> type;
};
/////////////////////////////////////////////////// KNOWN TYPE SPECIALIZATION //////////////////////////////////
template<template <typename> class layout_base,typename T, typename ... args>
struct aggregate_or_known_type<layout_base,T,2,args ...>
{
typedef Box<T::dims,typename T::btype > type;
};
template<template <typename> class layout_base,typename T, typename ... args>
struct aggregate_or_known_type<layout_base,T,4,args ...>
{
typedef Point<T::dims,typename T::coord_type > type;
};
#endif /* TOKERNEL_TRANSFORMATION_HPP_ */
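With these specializations the transform is a pass-through for known element types: an element that is itself a Box (impl == 2) or a Point (impl == 4) is reproduced as-is instead of being rebuilt member by member like an aggregate. A hedged sketch of what that means for the Box path, consistent with the tker4/tker5 checks in the test above and assuming the relevant headers are included:

// Box elements survive the kernel transform unchanged: apply_transform
// resolves impl to 2, and the specialization above rebuilds Box<dims, btype>
// from the typedefs this commit added to Box. The Point path behaves the same way.
static_assert(std::is_same<
        apply_transform<memory_traits_inte, Box<3,float>>::type,
        Box<3,float>>::value,
    "a Box element is kept as-is by the kernel transform");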