Commit 37d1890f authored by incardon

Adding vector copy with different layout

parent 42183947
@@ -263,6 +263,9 @@ private:
//! layout of the encapsulated object
typedef typename memory_traits_lin<T>::type Mem;
//! alternative layout (memory_traits_inte) of the encapsulated object
typedef typename memory_traits_inte<T>::type Mem2;
public:
//! indicate that it is an encapsulated object
@@ -358,7 +361,7 @@ public:
* \return itself
*
*/
__device__ inline encapc<dim,T,Mem> & operator=(const encapc<dim,T,Mem> & ec)
__device__ __host__ inline encapc<dim,T,Mem> & operator=(const encapc<dim,T,Mem> & ec)
{
copy_cpu_encap_encap<encapc<dim,T,Mem>,encapc<dim,T,Mem>> cp(ec,*this);
@@ -367,6 +370,22 @@ public:
return *this;
}
/*! \brief Assignment
*
* \param ec object encapsulated to copy
*
* \return itself
*
*/
__device__ __host__ inline encapc<dim,T,Mem> & operator=(const encapc<dim,T,Mem2> & ec)
{
copy_cpu_encap_encap<encapc<dim,T,Mem2>,encapc<dim,T,Mem>> cp(ec,*this);
boost::mpl::for_each_ref< boost::mpl::range_c<int,0,T::max_prop> >(cp);
return *this;
}
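Note: the per-property copy above works by applying a functor to every index in boost::mpl::range_c<int,0,T::max_prop>. Below is a minimal standalone sketch of that iteration pattern, using the standard boost::mpl::for_each (for_each_ref is assumed here to be OpenFPM's by-reference variant of the same idea); the point_prp aggregate and copy_props functor are hypothetical names invented for the sketch, not part of this commit.

#include <iostream>
#include <boost/fusion/include/vector.hpp>
#include <boost/fusion/include/at_c.hpp>
#include <boost/mpl/for_each.hpp>
#include <boost/mpl/range_c.hpp>

// Hypothetical "aggregate": two properties stored in a fusion vector
struct point_prp
{
    typedef boost::fusion::vector<float,double> type;
    static const int max_prop = 2;
    type data;
};

// Functor copying property i from src to dst, one property per call
struct copy_props
{
    const point_prp::type & src;
    point_prp::type & dst;

    copy_props(const point_prp::type & src, point_prp::type & dst)
    : src(src), dst(dst) {}

    template<typename I> void operator()(I)
    {
        boost::fusion::at_c<I::value>(dst) = boost::fusion::at_c<I::value>(src);
    }
};

int main()
{
    point_prp a, b;
    boost::fusion::at_c<0>(a.data) = 1.0f;
    boost::fusion::at_c<1>(a.data) = 2.0;

    copy_props cp(a.data, b.data);
    boost::mpl::for_each< boost::mpl::range_c<int,0,point_prp::max_prop> >(cp);

    std::cout << boost::fusion::at_c<1>(b.data) << std::endl;   // prints 2
}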
/*! \brief Assignment
*
* \param obj object to copy
@@ -374,7 +393,7 @@ public:
* \return itself
*
*/
__device__ inline encapc<dim,T,Mem> & operator=(const T & obj)
__device__ __host__ inline encapc<dim,T,Mem> & operator=(const T & obj)
{
copy_fusion_vector<typename T::type> cp(obj.data,data_c);
@@ -457,6 +476,9 @@ class encapc<dim,T,typename memory_traits_inte<T>::type>
//! type of layout
typedef typename memory_traits_inte<T>::type Mem;
//! alternative layout (memory_traits_lin) of the encapsulated object
typedef typename memory_traits_lin<T>::type Mem2;
//! reference to the encapsulated object
Mem & data;
@@ -522,6 +544,22 @@ public:
return *this;
}
/*! \brief Assignment
*
* \param ec encapsulator
*
* \return itself
*
*/
__device__ __host__ inline encapc<dim,T,Mem> & operator=(const encapc<dim,T,Mem2> & ec)
{
copy_cpu_encap_encap<encapc<dim,T,Mem2>,encapc<dim,T,Mem>> cp(ec,*this);
boost::mpl::for_each_ref< boost::mpl::range_c<int,0,T::max_prop> >(cp);
return *this;
}
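Note: as far as this diff shows, memory_traits_lin and memory_traits_inte are two alternative layouts of the same aggregate (roughly array-of-structs versus struct-of-arrays), and the two new operator= overloads let an element of one layout be assigned from an element of the other. A plain C++ sketch of that idea, with hypothetical names and none of the OpenFPM encapsulation machinery:

#include <cstddef>
#include <cstdio>
#include <vector>

// Hypothetical element with two properties
struct elem_aos { float x; double w; };          // array-of-structs: one struct per element

struct soa_storage                               // struct-of-arrays: one array per property
{
    std::vector<float>  x;
    std::vector<double> w;
};

// Copy element i of the SoA container into element j of the AoS container
void copy_soa_to_aos(const soa_storage & src, std::size_t i,
                     std::vector<elem_aos> & dst, std::size_t j)
{
    dst[j].x = src.x[i];
    dst[j].w = src.w[i];
}

int main()
{
    soa_storage s;
    s.x = {1.0f, 2.0f};
    s.w = {10.0, 20.0};

    std::vector<elem_aos> d(2);
    copy_soa_to_aos(s, 1, d, 0);

    std::printf("%f %f\n", d[0].x, d[0].w);      // 2.000000 20.000000
}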
/*! \brief Assignment
*
* \param obj object to copy
......
@@ -1155,6 +1155,30 @@ public:
this->get_o(key1) = g.get_o(key2);
}
/*! \brief Set an element of the grid from another element of another grid
*
* \param key1 element of the grid to set
* \param g source grid
* \param key2 element of the source grid to copy
*
*/
template<typename Mem,typename layout2, template <typename> class layout_base2> inline void set_general(const grid_key_dx<dim> & key1,
const grid_base_impl<dim,T,Mem,layout2,layout_base2> & g,
const grid_key_dx<dim> & key2)
{
#ifdef SE_CLASS2
check_valid(this,8);
#endif
#ifdef SE_CLASS1
check_init();
check_bound(key1);
check_bound(g,key2);
#endif
this->get_o(key1) = g.get_o(key2);
}
/*! \brief return the size of the grid
*
* \return Return the size of the grid
......
@@ -365,7 +365,7 @@ struct host_to_device_impl
kernel_type,
layout_base,
is_vector<typename mem_r_type::value_type>::value>
::template transform<Memory,mem_r_type>(static_cast<CudaMemory *>(boost::fusion::at_c<boost::mpl::at<v_prp,boost::mpl::int_<T::value>>::type::value>(dst).mem),
::template transform<Memory,mem_r_type>(static_cast<Memory *>(boost::fusion::at_c<boost::mpl::at<v_prp,boost::mpl::int_<T::value>>::type::value>(dst).mem),
boost::fusion::at_c<boost::mpl::at<v_prp,boost::mpl::int_<T::value>>::type::value>(dst).mem_r,
start,
stop);
......
@@ -629,6 +629,29 @@ public:
return *this;
}
/*! \brief Assignment from a CellList with a different memory type
 *
 * \param cell cell list to copy
*
* \return itself
*
*/
template<typename Mem_type2>
CellList<dim,T,Mem_type,transform> & operator=(const CellList<dim,T,Mem_type2,transform> & cell)
{
NNc_full = cell.private_get_NNc_full();
NNc_sym = cell.private_get_NNc_sym();
Mem_type::copy_general(static_cast<const Mem_type2 &>(cell));
static_cast<CellDecomposer_sm<dim,T,transform> &>(*this) = static_cast<const CellDecomposer_sm<dim,T,transform> &>(cell);
n_dec = cell.get_ndec();
from_cd = cell.private_get_from_cd();
return *this;
}
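Note: CellList<dim,T,Mem_type2,transform> is a distinct type from CellList<dim,T,Mem_type,transform>, so this operator= cannot reach the other instantiation's private members directly; presumably that is why the private_get_* accessors added further down in this diff are public. A minimal sketch of the pattern, with made-up names:

#include <iostream>

// Two instantiations of the same class template are unrelated types,
// so cross-instantiation copies go through public accessors.
template<typename Storage>
class container
{
    Storage data;          // private state of this instantiation

public:
    container() : data() {}

    explicit container(const Storage & d) : data(d) {}

    // Public accessor used by other instantiations (analogous to private_get_*)
    const Storage & private_get_data() const { return data; }

    // Assignment from a container holding a different storage type
    template<typename Storage2>
    container<Storage> & operator=(const container<Storage2> & other)
    {
        data = static_cast<Storage>(other.private_get_data());
        return *this;
    }
};

int main()
{
    container<double> a(3.5);
    container<int> b;
    b = a;                                            // cross-instantiation copy via the accessor
    std::cout << b.private_get_data() << std::endl;   // prints 3
}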
/*! \brief Get an iterator over particles following the cell structure
*
* \param dom_cells cells in the domain
@@ -1095,6 +1118,21 @@ public:
#endif
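/*! \brief Return the private member NNc_full
 *
 * \return NNc_full
 *
 */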
const NNc_array<dim,(unsigned int)openfpm::math::pow(3,dim)> & private_get_NNc_full () const
{
return NNc_full;
}
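/*! \brief Return the private member NNc_sym
 *
 * \return NNc_sym
 *
 */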
const NNc_array<dim,(unsigned int)openfpm::math::pow(3,dim)/2+1> & private_get_NNc_sym () const
{
return NNc_sym;
}
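/*! \brief Return the private member from_cd
 *
 * \return from_cd
 *
 */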
bool private_get_from_cd() const
{
return from_cd;
}
/////////////////////////////////////
/////////////////////////////////////
......
@@ -184,6 +184,20 @@ public:
this->swap(mem);
}
/*! \brief Copy a Mem_fast object
*
* \param mem Mem_fast to copy
*
*/
template<typename Memory2>
inline void copy_general(const Mem_fast<Memory2,local_index> & mem)
{
slot = mem.private_get_slot();
cl_n = mem.private_get_cl_n();
cl_base = mem.private_get_cl_base();
}
/*! \brief Add an element to the cell
*
* \param cell_id id of the cell
@@ -393,6 +407,36 @@ public:
#endif
/*! \brief Return the private data-structure cl_n
*
* \return cl_n
*
*/
const openfpm::vector<aggregate<local_index>,Memory> & private_get_cl_n() const
{
return cl_n;
}
/*! \brief Return the private slot
*
* \return slot
*
*/
const int & private_get_slot() const
{
return slot;
}
/*! \brief Return the private data-structure cl_base
*
* \return cl_base
*
*/
const base & private_get_cl_base() const
{
return cl_base;
}
};
......
@@ -44,6 +44,57 @@
namespace openfpm
{
template<bool is_ok_cuda,typename T, typename Memory,
typename layout, template<typename> class layout_base,
typename grow_p>
struct add_prp_device_impl
{
template <typename S,
typename M,
typename gp,
unsigned int impl,
template <typename> class layout_base2,
unsigned int ...args>
static void run(openfpm::vector<T,Memory,layout,layout_base,grow_p,impl> & this_ ,const openfpm::vector<S,M,typename layout_base2<S>::type,layout_base2,gp,impl> & v)
{
std::cout << __FILE__ << ":" << __LINE__ << " Error: the function add_prp_device only works with a CUDA-enabled vector" << std::endl;
}
};
template<typename T, typename Memory,
typename layout, template<typename> class layout_base,
typename grow_p>
struct add_prp_device_impl<true,T,Memory,layout,layout_base,grow_p>
{
template <typename S,
typename M,
typename gp,
unsigned int impl,
template <typename> class layout_base2,
unsigned int ...args>
static void run(vector<T,Memory,layout,layout_base,grow_p,impl> & this_ ,const vector<S,M,typename layout_base2<S>::type,layout_base2,gp,impl> & v)
{
#ifdef SE_CLASS2
check_valid(&this_,8);
#endif
// merge the data on device
#if defined(CUDA_GPU) && defined(__NVCC__)
size_t old_sz = this_.size();
this_.resize(this_.size() + v.size(),DATA_ON_DEVICE);
auto ite = v.getGPUIterator();
merge_add_prp_device_impl<decltype(v.toKernel()),decltype(this_.toKernel()),args...><<<ite.wthr,ite.thr>>>(v.toKernel(),this_.toKernel(),(unsigned int)old_sz);
#else
std::cout << __FILE__ << ":" << __LINE__ << " Error: the function add_prp_device only works when map_vector is compiled with nvcc" << std::endl;
#endif
}
};
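Note: add_prp_device_impl is a compile-time dispatch: the primary template only reports an error, the <true> specialization does the real work, and the caller (further down) selects between them with std::is_same<Memory,CudaMemory>::value. A self-contained sketch of that idiom, with illustrative names only:

#include <iostream>
#include <type_traits>
#include <vector>

// Primary template: fallback used when the compile-time condition is false
template<bool is_ok>
struct merge_impl
{
    template<typename V>
    static void run(V & dst, const V & src)
    {
        std::cout << "merge not supported for this storage type" << std::endl;
    }
};

// Specialization selected when the condition is true: does the real work
template<>
struct merge_impl<true>
{
    template<typename V>
    static void run(V & dst, const V & src)
    {
        dst.insert(dst.end(), src.begin(), src.end());
    }
};

struct cuda_like_tag {};   // stand-in for CudaMemory in this sketch

template<typename Memory>
void merge(std::vector<int> & dst, const std::vector<int> & src)
{
    // The boolean template argument picks the implementation at compile time
    merge_impl<std::is_same<Memory,cuda_like_tag>::value>::run(dst, src);
}

int main()
{
    std::vector<int> a = {1,2};
    std::vector<int> b = {3};

    merge<cuda_like_tag>(a, b);           // true  -> the specialization merges
    merge<int>(a, b);                     // false -> the fallback prints an error

    std::cout << a.size() << std::endl;   // prints 3
}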
/*! \brief Implementation of 1-D std::vector like structure
*
* Stub object look at the various implementations
@@ -646,6 +697,9 @@ namespace openfpm
unsigned int ...args>
void add_prp_device(const vector<S,M,typename layout_base2<S>::type,layout_base2,gp,impl> & v)
{
add_prp_device_impl<std::is_same<Memory,CudaMemory>::value,T,Memory,layout,layout_base,grow_p>
::template run<S,M,gp,impl,layout_base2,args...>(*this,v);
/*
#ifdef SE_CLASS2
check_valid(this,8);
#endif
@@ -662,7 +716,7 @@ namespace openfpm
#else
std::cout << __FILE__ << ":" << __LINE__ << " Error the function add_prp_device only work when map_vector is compiled with nvcc" << std::endl;
#endif
#endif*/
}
/*! \brief Insert an entry in the vector
@@ -1198,6 +1252,60 @@ namespace openfpm
return *this;
}
/*! \brief Assignment operator
 *
 * move assignment from a vector with a different memory type
 *
 * \param mv vector to move from
 *
 * \return itself
 *
 */
template<typename Mem, template <typename> class layout_base2>
vector<T, Memory,layout,layout_base2,grow_p,OPENFPM_NATIVE> & operator=(vector<T, Mem, layout, layout_base2,grow_p,OPENFPM_NATIVE> && mv)
{
#ifdef SE_CLASS2
check_valid(this,8);
#endif
v_size = mv.v_size;
base.swap(mv.base);
return *this;
}
/*! \brief Assignment operator
 *
 * copy assignment from a vector with a different memory layout
 *
 * \param mv vector to copy
 *
 * \return itself
 *
 */
template<typename Mem,
typename layout2,
template <typename> class layout_base2,
typename check = typename std::enable_if<!std::is_same<layout2,layout>::value >::type>
vector<T, Memory,layout,layout_base,grow_p,OPENFPM_NATIVE> &
operator=(const vector<T, Mem, layout2, layout_base2 ,grow_p,OPENFPM_NATIVE> & mv)
{
#ifdef SE_CLASS2
check_valid(this,8);
#endif
v_size = mv.getInternal_v_size();
size_t rsz[1] = {v_size};
base.resize(rsz);
// copy the object
for (size_t i = 0 ; i < v_size ; i++ )
{
grid_key_dx<1> key(i);
base.set_general(key,mv.getInternal_base(),key);
}
return *this;
}
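Note: the enable_if default template argument above removes this overload when layout2 and layout are the same type, so it never competes with the same-layout assignment. A small standalone sketch of that SFINAE constraint, with illustrative types:

#include <iostream>
#include <type_traits>

template<typename Layout>
struct holder
{
    Layout v;

    // Same-layout assignment: plain member-wise copy
    holder<Layout> & operator=(const holder<Layout> & o)
    {
        std::cout << "same-layout copy" << std::endl;
        v = o.v;
        return *this;
    }

    // Cross-layout assignment: enabled only when the layouts differ,
    // so it cannot be ambiguous with the overload above
    template<typename Layout2,
             typename check = typename std::enable_if<!std::is_same<Layout2,Layout>::value>::type>
    holder<Layout> & operator=(const holder<Layout2> & o)
    {
        std::cout << "cross-layout copy" << std::endl;
        v = static_cast<Layout>(o.v);
        return *this;
    }
};

int main()
{
    holder<double> a{1.5};
    holder<double> b{0.0};
    holder<int>    c{7};

    b = a;   // same-layout copy
    b = c;   // cross-layout copy (enable_if admits Layout2 = int)
}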
/*! \brief Check that two vectors are equal
*
* \param vector to compare
@@ -1527,7 +1635,7 @@ namespace openfpm
*/
template<unsigned int ... prp> void hostToDevice()
{
base.template hostToDevice<prp ...>(0,v_size-1);
base.template hostToDevice<prp ...>();
}
/*! \brief Synchronize the memory buffer in the device with the memory in the host
......
@@ -507,6 +507,7 @@ void scan(openfpm::vector<aggregate<cnt_type>,CudaMemory,typename memory_traits_
cl_n_scan.resize(raw_size);
}
#endif
#endif /* SCAN_CUDA_CUH_ */