Commit 03255a6d authored by incardon

Adding Direct GPU test

parent ad71d154
......@@ -101,6 +101,12 @@ else
NVCCFLAGS="$NVCCFLAGS -O3 "
fi
#########
## Check for MPI
ACX_MPI()
###### Check for se-class1
AC_MSG_CHECKING(whether to build with security enhancement class1)
......
......@@ -67,6 +67,39 @@ if test x = x"$MPILIBS"; then
AC_TRY_LINK([],[ call MPI_Init], [MPILIBS=" "
AC_MSG_RESULT(yes)], [AC_MSG_RESULT(no)])])
fi
INC_PATH=$(mpic++ --showme:compile)
LIB_DIRS=$(mpic++ --showme:libdirs)
LIBS_MPI_LIST=$(mpic++ --showme:libs)
IFS=' ' read -r -a array <<< "$INC_PATH"
MPI_INC_PATH=""
for element in "${array[[@]]}"
do
if [[ x"$element" != x"-pthread" ]]; then
MPI_INC_PATH="$MPI_INC_PATH $element"
fi
done
IFS=' ' read -r -a array <<< "$LIB_DIRS"
MPI_LIB_DIRS=""
for element in "${array[[@]]}"
do
MPI_LIB_DIRS="$MPI_LIB_DIRS -L$element"
done
IFS=' ' read -r -a array <<< "$LIBS"
LIBS_MPI_LIST=""
for element in "${array[[@]]}"
do
MPI_LIBS="$LIBS_MPI_LIST -l$element"
done
AC_SUBST(MPI_INC_PATH)
AC_SUBST(MPI_LIB_DIRS)
AC_SUBST(MPI_LIBS)
if test x = x"$MPILIBS"; then
AC_CHECK_LIB(mpi, MPI_Init, [MPILIBS="-lmpi"])
fi
......
......@@ -2,14 +2,16 @@
LINKLIBS = $(DEFAULT_LIB) $(PTHREAD_LIBS) $(OPT_LIBS) $(HDF5_LDFLAGS) $(HDF5_LIBS) $(BOOST_LDFLAGS) $(CUDA_LIBS)
if BUILDCUDA
CUDA_SOURCES=../../openfpm_devices/src/memory/CudaMemory.cu
CUDA_SOURCES=../../openfpm_devices/src/memory/CudaMemory.cu VCluster/cuda/VCluster_semantic_unit_cuda_tests.cu
else
CUDA_SOURCES=
endif
FLAGS_NVCC = -Xcudafe "--display_error_number --diag_suppress=2888 --diag_suppress=111 --diag_suppress=186 " $(MPI_INC_PATH)
noinst_PROGRAMS = vcluster_test
vcluster_test_SOURCES = main.cpp VCluster/VCluster.cpp ../../openfpm_devices/src/memory/HeapMemory.cpp ../../openfpm_devices/src/memory/PtrMemory.cpp ../../openfpm_devices/src/Memleak_check.cpp $(CUDA_SOURCES)
vcluster_test_CXXFLAGS = -Wunknown-pragmas $(AM_CXXFLAGS) $(INCLUDES_PATH) $(BOOST_CPPFLAGS) $(CUDA_CFLAGS)
vcluster_test_CXXFLAGS = -Wunknown-pragmas $(AM_CXXFLAGS) $(INCLUDES_PATH) $(MPI_INC_PATH) $(BOOST_CPPFLAGS) $(CUDA_CFLAGS)
vcluster_test_CFLAGS = $(CUDA_CFLAGS)
vcluster_test_LDADD = $(LINKLIBS)
......@@ -23,7 +25,7 @@ VCluster/VCluster_base.hpp VCluster/VCluster.hpp VCluster/VCluster_meta_function
util/Vcluster_log.hpp
.cu.o :
$(NVCC) $(NVCCFLAGS) $(INCLUDES_PATH) -o $@ -c $<
$(NVCC) $(NVCCFLAGS) $(FLAGS_NVCC) $(INCLUDES_PATH) -o $@ -c $<
test: vcluster_test
source $(HOME)/openfpm_vars && cd .. && mpirun -np 3 ./src/vcluster_test && mpirun -np 4 ./src/vcluster_test
......
......@@ -49,9 +49,10 @@ class Vcluster: public Vcluster_base
typename T,
typename S,
template <typename> class layout_base = memory_traits_lin>
inline static void process_recv(Vcluster & vcl, S & recv, openfpm::vector<size_t> * sz_recv, openfpm::vector<size_t> * sz_recv_byte, op & op_param)
inline static void process_recv(Vcluster & vcl, S & recv, openfpm::vector<size_t> * sz_recv,
openfpm::vector<size_t> * sz_recv_byte, op & op_param,size_t opt)
{
vcl.process_receive_buffer_with_prp<op,T,S,layout_base,prp...>(recv,sz_recv,sz_recv_byte,op_param);
vcl.process_receive_buffer_with_prp<op,T,S,layout_base,prp...>(recv,sz_recv,sz_recv_byte,op_param,opt);
}
};
......@@ -303,12 +304,13 @@ class Vcluster: public Vcluster_base
void process_receive_buffer_with_prp(S & recv,
openfpm::vector<size_t> * sz,
openfpm::vector<size_t> * sz_byte,
op & op_param)
op & op_param,
size_t opt)
{
if (sz != NULL)
sz->resize(recv_buf.size());
pack_unpack_cond_with_prp<has_max_prop<T, has_value_type<T>::value>::value,op, T, S, layout_base, prp... >::unpacking(recv, recv_buf, sz, sz_byte, op_param);
pack_unpack_cond_with_prp<has_max_prop<T, has_value_type<T>::value>::value,op, T, S, layout_base, prp... >::unpacking(recv, recv_buf, sz, sz_byte, op_param,opt);
}
public:
......@@ -434,7 +436,7 @@ class Vcluster: public Vcluster_base
// Reorder the buffer
reorder_buffer(prc,tags,sz);
index_gen<ind_prop_to_pack>::template process_recv<op_ssend_recv_add<void>,T,S,layout_base>(*this,recv,&sz,NULL,opa);
index_gen<ind_prop_to_pack>::template process_recv<op_ssend_recv_add<void>,T,S,layout_base>(*this,recv,&sz,NULL,opa,0);
recv.add(send);
prc.add(root);
......@@ -550,7 +552,7 @@ class Vcluster: public Vcluster_base
// operation object
op_ssend_recv_add<void> opa;
index_gen<ind_prop_to_pack>::template process_recv<op_ssend_recv_add<void>,T,S,layout_base>(*this,recv,NULL,NULL,opa);
index_gen<ind_prop_to_pack>::template process_recv<op_ssend_recv_add<void>,T,S,layout_base>(*this,recv,NULL,NULL,opa,0);
}
else
{
......@@ -571,7 +573,7 @@ class Vcluster: public Vcluster_base
// operation object
op_ssend_recv_add<void> opa;
index_gen<ind_prop_to_pack>::template process_recv<op_ssend_recv_add<void>,T,S,layout_base>(*this,recv,NULL,NULL,opa);
index_gen<ind_prop_to_pack>::template process_recv<op_ssend_recv_add<void>,T,S,layout_base>(*this,recv,NULL,NULL,opa,0);
}
return true;
......@@ -694,7 +696,7 @@ class Vcluster: public Vcluster_base
op_ssend_recv_add<void> opa;
index_gen<ind_prop_to_pack>::template process_recv<op_ssend_recv_add<void>,T,S,layout_base>(*this,recv,&sz_recv,NULL,opa);
index_gen<ind_prop_to_pack>::template process_recv<op_ssend_recv_add<void>,T,S,layout_base>(*this,recv,&sz_recv,NULL,opa,opt);
return true;
}
......@@ -789,7 +791,7 @@ class Vcluster: public Vcluster_base
op_ssend_recv_add<void> opa;
// process the received information
process_receive_buffer_with_prp<op_ssend_recv_add<void>,T,S,layout_base,prp...>(recv,&sz_recv,NULL,opa);
process_receive_buffer_with_prp<op_ssend_recv_add<void>,T,S,layout_base,prp...>(recv,&sz_recv,NULL,opa,opt);
return true;
}
......
......@@ -3,6 +3,7 @@
#include "config.h"
#include <mpi.h>
#include <mpi-ext.h>
#include "MPI_wrapper/MPI_util.hpp"
#include "Vector/map_vector.hpp"
#include "MPI_wrapper/MPI_IallreduceW.hpp"
......
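The MPI_GPU_DIRECT option that the hunks below test with (opt & MPI_GPU_DIRECT) is presumably brought in through the newly included MPI_wrapper/MPI_util.hpp. A purely hypothetical sketch of the kind of definition involved (the actual header and value may differ):

// Hypothetical illustration only, not taken from this commit: a bit flag that callers
// OR into the size_t 'opt' argument and that the unpack path tests with (opt & MPI_GPU_DIRECT).
// The real definition lives elsewhere in the code base and its value may differ.
constexpr size_t MPI_GPU_DIRECT = 0x1;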
......@@ -20,7 +20,8 @@ struct unpack_selector_with_prp
openfpm::vector<BHeapMemory> & recv_buf,
openfpm::vector<size_t> * sz,
openfpm::vector<size_t> * sz_byte,
op & op_param)
op & op_param,
size_t opt)
{
if (sz_byte != NULL)
sz_byte->resize(recv_buf.size());
......@@ -39,7 +40,7 @@ struct unpack_selector_with_prp
size_t recv_size_old = recv.size();
// Merge the information
op_param.template execute<true,T,decltype(recv),decltype(unp),layout_base,prp...>(recv,unp,i);
op_param.template execute<true,T,decltype(recv),decltype(unp),layout_base,prp...>(recv,unp,i,opt);
size_t recv_size_new = recv.size();
......@@ -184,7 +185,8 @@ struct unpack_selector_with_prp_lin
openfpm::vector<size_t> * sz,
openfpm::vector<size_t> * sz_byte,
op & op_param,
size_t i)
size_t i,
size_t opt)
{
// create a vector representation over a piece of already allocated memory
openfpm::vector<typename T::value_type,PtrMemory,typename layout_base<typename T::value_type>::type,layout_base,openfpm::grow_policy_identity> v2;
......@@ -199,7 +201,7 @@ struct unpack_selector_with_prp_lin
size_t recv_size_old = recv.size();
op_param.template execute<false,T,decltype(recv),decltype(v2),layout_base,prp...>(recv,v2,i);
op_param.template execute<false,T,decltype(recv),decltype(v2),layout_base,prp...>(recv,v2,i,opt);
size_t recv_size_new = recv.size();
......@@ -220,7 +222,8 @@ struct unpack_selector_with_prp_lin<true,T,S,layout_base>
openfpm::vector<size_t> * sz,
openfpm::vector<size_t> * sz_byte,
op & op_param,
size_t i)
size_t i,
size_t opt)
{
// calculate the number of received elements
size_t n_ele = recv_buf.get(i).size() / sizeof(typename T::value_type);
......@@ -240,7 +243,7 @@ struct unpack_selector_with_prp_lin<true,T,S,layout_base>
size_t recv_size_old = recv.size();
op_param.template execute<false,T,decltype(recv),decltype(v2),layout_base,prp...>(recv,v2,i);
op_param.template execute<false,T,decltype(recv),decltype(v2),layout_base,prp...>(recv,v2,i,opt);
size_t recv_size_new = recv.size();
......@@ -263,14 +266,15 @@ struct unpack_selector_with_prp<true,T,S,layout_base>
openfpm::vector<BHeapMemory> & recv_buf,
openfpm::vector<size_t> * sz,
openfpm::vector<size_t> * sz_byte,
op & op_param)
op & op_param,
size_t opt)
{
if (sz_byte != NULL)
sz_byte->resize(recv_buf.size());
for (size_t i = 0 ; i < recv_buf.size() ; )
{
i += unpack_selector_with_prp_lin<is_layout_mlin<layout_base<dummy_type>>::value,T,S,layout_base>::template call_unpack_impl<op,prp...>(recv,recv_buf,sz,sz_byte,op_param,i);
i += unpack_selector_with_prp_lin<is_layout_mlin<layout_base<dummy_type>>::value,T,S,layout_base>::template call_unpack_impl<op,prp...>(recv,recv_buf,sz,sz_byte,op_param,i,opt);
}
}
};
......@@ -297,11 +301,12 @@ struct call_serialize_variadic<index_tuple<prp...>>
openfpm::vector<BHeapMemory> & recv_buf,
openfpm::vector<size_t> * sz,
openfpm::vector<size_t> * sz_byte,
op & op_param)
op & op_param,
size_t opt)
{
const bool result = has_pack_gen<typename T::value_type>::value == false && is_vector<T>::value == true;
unpack_selector_with_prp<result, T, S,layout_base>::template call_unpack<op,prp...>(recv, recv_buf, sz, sz_byte, op_param);
unpack_selector_with_prp<result, T, S,layout_base>::template call_unpack<op,prp...>(recv, recv_buf, sz, sz_byte, op_param,opt);
}
};
......@@ -342,7 +347,7 @@ struct set_buf_pointer_for_each_prop
if (opt & MPI_GPU_DIRECT)
{
#if defined(MPIX_CUDA_AWARE_SUPPORT) && MPIX_CUDA_AWARE_SUPPORT
send_buf.add(v.template getDevicePointer<T::value>());
send_buf.add(v.template getDeviceBuffer<T::value>());
#else
v.template deviceToHost<T::value>();
send_buf.add(v.template getPointer<T::value>());
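Aside, not part of this commit: besides the compile-time MPIX_CUDA_AWARE_SUPPORT guard used above, Open MPI also exposes a run-time query in <mpi-ext.h> (the header already included in VCluster_base.hpp above). A minimal sketch, assuming an Open MPI build:

#include <mpi.h>
#include <mpi-ext.h>   // Open MPI extension header

// Returns true when the MPI library reports CUDA-aware (GPU direct) support at run time.
static bool mpi_has_cuda_support()
{
#if defined(MPIX_CUDA_AWARE_SUPPORT) && MPIX_CUDA_AWARE_SUPPORT
        return MPIX_Query_cuda_support() == 1;
#else
        return false;
#endif
}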
......@@ -485,10 +490,11 @@ struct pack_unpack_cond_with_prp
openfpm::vector<BHeapMemory> & recv_buf,
openfpm::vector<size_t> * sz,
openfpm::vector<size_t> * sz_byte,
op & op_param)
op & op_param,
size_t opt)
{
typedef index_tuple<prp...> ind_prop_to_pack;
call_serialize_variadic<ind_prop_to_pack>::template call_unpack<op,T,S,layout_base>(recv, recv_buf, sz, sz_byte, op_param);
call_serialize_variadic<ind_prop_to_pack>::template call_unpack<op,T,S,layout_base>(recv, recv_buf, sz, sz_byte, op_param,opt);
}
};
......@@ -504,15 +510,44 @@ struct op_ssend_recv_add_sr
typename D,
typename S,
template <typename> class layout_base,
int ... prp> static void execute(D & recv,S & v2, size_t i)
int ... prp> static void execute(D & recv,S & v2, size_t i, size_t opt)
{
// Merge the information
recv.template add_prp<typename T::value_type,
if (opt & MPI_GPU_DIRECT)
{
#if defined(MPIX_CUDA_AWARE_SUPPORT) && MPIX_CUDA_AWARE_SUPPORT
// Merge the information
recv.template add_prp_device<typename T::value_type,
PtrMemory,
openfpm::grow_policy_identity,
openfpm::vect_isel<typename T::value_type>::value,
layout_base,
prp...>(v2);
#else
// Merge the information
recv.template add_prp<typename T::value_type,
PtrMemory,
openfpm::grow_policy_identity,
openfpm::vect_isel<typename T::value_type>::value,
layout_base,
prp...>(v2);
recv.template hostToDevice<prp...>();
#endif
}
else
{
// Merge the information
recv.template add_prp<typename T::value_type,
PtrMemory,
openfpm::grow_policy_identity,
openfpm::vect_isel<typename T::value_type>::value,
layout_base,
prp...>(v2);
}
}
};
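A minimal caller-side sketch of the contract this branch implements, reusing the types and calls of the test further down: with MPI_GPU_DIRECT the merged result is guaranteed to be valid on the device, either because it was received there directly or because it was mirrored with hostToDevice, so the receiver reads it back before checking:

// Sketch only; vd, prc_send, prc_recv, sz_recv and v_cl are prepared as in the test below.
openfpm::vector_gpu<aggregate<float,float[3]>> collect;
v_cl.SSendRecv<openfpm::vector_gpu_single<aggregate<float,float[3]>>,decltype(collect),memory_traits_inte>
              (vd,collect,prc_send,prc_recv,sz_recv,MPI_GPU_DIRECT);
collect.template deviceToHost<0,1>();   // the host copy is only valid after this read-back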
......@@ -526,7 +561,7 @@ struct op_ssend_recv_add_sr<true>
typename S,
template <typename> class layout_base,
int ... prp>
static void execute(D & recv,S & v2, size_t i)
static void execute(D & recv,S & v2, size_t i,size_t opt)
{
// Merge the information
recv.template add_prp<typename T::value_type,
......@@ -549,10 +584,10 @@ struct op_ssend_recv_add
typename S,
template <typename> class layout_base,
int ... prp>
static void execute(D & recv,S & v2, size_t i)
static void execute(D & recv,S & v2, size_t i, size_t opt)
{
// Merge the information
op_ssend_recv_add_sr<sr>::template execute<T,D,S,layout_base,prp...>(recv,v2,i);
op_ssend_recv_add_sr<sr>::template execute<T,D,S,layout_base,prp...>(recv,v2,i,opt);
}
};
......
......@@ -10,6 +10,7 @@
#include "Grid/grid_util_test.hpp"
#include "data_type/aggregate.hpp"
#include "VCluster/cuda/VCluster_semantic_unit_tests_funcs.hpp"
//! Example structure
struct Aexample
......@@ -1631,92 +1632,12 @@ BOOST_AUTO_TEST_CASE (Vcluster_semantic_sendrecv_6)
}
}
void test_ssend_recv_layout_switch(size_t opt)
{
auto & v_cl = create_vcluster();
if (v_cl.size() > 10) {return;}
openfpm::vector<openfpm::vector_gpu_single<aggregate<float,float[3]>>> vd;
openfpm::vector_gpu<aggregate<float,float[3]>> collect;
openfpm::vector_gpu<aggregate<float,float[3]>> collect2;
openfpm::vector<size_t> prc_send;
openfpm::vector<size_t> prc_recv;
openfpm::vector<size_t> sz_recv;
vd.resize(v_cl.size());
for (size_t i = 0 ; i < vd.size() ; i++)
{
vd.get(i).resize(100);
for (size_t j = 0 ; j < vd.get(i).size() ; j++)
{
vd.get(i).template get<0>(j) = 10000*i + v_cl.rank()*100 + j;
vd.get(i).template get<1>(j)[0] = 400000 + 10000*i + v_cl.rank()*100 + j;
vd.get(i).template get<1>(j)[1] = 400000 + 10000*i + v_cl.rank()*100 + j;
vd.get(i).template get<1>(j)[2] = 400000 + 10000*i + v_cl.rank()*100 + j;
}
prc_send.add(i);
if (opt & MPI_GPU_DIRECT)
{vd.get(i).template hostToDevice<0,1>();}
}
v_cl.SSendRecv<openfpm::vector_gpu_single<aggregate<float,float[3]>>,decltype(collect),memory_traits_inte>
(vd,collect,prc_send, prc_recv,sz_recv,opt);
v_cl.SSendRecvP<openfpm::vector_gpu_single<aggregate<float,float[3]>>,decltype(collect),memory_traits_inte,0,1>
(vd,collect2,prc_send, prc_recv,sz_recv,opt);
// now we check what we received
// collect must have 100 * v_cl.size()
BOOST_REQUIRE_EQUAL(collect.size(),100*v_cl.size());
BOOST_REQUIRE_EQUAL(collect2.size(),100*v_cl.size());
// check what we received
bool match = true;
for (size_t i = 0 ; i < v_cl.size() ; i++)
{
if (opt & MPI_GPU_DIRECT)
{vd.get(i).template deviceToHost<0,1>();}
for (size_t j = 0 ; j < 100 ; j++)
{
match &= collect.template get<0>(i*100 +j) == v_cl.rank()*10000 + i*100 + j;
match &= collect.template get<1>(i*100 +j)[0] == 400000 + v_cl.rank()*10000 + i*100 + j;
match &= collect.template get<1>(i*100 +j)[1] == 400000 + v_cl.rank()*10000 + i*100 + j;
match &= collect.template get<1>(i*100 +j)[2] == 400000 + v_cl.rank()*10000 + i*100 + j;
match &= collect2.template get<0>(i*100 +j) == v_cl.rank()*10000 + i*100 + j;
match &= collect2.template get<1>(i*100 +j)[0] == 400000 + v_cl.rank()*10000 + i*100 + j;
match &= collect2.template get<1>(i*100 +j)[1] == 400000 + v_cl.rank()*10000 + i*100 + j;
match &= collect2.template get<1>(i*100 +j)[2] == 400000 + v_cl.rank()*10000 + i*100 + j;
}
if (match == false){break;}
}
BOOST_REQUIRE_EQUAL(match,true);
}
BOOST_AUTO_TEST_CASE( Vcluster_semantic_ssend_recv_layout_switch )
{
test_ssend_recv_layout_switch(0);
}
BOOST_AUTO_TEST_CASE( Vcluster_semantic_gpu_direct )
{
test_ssend_recv_layout_switch(MPI_GPU_DIRECT);
}
BOOST_AUTO_TEST_SUITE_END()
......
#include "config.h"
#define BOOST_TEST_DYN_LINK
#include <boost/test/unit_test.hpp>
#include "VCluster/VCluster.hpp"
#include "VCluster/cuda/VCluster_semantic_unit_tests_funcs.hpp"
void test_ssend_recv_layout_switch(size_t opt)
{
auto & v_cl = create_vcluster();
if (v_cl.size() > 10) {return;}
openfpm::vector<openfpm::vector_gpu_single<aggregate<float,float[3]>>> vd;
openfpm::vector_gpu<aggregate<float,float[3]>> collect;
openfpm::vector_gpu<aggregate<float,float[3]>> collect2;
openfpm::vector<size_t> prc_send;
openfpm::vector<size_t> prc_recv;
openfpm::vector<size_t> sz_recv;
vd.resize(v_cl.size());
for (size_t i = 0 ; i < vd.size() ; i++)
{
vd.get(i).resize(100);
for (size_t j = 0 ; j < vd.get(i).size() ; j++)
{
vd.get(i).template get<0>(j) = 10000*i + v_cl.rank()*100 + j;
vd.get(i).template get<1>(j)[0] = 400000 + 10000*i + v_cl.rank()*100 + j;
vd.get(i).template get<1>(j)[1] = 400000 + 10000*i + v_cl.rank()*100 + j;
vd.get(i).template get<1>(j)[2] = 400000 + 10000*i + v_cl.rank()*100 + j;
}
prc_send.add(i);
if (opt & MPI_GPU_DIRECT)
{
vd.get(i).template hostToDevice<0,1>();
// Reset host
for (size_t j = 0 ; j < vd.get(i).size() ; j++)
{
vd.get(i).template get<0>(j) = 0.0;
vd.get(i).template get<1>(j)[0] = 0.0;
vd.get(i).template get<1>(j)[1] = 0.0;
vd.get(i).template get<1>(j)[2] = 0.0;
}
}
}
v_cl.SSendRecv<openfpm::vector_gpu_single<aggregate<float,float[3]>>,decltype(collect),memory_traits_inte>
(vd,collect,prc_send, prc_recv,sz_recv,opt);
v_cl.SSendRecvP<openfpm::vector_gpu_single<aggregate<float,float[3]>>,decltype(collect),memory_traits_inte,0,1>
(vd,collect2,prc_send, prc_recv,sz_recv,opt);
// collect must have 100 * v_cl.size()
BOOST_REQUIRE_EQUAL(collect.size(),100*v_cl.size());
BOOST_REQUIRE_EQUAL(collect2.size(),100*v_cl.size());
// reset the host copy of the collected data when the data is expected to be on the device
if (opt & MPI_GPU_DIRECT)
{
for (size_t j = 0 ; j < collect.size() ; j++)
{
collect.template get<0>(j) = 0.0;
collect.template get<1>(j)[0] = 0.0;
collect.template get<1>(j)[1] = 0.0;
collect.template get<1>(j)[2] = 0.0;
collect2.template get<0>(j) = 0.0;
collect2.template get<1>(j)[0] = 0.0;
collect2.template get<1>(j)[1] = 0.0;
collect2.template get<1>(j)[2] = 0.0;
}
}
// from device to host
if (opt & MPI_GPU_DIRECT)
{
collect.template deviceToHost<0,1>();
collect2.template deviceToHost<0,1>();
}
// now we check what we received
bool match = true;
for (size_t i = 0 ; i < v_cl.size() ; i++)
{
for (size_t j = 0 ; j < 100 ; j++)
{
match &= collect.template get<0>(i*100 +j) == v_cl.rank()*10000 + i*100 + j;
match &= collect.template get<1>(i*100 +j)[0] == 400000 + v_cl.rank()*10000 + i*100 + j;
match &= collect.template get<1>(i*100 +j)[1] == 400000 + v_cl.rank()*10000 + i*100 + j;
match &= collect.template get<1>(i*100 +j)[2] == 400000 + v_cl.rank()*10000 + i*100 + j;
match &= collect2.template get<0>(i*100 +j) == v_cl.rank()*10000 + i*100 + j;
match &= collect2.template get<1>(i*100 +j)[0] == 400000 + v_cl.rank()*10000 + i*100 + j;
match &= collect2.template get<1>(i*100 +j)[1] == 400000 + v_cl.rank()*10000 + i*100 + j;
match &= collect2.template get<1>(i*100 +j)[2] == 400000 + v_cl.rank()*10000 + i*100 + j;
}
if (match == false){break;}
}
BOOST_REQUIRE_EQUAL(match,true);
}
BOOST_AUTO_TEST_SUITE( VCluster_cuda_tests )
BOOST_AUTO_TEST_CASE( Vcluster_semantic_gpu_direct )
{
test_ssend_recv_layout_switch(MPI_GPU_DIRECT);
}
BOOST_AUTO_TEST_SUITE_END()
/*
* VCluster_semantic_unit_tests_funcs.hpp
*
* Created on: Aug 18, 2018
* Author: i-bird
*/
#ifndef VCLUSTER_SEMANTIC_UNIT_TESTS_FUNCS_HPP_
#define VCLUSTER_SEMANTIC_UNIT_TESTS_FUNCS_HPP_
void test_ssend_recv_layout_switch(size_t opt);
#endif /* VCLUSTER_SEMANTIC_UNIT_TESTS_FUNCS_HPP_ */