// Deep copy from a GPU-resident array into this (host-side) array.
// Compare per-axis extents (nx/ny), NOT the flattened element count:
// src.size() == nx * ny, so e.g. a 2x3 source and a 3x2 destination
// would wrongly compare equal and skip the resize.
if (m_nx != src.nx() || m_ny != src.ny()) {
	this->resize(src.nx(), src.ny());
}

// Row-wise device->host copy. Host rows are densely packed, so the
// destination pitch is sizeof(T) * m_nx; the device source uses its
// own (possibly padded) pitch. Width is the payload bytes per row.
cuSafeCall(cudaMemcpy2D(m_data.data(), sizeof(T) * m_nx, src.begin(), src.pitch(),
	sizeof(T) * src.nx(), src.ny(), cudaMemcpyDeviceToHost));
// Raw pointer to the first element of the pitched device buffer.
// Note: rows are m_pitch bytes apart, which may exceed sizeof(T) * nx,
// so this pointer must not be treated as a dense nx*ny range.
inline T* begin() const { return m_data; }
// Number of elements per row (the x extent).
DYN_FUNC inline uint nx() const { return m_nx; }
// Number of rows (the y extent).
DYN_FUNC inline uint ny() const { return m_ny; }
// Row stride in bytes, as reported by cudaMallocPitch in resize();
// use this (not sizeof(T) * nx) for any byte-level row addressing.
DYN_FUNC inline uint pitch() const { return m_pitch; }
// Read-only element access at column i, row j (device code only).
// Rows are pitched: row j starts at byte offset j * m_pitch, so the
// address math goes through char* before re-casting to T*. The offset
// is widened to size_t to avoid 32-bit overflow on large arrays.
// NOTE(review): body reconstructed from the truncated chunk (only the
// addr line was visible) — confirm against the upstream implementation.
GPU_FUNC inline T operator () (const uint i, const uint j) const
{
	char* addr = (char*)m_data;
	return ((T*)(addr + (size_t)j * m_pitch))[i];
}
// Mutable element access at column i, row j (device code only).
// Same pitched addressing as the const overload: row j begins at byte
// offset j * m_pitch from the base pointer.
// NOTE(review): body reconstructed from the truncated chunk (only the
// addr line was visible) — confirm against the upstream implementation.
GPU_FUNC inline T& operator () (const uint i, const uint j)
{
	char* addr = (char*)m_data;
	return ((T*)(addr + (size_t)j * m_pitch))[i];
}
// NOTE(review): flat-index helper for element (i, j); the body is not
// visible in this chunk (truncated extraction), so the exact formula —
// presumably i + j * (row stride in elements) — must be confirmed
// against the upstream implementation before relying on it.
60 DYN_FUNC
inline int index(
const uint i,
const uint j)
const
// NOTE(review): read-only linear access by flat id; body truncated in
// this chunk. Verify whether it indexes m_data[id] directly — with a
// padded pitch that would skip the per-row padding incorrectly.
65 GPU_FUNC
inline T operator [] (
const uint id)
const
// NOTE(review): mutable linear access by flat id; body truncated in
// this chunk — same pitch-vs-dense caveat as the const overload.
70 GPU_FUNC
inline T& operator [] (
const uint id)
// Total number of logical elements (nx * ny); pitch padding excluded.
DYN_FUNC inline uint size() const { return m_nx * m_ny; }
// This specialization is device-resident, so it is never a CPU array.
DYN_FUNC inline bool isCPU() const { return false; }
// Identifies this specialization as GPU (device) memory.
DYN_FUNC inline bool isGPU() const { return true; }
// Deep-copies from another GPU-resident 2D array (device-to-device).
void assign(const Array2D<T, DeviceType::GPU>& src);

// Deep-copies from a CPU-resident 2D array (host-to-device).
void assign(const Array2D<T, DeviceType::CPU>& src);
// Free any previous allocation before acquiring a new one.
if (nullptr != m_data) clear();

// cudaMallocPitch pads each row for aligned, coalesced access and
// reports the actual row stride in bytes. Receive it in a genuine
// size_t: the original cast of &m_pitch to size_t* is undefined
// behavior whenever m_pitch is not exactly a size_t (the runtime
// writes sizeof(size_t) bytes through the pointer).
size_t pitchInBytes = 0;
cuSafeCall(cudaMallocPitch((void**)&m_data, &pitchInBytes,
	(size_t)sizeof(T) * nx, (size_t)ny));
m_pitch = static_cast<decltype(m_pitch)>(pitchInBytes);
// Zero every element of the array.
template <typename T>
void Array2D<T, DeviceType::GPU>::reset()
{
	// Zero the whole pitched buffer, padding included: m_pitch is the
	// row stride in bytes and m_ny the row count. The product is widened
	// to size_t to avoid 32-bit overflow on large allocations.
	// cudaMemset is byte-wise, so this is only a true fill for zero.
	cuSafeCall(cudaMemset((void*)m_data, 0, (size_t)m_pitch * m_ny));
}
// Release the device allocation, if any.
if (m_data != nullptr)
{
	cuSafeCall(cudaFree((void*)m_data));
	// Null the pointer so the nullptr guards in resize()/clear()
	// cannot lead to a double free on a second call.
	m_data = nullptr;
}
// Deep copy from another GPU-resident array.
// Match the source shape before copying (compare each axis separately).
if (m_nx != src.nx() || m_ny != src.ny()) {
	this->resize(src.nx(), src.ny());
}

// Pitched device-to-device copy: width is the payload bytes per row
// (sizeof(T) * nx); source and destination each use their own pitch.
cuSafeCall(cudaMemcpy2D(m_data, m_pitch, src.begin(), src.pitch(),
	sizeof(T) * src.nx(), src.ny(), cudaMemcpyDeviceToDevice));
// Deep copy from a CPU-resident array.
// Match the source shape before copying (compare each axis separately).
if (m_nx != src.nx() || m_ny != src.ny()) {
	this->resize(src.nx(), src.ny());
}

// Host-to-device pitched copy: host rows are densely packed, so the
// source pitch equals the payload width, sizeof(T) * src.nx(); the
// destination uses this array's device pitch.
cuSafeCall(cudaMemcpy2D(m_data, m_pitch, src.begin(), sizeof(T) * src.nx(),
	sizeof(T) * src.nx(), src.ny(), cudaMemcpyHostToDevice));
// This is an implementation of a pitched 2D device array
// (Array2D<T, DeviceType::GPU>) based on peridyno.
// Convenience alias: DArray2D<T> names the device (GPU) specialization.
// NOTE(review): reconstructed as a template alias from the truncated
// text "Array2D< T, DeviceType::GPU > DArray2D" — confirm the exact
// upstream form.
template <typename T>
using DArray2D = Array2D<T, DeviceType::GPU>;