PeriDyno 1.0.0
VkTransfer.inl
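Inline definitions of the dyno::vkTransfer helpers. Each overload copies element data between host containers (std::vector, VkHostArray) and Vulkan device buffers (VkDeviceArray, VkDeviceArray2D, VkDeviceArray3D) by recording a one-off primary command buffer and flushing it on the graphics queue.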
#include <assert.h>
#include <cstring>   // for memcpy, used by the std::vector download overloads
#include <vector>
#include "VkContext.h"

namespace dyno
{
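    //! Download: copy the full contents of a device array into a mapped host-side staging array of equal size.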
    template<typename T>
    bool vkTransfer(VkHostArray<T>& dst, const VkDeviceArray<T>& src)
    {
        VkContext* ctx = src.currentContext();

        assert(ctx != nullptr);
        assert(dst.currentContext() == src.currentContext());
        assert(dst.size() == src.size());

        // Copy from staging buffer
        VkCommandBuffer copyCmd = ctx->createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true);
        VkBufferCopy copyRegion = {};
        copyRegion.size = dst.size() * sizeof(T);
        vkCmdCopyBuffer(copyCmd, src.bufferHandle(), dst.bufferHandle(), 1, &copyRegion);

        /* VkBufferMemoryBarrier bufferBarrier = vks::initializers::bufferMemoryBarrier();
        bufferBarrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
        bufferBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
        bufferBarrier.srcQueueFamilyIndex = ctx->queueFamilyIndices.compute;
        bufferBarrier.dstQueueFamilyIndex = ctx->queueFamilyIndices.graphics;
        bufferBarrier.size = VK_WHOLE_SIZE;
        bufferBarrier.buffer = dst.bufferHandle();
        std::vector<VkBufferMemoryBarrier> bufferBarriers;
        bufferBarriers.push_back(bufferBarrier);

        vkCmdPipelineBarrier(
            copyCmd,
            VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
            VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
            VK_FLAGS_NONE,
            0, nullptr,
            static_cast<uint32_t>(bufferBarriers.size()), bufferBarriers.data(),
            0, nullptr); */

        ctx->flushCommandBuffer(copyCmd, ctx->graphicsQueueHandle(), true);

        return true;
    }

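    //! Full-buffer copy between two arrays that share the same Vulkan context and have equal size.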
    template<typename T>
    bool vkTransfer(VkDeviceArray<T>& dst, const VkHostArray<T>& src)	// inferred signature
    {
        VkContext* ctx = src.currentContext();

        assert(ctx != nullptr);
        assert(dst.currentContext() == src.currentContext());
        assert(dst.size() == src.size());

        // Copy from staging buffer
        VkCommandBuffer copyCmd = ctx->createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true);
        VkBufferCopy copyRegion = {};
        copyRegion.size = dst.size() * sizeof(T);
        vkCmdCopyBuffer(copyCmd, src.bufferHandle(), dst.bufferHandle(), 1, &copyRegion);

        /* VkBufferMemoryBarrier bufferBarrier = vks::initializers::bufferMemoryBarrier();
        bufferBarrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
        bufferBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
        bufferBarrier.srcQueueFamilyIndex = ctx->queueFamilyIndices.compute;
        bufferBarrier.dstQueueFamilyIndex = ctx->queueFamilyIndices.graphics;
        bufferBarrier.size = VK_WHOLE_SIZE;
        bufferBarrier.buffer = dst.bufferHandle();
        std::vector<VkBufferMemoryBarrier> bufferBarriers;
        bufferBarriers.push_back(bufferBarrier);

        vkCmdPipelineBarrier(
            copyCmd,
            VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
            VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
            VK_FLAGS_NONE,
            0, nullptr,
            static_cast<uint32_t>(bufferBarriers.size()), bufferBarriers.data(),
            0, nullptr); */

        ctx->flushCommandBuffer(copyCmd, ctx->graphicsQueueHandle(), true);

        return true;
    }

    //  VkBufferMemoryBarrier barrier{ VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER };
    //  barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    //  barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
    //  barrier.buffer = pair.dst;
    //  barrier.size = pair.src.m_size;
    //  vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0, 0, 1, &barrier, 0, 0);

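    //! Full-buffer copy, recorded into a one-off primary command buffer and flushed on the graphics queue.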
    template<typename T>
    bool vkTransfer(VkDeviceArray<T>& dst, const VkDeviceArray<T>& src)	// inferred signature
    {
        VkContext* ctx = src.currentContext();

        assert(ctx != nullptr);
        assert(dst.currentContext() == src.currentContext());
        assert(dst.size() == src.size());

        // Copy from staging buffer
        VkCommandBuffer copyCmd = ctx->createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true);
        VkBufferCopy copyRegion = {};
        copyRegion.size = dst.size() * sizeof(T);
        vkCmdCopyBuffer(copyCmd, src.bufferHandle(), dst.bufferHandle(), 1, &copyRegion);

        ctx->flushCommandBuffer(copyCmd, ctx->graphicsQueueHandle(), true);

        return true;
    }

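    //! Read a device array back into a std::vector, staged through a temporary VkHostArray.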
    template<typename T>
    bool vkTransfer(std::vector<T>& dst, const VkDeviceArray<T>& src)
    {
        VkContext* ctx = src.currentContext();

        assert(ctx != nullptr);
        assert(dst.size() == src.size());

        VkHostArray<T> vkHostSrc;
        vkHostSrc.resize(src.size());

        vkTransfer(vkHostSrc, src);

        memcpy(dst.data(), vkHostSrc.mapped(), sizeof(T) * src.size());

        vkHostSrc.clear();

        return true;
    }

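    //! Upload a std::vector to a device array, staged through a temporary VkHostArray.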
    template<typename T>
    bool vkTransfer(VkDeviceArray<T>& dst, const std::vector<T>& src)
    {
        VkContext* ctx = dst.currentContext();

        assert(ctx != nullptr);
        assert(dst.size() == src.size());

        VkHostArray<T> vkHostSrc;
        vkHostSrc.resize(src.size(), src.data());

        // Copy from staging buffer
        VkCommandBuffer copyCmd = ctx->createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true);
        VkBufferCopy copyRegion = {};
        copyRegion.size = dst.size() * sizeof(T);
        vkCmdCopyBuffer(copyCmd, vkHostSrc.bufferHandle(), dst.bufferHandle(), 1, &copyRegion);

        ctx->flushCommandBuffer(copyCmd, ctx->graphicsQueueHandle(), true);

        vkHostSrc.clear();

        return true;
    }

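    //! Full-buffer copy; both arrays must live in the same context and hold the same number of elements.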
    template<typename T>
    bool vkTransfer(VkHostArray<T>& dst, const VkDeviceArray2D<T>& src)	// inferred signature
    {
        VkContext* ctx = src.currentContext();

        assert(ctx != nullptr);
        assert(dst.currentContext() == src.currentContext());
        assert(dst.size() == src.size());

        // Copy from staging buffer
        VkCommandBuffer copyCmd = ctx->createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true);
        VkBufferCopy copyRegion = {};
        copyRegion.size = dst.size() * sizeof(T);
        vkCmdCopyBuffer(copyCmd, src.bufferHandle(), dst.bufferHandle(), 1, &copyRegion);

        ctx->flushCommandBuffer(copyCmd, ctx->graphicsQueueHandle(), true);

        return true;
    }

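    //! Ranged device-to-device copy: transfer copySize elements from src (starting at srcOffset) into dst (starting at dstOffset).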
    template<typename T>
    bool vkTransfer(VkDeviceArray<T>& dst, uint64_t dstOffset, const VkDeviceArray<T>& src, uint64_t srcOffset, uint64_t copySize)
    {
        if (copySize == 0)
            return false;

        VkContext* ctx = src.currentContext();

        assert(ctx != nullptr);
        assert(dst.currentContext() == src.currentContext());
        assert(dst.size() >= dstOffset + copySize);
        assert(src.size() >= srcOffset + copySize);

        VkCommandBuffer copyCmd = ctx->createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true);
        VkBufferCopy copyRegion = {};
        copyRegion.dstOffset = dstOffset * sizeof(T);
        copyRegion.srcOffset = srcOffset * sizeof(T);
        copyRegion.size = copySize * sizeof(T);	// copy only the requested range, not the whole source buffer
        vkCmdCopyBuffer(copyCmd, src.bufferHandle(), dst.bufferHandle(), 1, &copyRegion);

        ctx->flushCommandBuffer(copyCmd, ctx->graphicsQueueHandle(), true);

        return true;
    }

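    //! Read a 2D device array back into a std::vector, staged through a temporary VkHostArray.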
    template<typename T>
    bool vkTransfer(std::vector<T>& dst, const VkDeviceArray2D<T>& src)
    {
        VkContext* ctx = src.currentContext();

        assert(ctx != nullptr);
        assert(dst.size() == src.size());

        VkHostArray<T> vkHostSrc;
        vkHostSrc.resize(src.size());

        vkTransfer(vkHostSrc, src);

        memcpy(dst.data(), vkHostSrc.mapped(), sizeof(T) * src.size());

        vkHostSrc.clear();

        return true;
    }

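    //! Upload a std::vector into a 2D device array, staged through a temporary VkHostArray.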
    template<typename T>
    bool vkTransfer(VkDeviceArray2D<T>& dst, const std::vector<T>& src)
    {
        VkContext* ctx = dst.currentContext();

        assert(ctx != nullptr);
        assert(dst.size() == src.size());

        VkHostArray<T> vkHostSrc;
        vkHostSrc.resize(src.size(), src.data());

        // Copy from staging buffer
        VkCommandBuffer copyCmd = ctx->createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true);
        VkBufferCopy copyRegion = {};
        copyRegion.size = dst.size() * sizeof(T);
        vkCmdCopyBuffer(copyCmd, vkHostSrc.bufferHandle(), dst.bufferHandle(), 1, &copyRegion);

        ctx->flushCommandBuffer(copyCmd, ctx->graphicsQueueHandle(), true);

        vkHostSrc.clear();

        return true;
    }

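    //! Full-buffer copy into dst, recorded on dst's context and flushed on the graphics queue.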
    template<typename T>
    bool vkTransfer(VkDeviceArray2D<T>& dst, const VkDeviceArray2D<T>& src)	// inferred signature
    {
        VkContext* ctx = dst.currentContext();

        assert(ctx != nullptr);
        assert(dst.size() == src.size());

        // Copy from staging buffer
        VkCommandBuffer copyCmd = ctx->createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true);
        VkBufferCopy copyRegion = {};
        copyRegion.size = dst.size() * sizeof(T);
        vkCmdCopyBuffer(copyCmd, src.bufferHandle(), dst.bufferHandle(), 1, &copyRegion);

        ctx->flushCommandBuffer(copyCmd, ctx->graphicsQueueHandle(), true);

        return true;
    }

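    //! Upload a std::vector into a 3D device array, staged through a temporary VkHostArray.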
    template<typename T>
    bool vkTransfer(VkDeviceArray3D<T>& dst, const std::vector<T>& src)
    {
        VkContext* ctx = dst.currentContext();

        assert(ctx != nullptr);
        assert(dst.size() == src.size());

        VkHostArray<T> vkHostSrc;
        vkHostSrc.resize(src.size(), src.data());

        // Copy from staging buffer
        VkCommandBuffer copyCmd = ctx->createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true);
        VkBufferCopy copyRegion = {};
        copyRegion.size = dst.size() * sizeof(T);
        vkCmdCopyBuffer(copyCmd, vkHostSrc.bufferHandle(), dst.bufferHandle(), 1, &copyRegion);

        ctx->flushCommandBuffer(copyCmd, ctx->graphicsQueueHandle(), true);

        vkHostSrc.clear();

        return true;
    }

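    //! Full-buffer copy into dst from a source buffer of equal element count.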
    template<typename T>
    bool vkTransfer(VkDeviceArray3D<T>& dst, const VkDeviceArray3D<T>& src)	// inferred signature
    {
        VkContext* ctx = dst.currentContext();

        assert(ctx != nullptr);
        assert(dst.size() == src.size());

        // Copy from staging buffer
        VkCommandBuffer copyCmd = ctx->createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true);
        VkBufferCopy copyRegion = {};
        copyRegion.size = dst.size() * sizeof(T);
        vkCmdCopyBuffer(copyCmd, src.bufferHandle(), dst.bufferHandle(), 1, &copyRegion);

        ctx->flushCommandBuffer(copyCmd, ctx->graphicsQueueHandle(), true);

        return true;
    }

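    //! Full-buffer copy; both arrays must share a context and have the same number of elements.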
    template<typename T>
    bool vkTransfer(VkHostArray<T>& dst, const VkDeviceArray3D<T>& src)	// inferred signature
    {
        VkContext* ctx = src.currentContext();

        assert(ctx != nullptr);
        assert(dst.currentContext() == src.currentContext());
        assert(dst.size() == src.size());

        // Copy from staging buffer
        VkCommandBuffer copyCmd = ctx->createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true);
        VkBufferCopy copyRegion = {};
        copyRegion.size = dst.size() * sizeof(T);
        vkCmdCopyBuffer(copyCmd, src.bufferHandle(), dst.bufferHandle(), 1, &copyRegion);

        ctx->flushCommandBuffer(copyCmd, ctx->graphicsQueueHandle(), true);

        return true;
    }

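    //! Read a 3D device array back into a std::vector, staged through a temporary VkHostArray.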
    template<typename T>
    bool vkTransfer(std::vector<T>& dst, const VkDeviceArray3D<T>& src)
    {
        VkContext* ctx = src.currentContext();

        assert(ctx != nullptr);
        assert(dst.size() == src.size());

        VkHostArray<T> vkHostSrc;
        vkHostSrc.resize(src.size());

        vkTransfer(vkHostSrc, src);

        memcpy(dst.data(), vkHostSrc.mapped(), sizeof(T) * src.size());

        vkHostSrc.clear();

        return true;
    }
}
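A minimal usage sketch of the round trip these overloads provide, assuming a VkDeviceArray<float> named deviceBuf that has already been allocated with N elements elsewhere (allocation is not handled by this file):

    std::vector<float> hostData(N, 1.0f);
    dyno::vkTransfer(deviceBuf, hostData);   // upload: host -> device, staged through a VkHostArray

    std::vector<float> readback(N);
    dyno::vkTransfer(readback, deviceBuf);   // download: device -> host, staged through a VkHostArray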