// Grid 0.7.0 — Lattice_slicesum_core.h
// (Extracted from the Doxygen documentation page for this file.)
1#pragma once
2
3#if defined(GRID_CUDA)
4
5#include <cub/cub.cuh>
6#define gpucub cub
7#define gpuError_t cudaError_t
8#define gpuSuccess cudaSuccess
9
10#elif defined(GRID_HIP)
11
12#include <hipcub/hipcub.hpp>
13#define gpucub hipcub
14#define gpuError_t hipError_t
15#define gpuSuccess hipSuccess
16
17#endif
18
19
21
22
23#if defined(GRID_CUDA) || defined(GRID_HIP)
24template<class vobj>
25inline void sliceSumReduction_cub_small(const vobj *Data,
26 std::vector<vobj> &lvSum,
27 const int rd,
28 const int e1,
29 const int e2,
30 const int stride,
31 const int ostride,
32 const int Nsimd)
33{
34 size_t subvol_size = e1*e2;
35 deviceVector<vobj> reduction_buffer(rd*subvol_size);
36 auto rb_p = &reduction_buffer[0];
37 vobj zero_init;
38 zeroit(zero_init);
39
40
41 void *temp_storage_array = NULL;
42 size_t temp_storage_bytes = 0;
43 vobj *d_out;
44 int* d_offsets;
45
46 std::vector<int> offsets(rd+1,0);
47
48 for (int i = 0; i < offsets.size(); i++) {
49 offsets[i] = i*subvol_size;
50 }
51
52 //Allocate memory for output and offset arrays on device
53 d_out = static_cast<vobj*>(acceleratorAllocDevice(rd*sizeof(vobj)));
54
55 d_offsets = static_cast<int*>(acceleratorAllocDevice((rd+1)*sizeof(int)));
56
57 //copy offsets to device
58 acceleratorCopyToDeviceAsynch(&offsets[0],d_offsets,sizeof(int)*(rd+1),computeStream);
59
60
61 gpuError_t gpuErr = gpucub::DeviceSegmentedReduce::Reduce(temp_storage_array, temp_storage_bytes, rb_p,d_out, rd, d_offsets, d_offsets+1, ::gpucub::Sum(), zero_init, computeStream);
62 if (gpuErr!=gpuSuccess) {
63 std::cout << GridLogError << "Lattice_slicesum_gpu.h: Encountered error during gpucub::DeviceSegmentedReduce::Reduce (setup)! Error: " << gpuErr <<std::endl;
64 exit(EXIT_FAILURE);
65 }
66
67 //allocate memory for temp_storage_array
68 temp_storage_array = acceleratorAllocDevice(temp_storage_bytes);
69
70 //prepare buffer for reduction
71 //use non-blocking accelerator_for to avoid syncs (ok because we submit to same computeStream)
72 //use 2d accelerator_for to avoid launch latencies found when serially looping over rd
73 accelerator_for2dNB( s,subvol_size, r,rd, Nsimd,{
74
75 int n = s / e2;
76 int b = s % e2;
77 int so=r*ostride; // base offset for start of plane
78 int ss= so+n*stride+b;
79
80 coalescedWrite(rb_p[r*subvol_size+s], coalescedRead(Data[ss]));
81
82 });
83
84 //issue segmented reductions in computeStream
85 gpuErr = gpucub::DeviceSegmentedReduce::Reduce(temp_storage_array, temp_storage_bytes, rb_p, d_out, rd, d_offsets, d_offsets+1,::gpucub::Sum(), zero_init, computeStream);
86 if (gpuErr!=gpuSuccess) {
87 std::cout << GridLogError << "Lattice_slicesum_gpu.h: Encountered error during gpucub::DeviceSegmentedReduce::Reduce! Error: " << gpuErr <<std::endl;
88 exit(EXIT_FAILURE);
89 }
90
91 acceleratorCopyFromDeviceAsynch(d_out,&lvSum[0],rd*sizeof(vobj),computeStream);
92
93 //sync after copy
95
96 acceleratorFreeDevice(temp_storage_array);
98 acceleratorFreeDevice(d_offsets);
99
100
101}
102#endif
103
104
105#if defined(GRID_SYCL)
106template<class vobj>
107inline void sliceSumReduction_sycl_small(const vobj *Data,
108 std::vector <vobj> &lvSum,
109 const int &rd,
110 const int &e1,
111 const int &e2,
112 const int &stride,
113 const int &ostride,
114 const int &Nsimd)
115{
116 size_t subvol_size = e1*e2;
117
118 vobj *mysum = (vobj *) malloc_shared(rd*sizeof(vobj),*theGridAccelerator);
119 vobj vobj_zero;
120 zeroit(vobj_zero);
121 for (int r = 0; r<rd; r++) {
122 mysum[r] = vobj_zero;
123 }
124
125 deviceVector<vobj> reduction_buffer(rd*subvol_size);
126
127 auto rb_p = &reduction_buffer[0];
128
129 // autoView(Data_v, Data, AcceleratorRead);
130
131 //prepare reduction buffer
132 accelerator_for2d( s,subvol_size, r,rd, (size_t)Nsimd,{
133
134 int n = s / e2;
135 int b = s % e2;
136 int so=r*ostride; // base offset for start of plane
137 int ss= so+n*stride+b;
138
139 coalescedWrite(rb_p[r*subvol_size+s], coalescedRead(Data[ss]));
140
141 });
142
143 for (int r = 0; r < rd; r++) {
144 theGridAccelerator->submit([&](sycl::handler &cgh) {
145 auto Reduction = sycl::reduction(&mysum[r],std::plus<>());
146 cgh.parallel_for(sycl::range<1>{subvol_size},
147 Reduction,
148 [=](sycl::id<1> item, auto &sum) {
149 auto s = item[0];
150 sum += rb_p[r*subvol_size+s];
151 });
152 });
153
154
155 }
156 theGridAccelerator->wait();
157 for (int r = 0; r < rd; r++) {
158 lvSum[r] = mysum[r];
159 }
160 free(mysum,*theGridAccelerator);
161}
162#endif
163
164template<class vobj>
165inline void sliceSumReduction_large(const vobj *Data,
166 std::vector<vobj> &lvSum,
167 const int rd,
168 const int e1,
169 const int e2,
170 const int stride,
171 const int ostride,
172 const int Nsimd)
173{
174 typedef typename vobj::vector_type vector;
175 const int words = sizeof(vobj)/sizeof(vector);
176 const int osites = rd*e1*e2;
177 deviceVector<vector>buffer(osites);
178 vector *dat = (vector *)Data;
179 vector *buf = &buffer[0];
180 std::vector<vector> lvSum_small(rd);
181 vector *lvSum_ptr = (vector *)&lvSum[0];
182
183 for (int w = 0; w < words; w++) {
184 accelerator_for(ss,osites,1,{
185 buf[ss] = dat[ss*words+w];
186 });
187
188 #if defined(GRID_CUDA) || defined(GRID_HIP)
189 sliceSumReduction_cub_small(buf,lvSum_small,rd,e1,e2,stride, ostride,Nsimd);
190 #elif defined(GRID_SYCL)
191 sliceSumReduction_sycl_small(buf,lvSum_small,rd,e1,e2,stride, ostride,Nsimd);
192 #endif
193
194 for (int r = 0; r < rd; r++) {
195 lvSum_ptr[w+words*r]=lvSum_small[r];
196 }
197 }
198}
199
200template<class vobj>
201inline void sliceSumReduction_gpu(const Lattice<vobj> &Data,
202 std::vector<vobj> &lvSum,
203 const int rd,
204 const int e1,
205 const int e2,
206 const int stride,
207 const int ostride,
208 const int Nsimd)
209{
210 autoView(Data_v, Data, AcceleratorRead); //reduction libraries cannot deal with large vobjs so we split into small/large case.
211 if constexpr (sizeof(vobj) <= 256) {
212
213 #if defined(GRID_CUDA) || defined(GRID_HIP)
214 sliceSumReduction_cub_small(&Data_v[0], lvSum, rd, e1, e2, stride, ostride, Nsimd);
215 #elif defined (GRID_SYCL)
216 sliceSumReduction_sycl_small(&Data_v[0], lvSum, rd, e1, e2, stride, ostride, Nsimd);
217 #endif
218
219 }
220 else {
221 sliceSumReduction_large(&Data_v[0], lvSum, rd, e1, e2, stride, ostride, Nsimd);
222 }
223}
224
225
226template<class vobj>
227inline void sliceSumReduction_cpu(const Lattice<vobj> &Data,
228 std::vector<vobj> &lvSum,
229 const int &rd,
230 const int &e1,
231 const int &e2,
232 const int &stride,
233 const int &ostride,
234 const int &Nsimd)
235{
236 // sum over reduced dimension planes, breaking out orthog dir
237 // Parallel over orthog direction
238 autoView( Data_v, Data, CpuRead);
239 thread_for( r,rd, {
240 int so=r*ostride; // base offset for start of plane
241 for(int n=0;n<e1;n++){
242 for(int b=0;b<e2;b++){
243 int ss= so+n*stride+b;
244 lvSum[r]=lvSum[r]+Data_v[ss];
245 }
246 }
247 });
248}
249
250template<class vobj> inline void sliceSumReduction(const Lattice<vobj> &Data,
251 std::vector<vobj> &lvSum,
252 const int &rd,
253 const int &e1,
254 const int &e2,
255 const int &stride,
256 const int &ostride,
257 const int &Nsimd)
258{
259#if defined(GRID_CUDA) || defined(GRID_HIP) || defined(GRID_SYCL)
260 sliceSumReduction_gpu(Data, lvSum, rd, e1, e2, stride, ostride, Nsimd);
261#else
262 sliceSumReduction_cpu(Data, lvSum, rd, e1, e2, stride, ostride, Nsimd);
263#endif
264}
265
266
void * acceleratorAllocDevice(size_t bytes)
acceleratorEvent_t acceleratorCopyFromDeviceAsynch(void *from, void *to, size_t bytes)
#define accelerator_for(iterator, num, nsimd,...)
#define accelerator_for2d(iter1, num1, iter2, num2, nsimd,...)
void acceleratorFreeDevice(void *ptr)
#define accelerator_barrier(dummy)
acceleratorEvent_t acceleratorCopyToDeviceAsynch(void *from, void *to, size_t bytes)
std::vector< T, devAllocator< T > > deviceVector
accelerator_inline void zeroit(Grid_simd2< S, V > &z)
vobj::scalar_object sum(const vobj *arg, Integer osites)
void sliceSumReduction(const Lattice< vobj > &Data, std::vector< vobj > &lvSum, const int &rd, const int &e1, const int &e2, const int &stride, const int &ostride, const int &Nsimd)
void sliceSumReduction_cpu(const Lattice< vobj > &Data, std::vector< vobj > &lvSum, const int &rd, const int &e1, const int &e2, const int &stride, const int &ostride, const int &Nsimd)
void sliceSumReduction_large(const vobj *Data, std::vector< vobj > &lvSum, const int rd, const int e1, const int e2, const int stride, const int ostride, const int Nsimd)
void sliceSumReduction_gpu(const Lattice< vobj > &Data, std::vector< vobj > &lvSum, const int rd, const int e1, const int e2, const int stride, const int ostride, const int Nsimd)
#define autoView(l_v, l, mode)
GridLogger GridLogError(1, "Error", GridLogColours, "RED")
@ AcceleratorRead
@ CpuRead
#define NAMESPACE_BEGIN(A)
Definition Namespace.h:35
#define NAMESPACE_END(A)
Definition Namespace.h:36
accelerator_inline void coalescedWrite(vobj &__restrict__ vec, const vobj &__restrict__ extracted, int lane=0)
Definition Tensor_SIMT.h:87
accelerator_inline vobj coalescedRead(const vobj &__restrict__ vec, int lane=0)
Definition Tensor_SIMT.h:61
#define thread_for(i, num,...)
Definition Threads.h:60