array_abs.cuh
#ifndef STIM_CUDA_ARRAY_ABS_H
#define STIM_CUDA_ARRAY_ABS_H
namespace stim{
namespace cuda{
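//kernel: replace each element of the array a (length N) with its absolute value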
template<typename T>
__global__ void cuda_abs(T* a, unsigned int N){
	//calculate the 1D index for this thread
	unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
	if(i < N)
		a[i] = abs(a[i]);
}
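
//compute the absolute value of every element of an array that already resides in GPU (device) memory, in place
//a: pointer to the array in device memory
//N: number of elements in the array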
template<typename T>
void gpu_abs(T* a, unsigned int N){
	//get the maximum number of threads per block for the CUDA device
	int threads = stim::maxThreadsPerBlock();
	//calculate the number of blocks needed to cover the array
	int blocks = N / threads + (N % threads == 0 ? 0 : 1);
	//call the kernel to compute the absolute value of each element
	cuda_abs <<< blocks, threads >>>(a, N);
}
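
//compute the absolute value of every element of an array in CPU (host) memory
//the array is copied to the GPU, processed there, and copied back in place
//a: pointer to the array in host memory
//N: number of elements in the array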
template<typename T>
void cpu_abs(T* a, unsigned int N){
	//calculate the number of bytes in the array
	unsigned int bytes = N * sizeof(T);
	//allocate memory on the GPU for the array
	T* gpuA;
	HANDLE_ERROR( cudaMalloc(&gpuA, bytes) );
	//copy the array to the GPU
	HANDLE_ERROR( cudaMemcpy(gpuA, a, bytes, cudaMemcpyHostToDevice) );
	//call the GPU version of this function
	gpu_abs<T>(gpuA, N);
	//copy the array back to the CPU
	HANDLE_ERROR( cudaMemcpy(a, gpuA, bytes, cudaMemcpyDeviceToHost) );
	//free allocated memory
	cudaFree(gpuA);
}
} //end namespace cuda
} //end namespace stim
#endif
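
//Example usage (a minimal sketch; it assumes HANDLE_ERROR and stim::maxThreadsPerBlock()
//are available from the including project, as this header already relies on them):
//
//	#include <vector>
//	#include "array_abs.cuh"
//
//	std::vector<float> data(1024, -1.0f);							//1024 elements, all set to -1
//	stim::cuda::cpu_abs(data.data(), (unsigned int)data.size());	//take |x| of every element on the GPU
//	//data now holds 1.0f in every position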