#ifndef STIM_CUDA_ARRAY_MULTIPLY_H
#define STIM_CUDA_ARRAY_MULTIPLY_H

#include <iostream>
#include <cuda.h>
#include <stim/cuda/cudatools.h>		//provides stim::maxThreadsPerBlock() and HANDLE_ERROR()

namespace stim{
	namespace cuda{

		//kernel: each thread multiplies one element of lhs by the scalar rhs
		template<typename T>
		__global__ void cuda_multiply(T* lhs, T rhs, unsigned int N){

			//calculate the 1D index for this thread
			int i = blockIdx.x * blockDim.x + threadIdx.x;

			if(i < N)
				lhs[i] *= rhs;
		}

		//multiply a device array lhs (length N) by the scalar rhs, in place
		template<typename T>
		void gpu_multiply(T* lhs, T rhs, unsigned int N){

			//get the maximum number of threads per block for the CUDA device
			int threads = stim::maxThreadsPerBlock();

			//calculate the number of blocks
			int blocks = N / threads + (N % threads == 0 ? 0 : 1);

			//call the kernel to do the multiplication
			cuda_multiply <<< blocks, threads >>>(lhs, rhs, N);
		}

		//multiply a host array lhs (length N) by the scalar rhs, performing the work on the GPU
		template<typename T>
		void cpu_multiply(T* lhs, T rhs, unsigned int N){

			//calculate the number of bytes in the array
			unsigned int bytes = N * sizeof(T);

			//allocate memory on the GPU for the array
			T* gpuLHS;
			HANDLE_ERROR( cudaMalloc(&gpuLHS, bytes) );

			//copy the array to the GPU
			HANDLE_ERROR( cudaMemcpy(gpuLHS, lhs, bytes, cudaMemcpyHostToDevice) );

			//call the GPU version of this function
			gpu_multiply(gpuLHS, rhs, N);

			//copy the array back to the CPU
			HANDLE_ERROR( cudaMemcpy(lhs, gpuLHS, bytes, cudaMemcpyDeviceToHost) );

			//free allocated memory
			cudaFree(gpuLHS);
		}
	}
}

#endif
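/*
Example usage (a minimal sketch, assuming this header is included from a .cu file compiled
with nvcc and linked against the stim cudatools headers; the array name and size below are
illustrative only, not part of this header):

	float data[1024];
	//... fill data on the host ...
	stim::cuda::cpu_multiply(data, 2.0f, 1024);		//scale every element by 2 using the GPU

If the array already lives in device memory, call stim::cuda::gpu_multiply() directly and
skip the host-to-device copies performed by cpu_multiply().
*/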