#ifndef STIM_CUDA_ARRAY_DIVIDE_H
#define STIM_CUDA_ARRAY_DIVIDE_H

#include <iostream>
#include <cuda.h>
#include <stim/cuda/cudatools.h>	//assumed path; provides stim::maxThreadsPerBlock() and HANDLE_ERROR

namespace stim{
	namespace cuda{

		template<typename T>
		__global__ void cuda_divide(T* ptr1, T* ptr2, T* quotient, unsigned int N){

			//calculate the 1D index for this thread
			unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;

			//divide the corresponding elements (zeros in ptr2 produce inf/NaN for floating-point T)
			if(idx < N){
				quotient[idx] = ptr1[idx] / ptr2[idx];
			}
		}

		template<typename T>
		void gpu_divide(T* ptr1, T* ptr2, T* quotient, unsigned int N){

			//get the maximum number of threads per block for the CUDA device
			int threads = stim::maxThreadsPerBlock();

			//calculate the number of blocks
			int blocks = N / threads + 1;

			//call the kernel to do the division
			cuda_divide <<< blocks, threads >>>(ptr1, ptr2, quotient, N);

		}

		template<typename T>
		void cpu_divide(T* ptr1, T* ptr2, T* cpu_quotient, unsigned int N){

			//allocate memory on the GPU for the arrays
			T* gpu_ptr1;
			T* gpu_ptr2;
			T* gpu_quotient;
			HANDLE_ERROR( cudaMalloc( &gpu_ptr1, N * sizeof(T) ) );
			HANDLE_ERROR( cudaMalloc( &gpu_ptr2, N * sizeof(T) ) );
			HANDLE_ERROR( cudaMalloc( &gpu_quotient, N * sizeof(T) ) );

			//copy the input arrays to the GPU
			HANDLE_ERROR( cudaMemcpy( gpu_ptr1, ptr1, N * sizeof(T), cudaMemcpyHostToDevice) );
			HANDLE_ERROR( cudaMemcpy( gpu_ptr2, ptr2, N * sizeof(T), cudaMemcpyHostToDevice) );

			//call the GPU version of this function
			gpu_divide(gpu_ptr1, gpu_ptr2, gpu_quotient, N);

			//copy the result back to the CPU
			HANDLE_ERROR( cudaMemcpy( cpu_quotient, gpu_quotient, N * sizeof(T), cudaMemcpyDeviceToHost) );

			//free allocated memory
			cudaFree(gpu_ptr1);
			cudaFree(gpu_ptr2);
			cudaFree(gpu_quotient);

		}

	}
}

#endif
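
//Usage sketch (illustrative, not part of the original header): element-wise division of
//two host arrays via cpu_divide(). The include path, array sizes, and values below are
//hypothetical; the file must be compiled with nvcc since the header defines a kernel.
//
//	#include <vector>
//	#include <stim/cuda/arraymath/array_divide.cuh>	//assumed location of this header
//
//	int main(){
//		const unsigned int N = 1024;
//		std::vector<float> a(N, 6.0f), b(N, 2.0f), q(N);
//		stim::cuda::cpu_divide(a.data(), b.data(), q.data(), N);
//		//each q[i] is now 3.0f; zeros in b would yield inf/NaN
//		return 0;
//	}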