#ifndef STIM_CUDA_ARRAY_MULTIPLY_H
#define STIM_CUDA_ARRAY_MULTIPLY_H

#include <iostream>
#include <cuda.h>
#include <stim/cuda/cudatools.h>	//provides HANDLE_ERROR and stim::maxThreadsPerBlock (assumed header path)

namespace stim{
	namespace cuda{

		template<typename T>
		__global__ void cuda_multiply(T* ptr1, T* ptr2, T* product, unsigned int N){

			//calculate the 1D index for this thread
			unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;

			//multiply the corresponding elements, guarding against threads past the end of the array
			if(idx < N){
				product[idx] = ptr1[idx] * ptr2[idx];
			}
		}

		template<typename T>
		void gpu_multiply(T* ptr1, T* ptr2, T* product, unsigned int N){

			//get the maximum number of threads per block for the CUDA device
			int threads = stim::maxThreadsPerBlock();

			//calculate the number of blocks needed to cover all N elements
			int blocks = N / threads + 1;

			//call the kernel to do the multiplication
			cuda_multiply <<< blocks, threads >>>(ptr1, ptr2, product, N);
		}

		template<typename T>
		void cpu_multiply(T* ptr1, T* ptr2, T* cpu_product, unsigned int N){

			//allocate memory on the GPU for the arrays
			T* gpu_ptr1;
			T* gpu_ptr2;
			T* gpu_product;
			HANDLE_ERROR( cudaMalloc( &gpu_ptr1, N * sizeof(T) ) );
			HANDLE_ERROR( cudaMalloc( &gpu_ptr2, N * sizeof(T) ) );
			HANDLE_ERROR( cudaMalloc( &gpu_product, N * sizeof(T) ) );

			//copy the input arrays to the GPU
			HANDLE_ERROR( cudaMemcpy( gpu_ptr1, ptr1, N * sizeof(T), cudaMemcpyHostToDevice) );
			HANDLE_ERROR( cudaMemcpy( gpu_ptr2, ptr2, N * sizeof(T), cudaMemcpyHostToDevice) );

			//call the GPU version of this function
			gpu_multiply(gpu_ptr1, gpu_ptr2, gpu_product, N);

			//copy the result back to the CPU
			HANDLE_ERROR( cudaMemcpy( cpu_product, gpu_product, N * sizeof(T), cudaMemcpyDeviceToHost) );

			//free allocated GPU memory
			cudaFree(gpu_ptr1);
			cudaFree(gpu_ptr2);
			cudaFree(gpu_product);
		}
	}
}

#endif
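
/*	Example usage (a minimal sketch, assuming this header is included from host
	code compiled with nvcc; the array contents below are illustrative only):

		float a[4] = {1, 2, 3, 4};
		float b[4] = {5, 6, 7, 8};
		float c[4];

		//element-wise multiply on the GPU, copying data to and from the device
		stim::cuda::cpu_multiply(a, b, c, 4);		// c = {5, 12, 21, 32}

	gpu_multiply can be called directly instead when the input and output
	pointers already refer to device memory.
*/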