#ifndef STIM_CUDA_GRADIENT_H
#define STIM_CUDA_GRADIENT_H

#include <iostream>
#include <cuda.h>
#include <stim/cuda/cudatools.h>				//provides HANDLE_ERROR and stim::maxThreadsPerBlock()

namespace stim{
	namespace cuda{

		template<typename T>
		__global__ void gradient_2d(T* out, T* in, size_t x, size_t y){

			// calculate the 2D coordinates for this current thread
			size_t xi = blockIdx.x * blockDim.x + threadIdx.x;
			size_t yi = blockIdx.y * blockDim.y + threadIdx.y;

			// convert the 2D coordinates to a 1D pixel index
			size_t i = yi * x + xi;

			//return if the pixel is outside of the image
			if(xi >= x || yi >= y) return;

			//calculate indices for the forward difference
			size_t i_xp = yi * x + (xi + 1);
			size_t i_yp = (yi + 1) * x + xi;

			//calculate indices for the backward difference
			size_t i_xn = yi * x + (xi - 1);
			size_t i_yn = (yi - 1) * x + xi;

			//use a forward difference at the low boundary, a backward difference at the high
			//boundary, and a central difference everywhere else
			if(xi == 0)					out[i * 2 + 0] = in[i_xp] - in[i];
			else if(xi == x - 1)		out[i * 2 + 0] = in[i] - in[i_xn];
			else						out[i * 2 + 0] = (in[i_xp] - in[i_xn]) / 2;

			if(yi == 0)					out[i * 2 + 1] = in[i_yp] - in[i];
			else if(yi == y - 1)		out[i * 2 + 1] = in[i] - in[i_yn];
			else						out[i * 2 + 1] = (in[i_yp] - in[i_yn]) / 2;
		}

		template<typename T>
		void gpu_gradient_2d(T* gpuGrad, T* gpuI, size_t x, size_t y){

			//get the maximum number of threads per block for the CUDA device
			unsigned int max_threads = stim::maxThreadsPerBlock();
			dim3 threads(max_threads, 1);
			dim3 blocks((unsigned int)(x / threads.x) + 1, (unsigned int)y);

			//call the GPU kernel to determine the gradient
			gradient_2d<T> <<< blocks, threads >>>(gpuGrad, gpuI, x, y);
		}

		template<typename T>
		void cpu_gradient_2d(T* out, T* in, unsigned int x, unsigned int y){

			//get the number of pixels in the image
			unsigned int pixels = x * y;
			unsigned int bytes = pixels * sizeof(T);

			//allocate space on the GPU for the input image
			T* gpuIn;
			HANDLE_ERROR(cudaMalloc(&gpuIn, bytes));

			//copy the image data to the GPU
			HANDLE_ERROR(cudaMemcpy(gpuIn, in, bytes, cudaMemcpyHostToDevice));

			//allocate space on the GPU for the output gradient image
			T* gpuOut;
			HANDLE_ERROR(cudaMalloc(&gpuOut, bytes * 2));		//the output image will have two channels (x, y)

			//call the GPU version of this function
			gpu_gradient_2d(gpuOut, gpuIn, x, y);

			//copy the results to the CPU
			HANDLE_ERROR(cudaMemcpy(out, gpuOut, bytes * 2, cudaMemcpyDeviceToHost));

			//free allocated memory
			HANDLE_ERROR(cudaFree(gpuOut));
			HANDLE_ERROR(cudaFree(gpuIn));
		}
	}
}

#endif
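
/*
Example usage (a minimal sketch, not part of the original header): compute the gradient of a
small single-channel float image with stim::cuda::cpu_gradient_2d. The image values, the main()
wrapper, and the include path for this header are illustrative assumptions; the only requirement
taken from the code above is that the output buffer holds two interleaved values per pixel,
(dI/dx, dI/dy), so it must be twice the size of the input.

	#include <vector>
	#include <stim/cuda/gradient.cuh>					//assumed location of this header

	int main(){
		const unsigned int X = 4, Y = 3;
		std::vector<float> img(X * Y);
		for(size_t i = 0; i < img.size(); i++)
			img[i] = (float)i;							//simple ramp image for testing

		std::vector<float> grad(X * Y * 2);				//two interleaved channels per pixel
		stim::cuda::cpu_gradient_2d(grad.data(), img.data(), X, Y);

		//grad[2*i + 0] now holds dI/dx and grad[2*i + 1] holds dI/dy for pixel i
		return 0;
	}
*/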