#ifndef STIM_CUDA_CONV2_H
#define STIM_CUDA_CONV2_H
//#define __CUDACC__

#ifdef __CUDACC__
#include <stim/cuda/cudatools/error.h>			//provides the HANDLE_ERROR macro used below
#endif

namespace stim {

#ifdef __CUDACC__
	//Kernel function that performs the 2D convolution.
	template<typename T>
	__global__ void kernel_conv2(T* out, T* in, T* kernel, size_t sx, size_t sy, size_t kx, size_t ky) {
		size_t xi = blockIdx.x * blockDim.x + threadIdx.x;	//threads correspond to indices into the output image
		size_t yi = blockIdx.y * blockDim.y + threadIdx.y;

		size_t X = sx - kx + 1;					//calculate the size of the output image
		size_t Y = sy - ky + 1;

		if (xi >= X || yi >= Y) return;				//return if the thread falls outside the output image

		//loop through the kernel
		size_t kxi, kyi;
		T v = 0;
		for (kyi = 0; kyi < ky; kyi++) {
			for (kxi = 0; kxi < kx; kxi++) {
				v += in[(yi + kyi) * sx + xi + kxi] * kernel[kyi * kx + kxi];
			}
		}
		out[yi * X + xi] = v;					//write the result to global memory
	}

	//Performs a convolution of a 2D image using the GPU. All pointers are assumed to point to memory on the current device.
	//@param out is a pointer to the output image
	//@param in is a pointer to the input image
	//@param sx is the size of the input image along X
	//@param sy is the size of the input image along Y
	//@param kx is the size of the kernel along X
	//@param ky is the size of the kernel along Y
	template<typename T>
	void gpu_conv2(T* out, T* in, T* kernel, size_t sx, size_t sy, size_t kx, size_t ky) {
		cudaDeviceProp p;
		HANDLE_ERROR(cudaGetDeviceProperties(&p, 0));
		size_t tmax = p.maxThreadsPerBlock;
		dim3 tn((unsigned int)sqrt((double)tmax), (unsigned int)sqrt((double)tmax));	//calculate the block dimensions
		size_t X = sx - kx + 1;					//calculate the size of the output image
		size_t Y = sy - ky + 1;
		dim3 bn((unsigned int)(X / tn.x + 1), (unsigned int)(Y / tn.y + 1));		//calculate the grid dimensions
		kernel_conv2<T> <<<bn, tn>>> (out, in, kernel, sx, sy, kx, ky);			//launch the kernel
	}
#endif
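	//Hedged usage sketch (not part of the library): calling gpu_conv2 directly when the image and
	//kernel already reside in device memory. The buffer names (d_in, d_kernel, d_out) are
	//illustrative assumptions; the buffers are assumed to have been allocated with cudaMalloc and
	//filled with cudaMemcpy, as cpu_conv2 below does. The valid-only output of an sx x sy image
	//convolved with a kx x ky kernel occupies (sx-kx+1) * (sy-ky+1) elements.
	//
	//	float* d_in;							//device image, sx * sy elements
	//	float* d_kernel;						//device kernel, kx * ky elements
	//	float* d_out;							//device output, (sx-kx+1) * (sy-ky+1) elements
	//	stim::gpu_conv2(d_out, d_in, d_kernel, sx, sy, kx, ky);	//launch the convolution on the current device
	//	HANDLE_ERROR(cudaDeviceSynchronize());				//wait for the kernel before using d_out
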
	//Performs a convolution of a 2D image. Only valid pixels based on the kernel are returned.
	//As a result, the output image will be smaller than the input image by (kx-1, ky-1).
	//@param out is a pointer to the output image
	//@param in is a pointer to the input image
	//@param sx is the size of the input image along X
	//@param sy is the size of the input image along Y
	//@param kx is the size of the kernel along X
	//@param ky is the size of the kernel along Y
	template<typename T>
	void cpu_conv2(T* out, T* in, T* kernel, size_t sx, size_t sy, size_t kx, size_t ky) {
		size_t X = sx - kx + 1;					//x size of the output image
		size_t Y = sy - ky + 1;					//y size of the output image

#ifdef __CUDACC__
		//allocate memory and copy everything to the GPU
		T* gpu_in;
		HANDLE_ERROR(cudaMalloc(&gpu_in, sx * sy * sizeof(T)));
		HANDLE_ERROR(cudaMemcpy(gpu_in, in, sx * sy * sizeof(T), cudaMemcpyHostToDevice));
		T* gpu_kernel;
		HANDLE_ERROR(cudaMalloc(&gpu_kernel, kx * ky * sizeof(T)));
		HANDLE_ERROR(cudaMemcpy(gpu_kernel, kernel, kx * ky * sizeof(T), cudaMemcpyHostToDevice));
		T* gpu_out;
		HANDLE_ERROR(cudaMalloc(&gpu_out, X * Y * sizeof(T)));

		gpu_conv2(gpu_out, gpu_in, gpu_kernel, sx, sy, kx, ky);				//execute the GPU kernel

		HANDLE_ERROR(cudaMemcpy(out, gpu_out, X * Y * sizeof(T), cudaMemcpyDeviceToHost));	//copy the result to the host
		HANDLE_ERROR(cudaFree(gpu_in));
		HANDLE_ERROR(cudaFree(gpu_kernel));
		HANDLE_ERROR(cudaFree(gpu_out));
#else
		T v;							//accumulates the convolution sum for the current output pixel
		size_t yi, xi, kyi, kxi, yi_kyi_sx;
		for (yi = 0; yi < Y; yi++) {				//for each pixel in the output image
			for (xi = 0; xi < X; xi++) {
				v = 0;
				for (kyi = 0; kyi < ky; kyi++) {	//for each pixel in the kernel
					yi_kyi_sx = (yi + kyi) * sx;
					for (kxi = 0; kxi < kx; kxi++) {
						v += in[yi_kyi_sx + xi + kxi] * kernel[kyi * kx + kxi];
					}
				}
				out[yi * X + xi] = v;			//save the result to the output array
			}
		}
#endif
	}

}	//end namespace stim

#endif
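
//Hedged usage sketch (not part of the library): a minimal host-side program that runs the
//valid-region convolution on an 8x8 image with a 3x3 averaging kernel, producing a 6x6 result.
//The include file name and the variable names (image, kern, result) are illustrative assumptions.
//
//	#include <vector>
//	#include "conv2.h"
//
//	int main() {
//		const size_t sx = 8, sy = 8;					//input image size
//		const size_t kx = 3, ky = 3;					//kernel size
//		std::vector<float> image(sx * sy, 1.0f);			//8x8 image of ones
//		std::vector<float> kern(kx * ky, 1.0f / 9.0f);			//3x3 averaging kernel
//		std::vector<float> result((sx - kx + 1) * (sy - ky + 1));	//6x6 valid-region output
//		stim::cpu_conv2(result.data(), image.data(), kern.data(), sx, sy, kx, ky);
//		return 0;							//every output pixel should be ~1.0
//	}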