//array_cart2polar.cuh
#ifndef STIM_CUDA_ARRAY_CART2POLAR_H
#define STIM_CUDA_ARRAY_CART2POLAR_H

#include <stim/cuda/cudatools.h>	//HANDLE_ERROR and stim::maxThreadsPerBlock() (assumed header location within the stim library)

namespace stim{
	namespace cuda{
		//convert each interleaved Cartesian (x, y) pair in a to an interleaved polar (theta, r) pair, in place
		template<typename T>
		__global__ void cuda_cart2polar(T* a, unsigned int x, unsigned int y){

			//calculate the 2D image coordinates for this thread
			unsigned int xi = blockIdx.x * blockDim.x + threadIdx.x;
			unsigned int yi = blockIdx.y * blockDim.y + threadIdx.y;

			//return if this thread falls outside of the image
			if(xi >= x || yi >= y) return;

			//convert the 2D coordinates to a 1D pixel index
			unsigned int i = yi * x + xi;

			//load the interleaved Cartesian components for this pixel
			T xl = a[i * 2 + 0];
			T yl = a[i * 2 + 1];

			//convert to polar coordinates and store them in place
			T theta = atan2(yl, xl);
			T r = sqrt(xl * xl + yl * yl);
			a[i * 2 + 0] = theta;
			a[i * 2 + 1] = r;
		}


		template<typename T>
		void gpu_cart2polar(T* gpuGrad, unsigned int x, unsigned int y){

			//calculate the block and grid dimensions
			unsigned int max_threads = stim::maxThreadsPerBlock();
			dim3 threads(max_threads, 1);
			dim3 blocks(x / threads.x + (x % threads.x == 0 ? 0 : 1), y);

			//call the kernel to convert each Cartesian (x, y) pair to polar (theta, r)
			cuda_cart2polar <<< blocks, threads >>> (gpuGrad, x, y);

		}


		template<typename T>
		void cpu_cart2polar(T* a, unsigned int x, unsigned int y){

			//calculate the number of pixels and the size of the array in bytes
			//(each pixel stores two interleaved components)
			unsigned int N = x * y;
			unsigned int bytes = N * sizeof(T) * 2;

			//allocate memory on the GPU for the array
			T* gpuA;
			HANDLE_ERROR( cudaMalloc(&gpuA, bytes) );

			//copy the array to the GPU
			HANDLE_ERROR( cudaMemcpy(gpuA, a, bytes, cudaMemcpyHostToDevice) );

			//call the GPU version of this function
			gpu_cart2polar<T>(gpuA, x, y);

			//copy the array back to the CPU
			HANDLE_ERROR( cudaMemcpy(a, gpuA, bytes, cudaMemcpyDeviceToHost) );

			//free allocated memory
			HANDLE_ERROR( cudaFree(gpuA) );

		}
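
		/* Example host-side usage (a sketch, not part of the library; the image
		   size and the interleaved (x, y) gradient layout below are illustrative
		   assumptions):

			unsigned int w = 512, h = 512;
			std::vector<float> grad(w * h * 2);			//interleaved (x, y) pair for each pixel
			//... fill grad with Cartesian gradient vectors ...
			stim::cuda::cpu_cart2polar(grad.data(), w, h);
			//grad now holds an interleaved (theta, r) pair for each pixel
		*/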

	}
}

#endif