//array_cart2polar.cuh
#ifndef STIM_CUDA_ARRAY_CART2POLAR_H
#define STIM_CUDA_ARRAY_CART2POLAR_H

#include <stim/cuda/cudatools.h>		//assumed header providing HANDLE_ERROR and stim::maxThreadsPerBlock()

namespace stim{
	namespace cuda{
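		//kernel: convert an array of N interleaved (x, y) pairs to (theta, r) pairs in place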
		template<typename T>
		__global__ void cuda_cart2polar(T* a, unsigned int N){

			//calculate the 1D index for this thread
			unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;

			if(i < N){
				T x = a[i * 2 + 0];					//load the cartesian components
				T y = a[i * 2 + 1];
				T theta = atan2(y, x);				//compute the angle
				T r = sqrt(x * x + y * y);			//compute the radius
				a[i * 2 + 0] = theta;				//overwrite the input in place
				a[i * 2 + 1] = r;
			}
		}


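		//convert an N-element array of interleaved (x, y) pairs already in GPU memory to polar coordinates in place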
		template<typename T>
		void gpu_cart2polar(T* gpuGrad, unsigned int N){

			//get the maximum number of threads per block for the CUDA device
			int threads = stim::maxThreadsPerBlock();

			//calculate the number of blocks
			int blocks = N / threads + (N % threads == 0 ? 0:1);

			//call the kernel to convert the coordinates from cartesian to polar
			cuda_cart2polar <<< blocks, threads >>>(gpuGrad, N);

		}


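		//CPU interface: copy the host array to the GPU, convert it to polar coordinates, and copy the result back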
		template<typename T>
		void cpu_cart2polar(T* a, unsigned int N){

			//calculate the number of bytes in the array (two values per point)
			size_t bytes = N * sizeof(T) * 2;

			//allocate memory on the GPU for the array
			T* gpuA;
			HANDLE_ERROR( cudaMalloc(&gpuA, bytes) );

			//copy the array to the GPU
			HANDLE_ERROR( cudaMemcpy(gpuA, a, bytes, cudaMemcpyHostToDevice) );

			//call the GPU version of this function
			gpu_cart2polar<T>(gpuA, N);

			//copy the array back to the CPU
			HANDLE_ERROR( cudaMemcpy(a, gpuA, bytes, cudaMemcpyDeviceToHost) );

			//free the allocated GPU memory
			HANDLE_ERROR( cudaFree(gpuA) );

		}
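
		/*	Example usage (a sketch for illustration, not part of the library):

				//convert a single 2D vector stored as an interleaved (x, y) pair
				float v[2] = {1.0f, 1.0f};
				stim::cuda::cpu_cart2polar(v, 1);		//v now holds (theta, r) = (pi/4, sqrt(2))
		*/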

	}
}

#endif