#ifndef STIM_SCALARFIELD_H
#define STIM_SCALARFIELD_H


#include "../math/rect.h"
#include "../math/complex.h"
#include "../math/fft.h"

#ifdef CUDA_FOUND
#include "../cuda/crop.cuh"
#endif

namespace stim{

	template<typename T>
	__global__ void cuda_abs(T* img, stim::complex<T>* field, size_t N){
		size_t i = blockIdx.x * blockDim.x + threadIdx.x;
		if(i >= N) return;

		img[i] = field[i].abs();
	}

	template<typename T>
	__global__ void cuda_real(T* img, stim::complex<T>* field, size_t N){
		size_t i = blockIdx.x * blockDim.x + threadIdx.x;
		if(i >= N) return;

		img[i] = field[i].real();
	}

	template<typename T>
	__global__ void cuda_imag(T* img, stim::complex<T>* field, size_t N){
		size_t i = blockIdx.x * blockDim.x + threadIdx.x;
		if(i >= N) return;

		img[i] = field[i].imag();
	}

	template<typename T>
	__global__ void cuda_intensity(T* img, stim::complex<T>* field, size_t N){
		size_t i = blockIdx.x * blockDim.x + threadIdx.x;
		if(i >= N) return;

		img[i] = pow(field[i].abs(), 2);
	}

	template<typename T>
	__global__ void cuda_sum_intensity(T* img, stim::complex<T>* field, size_t N){
		size_t i = blockIdx.x * blockDim.x + threadIdx.x;
		if(i >= N) return;

		img[i] += pow(field[i].abs(), 2);
	}

	/// Perform a k-space transform of a scalar field (FFT). The given field has a width of x and the calculated momentum space has a
	///		width of kx (in radians per unit length).
	/// @param K is a pointer to the output array of all plane waves in the field
	/// @param kx is the width of the frame in momentum space (calculated and returned by reference)
	/// @param ky is the height of the frame in momentum space (calculated and returned by reference)
	/// @param E is the field to be transformed
	/// @param x is the width of the field in the spatial domain
	/// @param y is the height of the field in the spatial domain
	/// @param nx is the number of pixels representing the field in the x (and kx) direction
	/// @param ny is the number of pixels representing the field in the y (and ky) direction
	template<typename T>
	void cpu_scalar_to_kspace(stim::complex<T>* K, T& kx, T& ky, stim::complex<T>* E, T x, T y, size_t nx, size_t ny){

		kx = stim::TAU * nx / x;			//calculate the width of the momentum space
		ky = stim::TAU * ny / y;

		stim::complex<T>* dev_FFT;
		HANDLE_ERROR( cudaMalloc(&dev_FFT, sizeof(stim::complex<T>) * nx * ny) );		//allocate space on the CUDA device for the output array

		stim::complex<T>* dev_E;
		HANDLE_ERROR( cudaMalloc(&dev_E, sizeof(stim::complex<T>) * nx * ny) );		//allocate space for the field
		HANDLE_ERROR( cudaMemcpy(dev_E, E, sizeof(stim::complex<T>) * nx * ny, cudaMemcpyHostToDevice) );	//copy the field to GPU memory

		cufftResult result;
		cufftHandle plan;
		result = cufftPlan2d(&plan, (int)ny, (int)nx, CUFFT_C2C);		//the slowest-varying (row) dimension is passed first; CUFFT_C2C assumes T = float
		if(result != CUFFT_SUCCESS){
			std::cout<<"Error creating cuFFT plan."<<std::endl;
			exit(1);
		}

		result = cufftExecC2C(plan, (cufftComplex*)dev_E, (cufftComplex*)dev_FFT, CUFFT_FORWARD);
		if(result != CUFFT_SUCCESS){
			std::cout<<"Error using cuFFT to perform a forward Fourier transform of the field."<<std::endl;
			exit(1);
		}
		cufftDestroy(plan);

		stim::complex<T>* fft = (stim::complex<T>*) malloc(sizeof(stim::complex<T>) * nx * ny);
		HANDLE_ERROR( cudaMemcpy(fft, dev_FFT, sizeof(stim::complex<T>) * nx * ny, cudaMemcpyDeviceToHost) );

		stim::cpu_fftshift(K, fft, nx, ny);
		
		HANDLE_ERROR( cudaFree(dev_FFT) );			//free GPU memory
		HANDLE_ERROR( cudaFree(dev_E) );
		free(fft);									//free CPU memory
	}

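	/// Perform the inverse k-space transform (inverse FFT), converting a set of plane waves back into a spatial field.
	/// @param E is a pointer to the output field in the spatial domain
	/// @param x is the width of the field in the spatial domain (calculated and returned by reference)
	/// @param y is the height of the field in the spatial domain (calculated and returned by reference)
	/// @param K is the k-space (momentum space) representation to be transformed
	/// @param kx is the width of the frame in momentum space
	/// @param ky is the height of the frame in momentum space
	/// @param nx is the number of pixels representing the field in the x (and kx) direction
	/// @param ny is the number of pixels representing the field in the y (and ky) direction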
	template<typename T>
	void cpu_scalar_from_kspace(stim::complex<T>* E, T& x, T& y, stim::complex<T>* K, T kx, T ky, size_t nx, size_t ny){

		x = stim::TAU * nx / kx;			//calculate the width of the spatial domain
		y = stim::TAU * ny / ky;
		
		stim::complex<T>* fft = (stim::complex<T>*) malloc(sizeof(stim::complex<T>) * nx * ny);
		stim::cpu_ifftshift(fft, K, nx, ny);
		//memcpy(fft, K, sizeof(stim::complex<T>) * nx * ny);

		stim::complex<T>* dev_FFT;
		HANDLE_ERROR( cudaMalloc(&dev_FFT, sizeof(stim::complex<T>) * nx * ny) );		//allocate space on the CUDA device for the output array
		HANDLE_ERROR( cudaMemcpy(dev_FFT, fft, sizeof(stim::complex<T>) * nx * ny, cudaMemcpyHostToDevice) );	//copy the field to GPU memory

		stim::complex<T>* dev_E;
		HANDLE_ERROR( cudaMalloc(&dev_E, sizeof(stim::complex<T>) * nx * ny) );		//allocate space for the field

		cufftResult result;
		cufftHandle plan;
		result = cufftPlan2d(&plan, (int)ny, (int)nx, CUFFT_C2C);		//the slowest-varying (row) dimension is passed first; CUFFT_C2C assumes T = float
		if(result != CUFFT_SUCCESS){
			std::cout<<"Error creating cuFFT plan."<<std::endl;
			exit(1);
		}

		result = cufftExecC2C(plan, (cufftComplex*)dev_FFT, (cufftComplex*)dev_E, CUFFT_INVERSE);
		if(result != CUFFT_SUCCESS){
			std::cout<<"Error using cuFFT to perform a forward Fourier transform of the field."<<std::endl;
			exit(1);
		}
		cufftDestroy(plan);

		HANDLE_ERROR( cudaMemcpy(E, dev_E, sizeof(stim::complex<T>) * nx * ny, cudaMemcpyDeviceToHost) );

		HANDLE_ERROR( cudaFree(dev_FFT) );			//free GPU memory
		HANDLE_ERROR( cudaFree(dev_E) );
		free(fft);									//free CPU memory
		
	}
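	//	A minimal round-trip sketch (illustrative only): the sizes and extents below are placeholders, and a
	//	single-precision field is assumed because the cuFFT calls above use CUFFT_C2C / cufftExecC2C.
	//
	//		size_t nx = 256, ny = 256;											//number of samples across the field
	//		std::vector< stim::complex<float> > E(nx * ny), K(nx * ny);			//spatial field and its k-space representation
	//		float kx, ky;														//k-space extents, filled in by the forward transform
	//		stim::cpu_scalar_to_kspace(&K[0], kx, ky, &E[0], 10.0f, 10.0f, nx, ny);		//forward FFT of a 10 x 10 unit field
	//		float x, y;															//spatial extents recovered by the inverse transform
	//		stim::cpu_scalar_from_kspace(&E[0], x, y, &K[0], kx, ky, nx, ny);			//inverse FFT back to a spatial field
	//		//note that cuFFT is unnormalized, so the recovered field is scaled by nx * ny relative to the original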

	

	/// Propagate a field slice along its orthogonal direction by a given distance z using the angular spectrum method
	/// @param Enew is the resulting propagated field
	/// @param E is the field to be propagated
	/// @param sx is the size of the field in the lateral x direction
	/// @param sy is the size of the field in the lateral y direction
	/// @param z is the distance to be propagated
	/// @param k is the wavenumber 2*pi/lambda
	/// @param nx is the number of samples in the field along the lateral x direction
	/// @param ny is the number of samples in the field along the lateral y direction
	template<typename T>
	void cpu_scalar_propagate(stim::complex<T>* Enew, stim::complex<T>* E, T sx, T sy, T z, T k, size_t nx, size_t ny){
		
		stim::complex<T>* K = (stim::complex<T>*) malloc( sizeof(stim::complex<T>) * nx * ny );

		T Kx, Ky;											//width and height in k space
		cpu_scalar_to_kspace(K, Kx, Ky, E ,sx, sy, nx, ny);

		//T* mag = (T*) malloc( sizeof(T) * nx * ny );
		//stim::abs(mag, K, nx * ny);
		//stim::cpu2image<float>(mag, "kspace_pre_shift.bmp", nx, ny, stim::cmBrewer);
		
		size_t kxi, kyi;
		size_t i;
		T kx, kx_sq, ky, ky_sq, k_sq;
		T kz;
		stim::complex<T> shift;
		T min_kx = -Kx / 2;
		T dkx = Kx / (nx);

		T min_ky = -Ky / 2;
		T dky = Ky / (ny);

		for(kyi = 0; kyi < ny; kyi++){						//for each plane wave in the ky direction
			for(kxi = 0; kxi < nx; kxi++){					//for each plane wave in the kx direction
				i = kyi * nx + kxi;

				kx = min_kx + kxi * dkx;					//calculate the position of the current plane wave
				ky = min_ky + kyi * dky;

				kx_sq = kx * kx;
				ky_sq = ky * ky;
				k_sq = k*k;
				
				if(kx_sq + ky_sq < k_sq){
					kz = sqrt(k_sq - kx_sq - ky_sq);			//calculate the longitudinal component kz of the propagating plane wave
					shift = -exp(stim::complex<T>(0, kz * z));	//calculate the phase shift accumulated over the distance z
					K[i] *= shift;
					K[i] /= (nx*ny);							//normalize the DFT
				}
				else{
					K[i] = 0;									//evanescent waves (kx^2 + ky^2 >= k^2) are discarded
				}
			}
		}
		
		//stim::abs(mag, K, nx * ny);
		//stim::cpu2image<float>(mag, "kspace_post_shift.bmp", nx, ny, stim::cmBrewer);
		
		cpu_scalar_from_kspace(Enew, sx, sy, K, Kx, Ky, nx, ny);
		free(K);
	}
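	//	A minimal usage sketch for the propagator (illustrative only; the wavelength, field size, and distance are placeholders):
	//
	//		size_t nx = 256, ny = 256;											//number of samples across the field
	//		float sx = 10.0f, sy = 10.0f;										//lateral size of the field
	//		float k = stim::TAU / 1.0f;											//wavenumber 2*pi/lambda for a wavelength of 1 unit
	//		std::vector< stim::complex<float> > E(nx * ny), Enew(nx * ny);		//input field and propagated result
	//		stim::cpu_scalar_propagate(&Enew[0], &E[0], sx, sy, 5.0f, k, nx, ny);		//propagate the field 5 units along its normal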

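	/// GPU version of the propagator. The field currently round-trips through the host: it is copied to CPU memory,
	///		propagated with cpu_scalar_propagate(), and copied back to the device.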
	template<typename T>
	void gpu_scalar_propagate(stim::complex<T>* Enew, stim::complex<T>* E, T sx, T sy, T z, T k, size_t nx, size_t ny){
		
		size_t field_bytes = sizeof(stim::complex<T>) * nx * ny;
		stim::complex<T>* host_E = (stim::complex<T>*) malloc( field_bytes);
		HANDLE_ERROR( cudaMemcpy(host_E, E, field_bytes, cudaMemcpyDeviceToHost) );

		stim::complex<T>* host_Enew = (stim::complex<T>*) malloc(field_bytes);

		cpu_scalar_propagate(host_Enew, host_E, sx, sy, z, k, nx, ny);

		HANDLE_ERROR( cudaMemcpy(Enew, host_Enew, field_bytes, cudaMemcpyHostToDevice) );
		free(host_E);
		free(host_Enew);
	}

	/// Apply a lowpass filter to a field slice
	/// @param Enew is the resulting filtered field
	/// @param E is the field to be filtered
	/// @param sx is the size of the field in the lateral x direction
	/// @param sy is the size of the field in the lateral y direction
	/// @param highest is the highest spatial frequency that can pass through the filter
	/// @param nx is the number of samples in the field along the lateral x direction
	/// @param ny is the number of samples in the field along the lateral y direction
	template<typename T>
	void cpu_scalar_lowpass(stim::complex<T>* Enew, stim::complex<T>* E, T sx, T sy, T highest, size_t nx, size_t ny){
		
		stim::complex<T>* K = (stim::complex<T>*) malloc( sizeof(stim::complex<T>) * nx * ny );

		T Kx, Ky;											//width and height in k space
		cpu_scalar_to_kspace(K, Kx, Ky, E ,sx, sy, nx, ny);

		//T* mag = (T*) malloc( sizeof(T) * nx * ny );
		//stim::abs(mag, K, nx * ny);
		//stim::cpu2image<float>(mag, "kspace_pre_lowpass.bmp", nx, ny, stim::cmBrewer);
		
		size_t kxi, kyi;
		size_t i;
		T kx, kx_sq, ky, ky_sq;
		T min_kx = -Kx / 2;
		T dkx = Kx / (nx);

		T min_ky = -Ky / 2;
		T dky = Ky / (ny);

		T highest_sq = highest * highest;

		for(kyi = 0; kyi < ny; kyi++){						//for each plane wave in the ky direction
			for(kxi = 0; kxi < nx; kxi++){					//for each plane wave in the kx direction
				i = kyi * nx + kxi;

				kx = min_kx + kxi * dkx;					//calculate the position of the current plane wave
				ky = min_ky + kyi * dky;

				kx_sq = kx * kx;
				ky_sq = ky * ky;
				
				if(kx_sq + ky_sq > highest_sq){
					K[i] = 0;
				}
				else
					K[i] /= nx * ny;						//normalize the DFT
			}
		}
		
		//stim::abs(mag, K, nx * ny);
		//stim::cpu2image<float>(mag, "kspace_post_lowpass.bmp", nx, ny, stim::cmBrewer);
		
		cpu_scalar_from_kspace(Enew, sx, sy, K, Kx, Ky, nx, ny);
		free(K);
	}
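	//	A minimal usage sketch for the lowpass filter, reusing the placeholder names from the propagation sketch above
	//	(illustrative only; the cutoff k * NA assumes a hypothetical collection system with numerical aperture NA):
	//
	//		float NA = 1.0f;													//numerical aperture of the collection optics
	//		stim::cpu_scalar_lowpass(&Enew[0], &E[0], sx, sy, k * NA, nx, ny);			//discard spatial frequencies above k * NA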

	enum locationType {CPUmem, GPUmem};

	/// Class represents a scalar optical field.

	/// In general, this class is designed to operate between the CPU and GPU, so make sure all functions have an option to create their output on either.
	///		The field is stored *either* in GPU or host memory, but never both, so divergent copies of the same field cannot exist.
	///		This class is designed to be included in all of the other scalar optics classes, allowing them to render output data, so keep it general and compatible.

template<typename T>
class scalarfield : public rect<T>{

protected:
	stim::complex<T>* E;
	size_t R[2];
	locationType loc;
	using rect<T>::X;
	using rect<T>::Y;

	T* p[3];											//Cartesian (x, y, z) coordinates for each point in E, stored as a meshgrid

	/// Convert the field to a k-space representation (do an FFT)
	void to_kspace(T& kx, T& ky){
		cpu_scalar_to_kspace(E, kx, ky, E, X.len(), Y.len(), R[0], R[1]);
	}

	void from_kspace(){
		T kx = stim::TAU * R[0] / X.len();			//calculate the width of the momentum space
		T ky = stim::TAU * R[1] / Y.len();
		T x, y;
		cpu_scalar_from_kspace(E, x, y, E, kx, ky, R[0], R[1]);
	}

public:

	/// Returns the number of values in the field
	CUDA_CALLABLE size_t size(){
		return R[0] * R[1];
	}

	CUDA_CALLABLE size_t grid_bytes(){
		return sizeof(stim::complex<T>) * R[0] * R[1];
	}

	scalarfield(size_t X, size_t Y, T size = 1, T z_pos = 0) : rect<T>::rect(size, z_pos){
		R[0] = X;											//set the field resolution
		R[1] = Y;

		E = (stim::complex<T>*) malloc(grid_bytes());		//allocate in CPU memory
		memset(E, 0, grid_bytes());
		loc = CPUmem;

		p[0] = p[1] = p[2] = NULL;							//set the position vector to NULL

	}

	~scalarfield(){
		if(loc == CPUmem) free(E);
		else cudaFree(E);
	}	

	/// Calculates the distance between points on the grid
	T spacing(){
		T du = rect<T>::X.len() / R[0];
		T dv = rect<T>::Y.len() / R[1];
		return std::min<T>(du, dv);
	}

	/// Copy the field array to the GPU, if it isn't already there
	void to_gpu(){
		if(loc == GPUmem) return;
		else{
			stim::complex<T>* dev_E;
			HANDLE_ERROR( cudaMalloc(&dev_E, grid_bytes()) );								//allocate GPU memory
			HANDLE_ERROR( cudaMemcpy(dev_E, E, grid_bytes(), cudaMemcpyHostToDevice) );	//copy the field to the GPU
			free(E);																	//free the CPU memory
			E = dev_E;																	//swap pointers

			if(p[0]){
				size_t meshgrid_bytes = size() * sizeof(T);								//calculate the number of bytes in each meshgrid
				T* dev_X;																//allocate variables to store the device meshgrid
				T* dev_Y;
				T* dev_Z;
				HANDLE_ERROR( cudaMalloc(&dev_X, meshgrid_bytes) );						//allocate space for the meshgrid on the device
				HANDLE_ERROR( cudaMalloc(&dev_Y, meshgrid_bytes) );
				HANDLE_ERROR( cudaMalloc(&dev_Z, meshgrid_bytes) );

				HANDLE_ERROR( cudaMemcpy(dev_X, p[0], meshgrid_bytes, cudaMemcpyHostToDevice) );	//copy from the host to the device
				HANDLE_ERROR( cudaMemcpy(dev_Y, p[1], meshgrid_bytes, cudaMemcpyHostToDevice) );
				HANDLE_ERROR( cudaMemcpy(dev_Z, p[2], meshgrid_bytes, cudaMemcpyHostToDevice) );

				free(p[0]);																//free the host copies of the meshgrid
				free(p[1]);
				free(p[2]);

				p[0] = dev_X;															//swap in the new pointers to device memory
				p[1] = dev_Y;
				p[2] = dev_Z;
			}
			loc = GPUmem;																//set the location flag
		}

	}

	/// Copy the field array to the CPU, if it isn't already there
	void to_cpu(){
		if(loc == CPUmem) return;
		else{
			stim::complex<T>* host_E = (stim::complex<T>*) malloc(grid_bytes());			//allocate space in main memory
			HANDLE_ERROR( cudaMemcpy(host_E, E, grid_bytes(), cudaMemcpyDeviceToHost) );	//copy from GPU to CPU
			HANDLE_ERROR( cudaFree(E) );												//free device memory
			E = host_E;																	//swap pointers

			//copy the meshgrid, if one has been created
			if(p[0]){
				size_t meshgrid_bytes = size() * sizeof(T);								//calculate the number of bytes in each meshgrid
				T* host_X = (T*) malloc( meshgrid_bytes );								//allocate space for the meshgrid on the host
				T* host_Y = (T*) malloc( meshgrid_bytes );
				T* host_Z = (T*) malloc( meshgrid_bytes );

				HANDLE_ERROR( cudaMemcpy(host_X, p[0], meshgrid_bytes, cudaMemcpyDeviceToHost) );
				HANDLE_ERROR( cudaMemcpy(host_Y, p[1], meshgrid_bytes, cudaMemcpyDeviceToHost) );
				HANDLE_ERROR( cudaMemcpy(host_Z, p[2], meshgrid_bytes, cudaMemcpyDeviceToHost) );

				HANDLE_ERROR( cudaFree(p[0]) );
				HANDLE_ERROR( cudaFree(p[1]) );
				HANDLE_ERROR( cudaFree(p[2]) );

				p[0] = host_X;
				p[1] = host_Y;
				p[2] = host_Z;
			}
			loc = CPUmem;
		}
	}

	/// Returns true if the field is currently stored in GPU memory
	bool gpu(){
		return loc == GPUmem;
	}

	/// Propagate the field along its orthogonal direction by a distance d
	void propagate(T d, T k){
		if(loc == CPUmem){
			cpu_scalar_propagate(E, E, X.len(), Y.len(), d, k, R[0], R[1]);
		}
		else{
			gpu_scalar_propagate(E, E, X.len(), Y.len(), d, k, R[0], R[1]);
		}
	}

	/// Apply a lowpass filter to the field, removing spatial frequencies above the cutoff given by highest
	///		(note: the filter is currently implemented only for fields stored in CPU memory)
	void lowpass(T highest){
		cpu_scalar_lowpass(E, E, X.len(), Y.len(), highest, R[0], R[1]);
	}

	/// Crop an image based on a given padding parameter (crop out the center)
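	///		For example (illustrative values): a field generated with padding = 1 at a resolution of 1536 x 1536
	///		crops to its central 1536 / (2 * 1 + 1) = 512 x 512 region.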
	void crop(size_t padding, stim::scalarfield<T>& cropped){
		size_t Cx = R[0] / (2 * padding + 1);										//calculate the size of the cropped image based on the padding value
		size_t Cy = R[1] / (2 * padding + 1);

		if(cropped.R[0] != Cx || cropped.R[1] != Cy){
			std::cout<<"Error: cropped field resolution ("<<cropped.R[0]<<" x "<<cropped.R[1]<<") does not match the required resolution ("<<Cx<<" x "<<Cy<<")."<<std::endl;
			exit(1);
		}

		if(loc == CPUmem){
			cropped.to_cpu();										//make sure that the cropped image is on the CPU
			size_t x, y;
			size_t sx, sy, si, di;
			for(y = 0; y < Cy; y++){
				sy = y + Cy * padding;								//calculate the y-index into the source image
				for(x = 0; x < Cx; x++){
					sx = x + Cx * padding;							//calculate the x-index into the source image
					si = sy * R[0] + sx;							//calculate the 1D index into the source image
					di = y * Cx + x;
					cropped.E[di] = E[si];
				}
			}
		}
		else{
			cropped.to_gpu();										//make sure that the cropped image is also on the GPU
			gpu_crop2d<stim::complex<T>>(cropped.E, E, R[0], R[1], Cx * padding, Cy * padding, Cx, Cy);
		}
	}

	std::string str(){
		std::stringstream ss;
		ss<<rect<T>::str()<<std::endl;
		ss<<"[ "<<R[0]<<" x "<<R[1]<<" ]"<<std::endl;
		ss<<"location: ";
		if(loc == CPUmem) ss<<"CPU";
		else ss<<"GPU";

		ss<<std::endl;
		return ss.str();
	}

	stim::complex<T>* ptr(){
		return E;
	}

	T* x(){ return p[0]; }
	T* y(){ return p[1]; }
	T* z(){ return p[2]; }

	/// Evaluate the cartesian coordinates of each point in the field. The resulting arrays are allocated in the same memory where the field is stored.
	void meshgrid(T* X, T* Y, T* Z, locationType location){
		//size_t array_size = sizeof(T) * R[0] * R[1];
		if(location == CPUmem){

			T du = (T)1.0 / (R[0] - 1);					//calculate the spacing between points in the grid
			T dv = (T)1.0 / (R[1] - 1);

			size_t ui, vi, i;
			stim::vec3<T> p;
			for(vi = 0; vi < R[1]; vi++){
				i = vi * R[0];
				for(ui = 0; ui < R[0]; ui++){
					p = rect<T>::p(ui * du, vi * dv);
					X[i] = p[0];
					Y[i] = p[1];
					Z[i] = p[2];
					i++;					
				}
			}
			//stim::cpu2image(X, "X.bmp", R[0], R[1], stim::cmBrewer);
			//stim::cpu2image(Y, "Y.bmp", R[0], R[1], stim::cmBrewer);
			//stim::cpu2image(Z, "Z.bmp", R[0], R[1], stim::cmBrewer);
		}
		else{
			std::cout<<"GPU allocation of a meshgrid isn't supported yet. You'll have to write kernels to do the calculation.";
			exit(1);
		}
	}

	/// Create a local meshgrid
	void meshgrid(){
		if(p[0]) return;								//if the p[0] value is not NULL, a meshgrid has already been created
		if(loc == CPUmem){
			p[0] = (T*) malloc( size() * sizeof(T) );
			p[1] = (T*) malloc( size() * sizeof(T) );
			p[2] = (T*) malloc( size() * sizeof(T) );
		}
		else{
			std::cout<<"GPUmem meshgrid isn't implemented yet."<<std::endl;
			exit(1);
		}
		meshgrid(p[0], p[1], p[2], loc);
	}

	//clear the field, setting all values to zero
	void clear(){
		if(loc == GPUmem)
			HANDLE_ERROR(cudaMemset(E, 0, grid_bytes()));
		else
			memset(E, 0, grid_bytes());
	}

	void image(std::string filename, stim::complexComponentType type = complexMag, stim::colormapType cmap = stim::cmBrewer){

		if(loc == GPUmem){
			T* image;
			HANDLE_ERROR( cudaMalloc(&image, sizeof(T) * size()) );
			int threads = stim::maxThreadsPerBlock();												//get the maximum number of threads per block for the CUDA device
			dim3 blocks( R[0] * R[1] / threads + 1 );												//create a 1D array of blocks

			switch(type){
			case complexMag:
				cuda_abs<T><<< blocks, threads >>>(image, E, size());
				break;
			case complexReal:
				cuda_real<T><<< blocks, threads >>>(image, E, size());
				break;
			case complexImaginary:
				cuda_imag<T><<< blocks, threads >>>(image, E, size());
				break;
			case complexIntensity:
				cuda_intensity<T><<< blocks, threads >>>(image, E, size());
				break;
			}
			stim::gpu2image<T>(image, filename, R[0], R[1], cmap);
			HANDLE_ERROR( cudaFree(image) );
		}
		else{
			T* image = (T*) malloc( sizeof(T) * size() );				//allocate space for the real image

			switch(type){												//get the specified component from the complex value
			case complexMag:
				stim::abs(image, E, size());
				break;
			case complexReal:
				stim::real(image, E, size());
				break;
			case complexImaginary:
				stim::imag(image, E, size());
				break;
			case complexIntensity:
				stim::intensity(image, E, size());
				break;
			}
			stim::cpu2image(image, filename, R[0], R[1], cmap);			//save the resulting image
			free(image);												//free the real image
		}
	}

	void image(T* img, stim::complexComponentType type = complexMag){
		if(loc == GPUmem) to_cpu();									//if the field is in the GPU, move it to the CPU

		switch(type){												//get the specified component from the complex value
		case complexMag:
			stim::abs(img, E, size());
			break;
		case complexReal:
			stim::real(img, E, size());
			break;
		case complexImaginary:
			stim::imag(img, E, size());
			break;
		case complexIntensity:
			stim::intensity(img, E, size());
			break;
		}
		//stim::cpu2image(image, filename, R[0], R[1], cmap);			//save the resulting image
		//free(image);												//free the real image
	}

	//adds the field intensity to the output array (useful for calculating detector response to incoherent fields)
	void intensity(T* out){
		if(loc == GPUmem){
			//T* image;
			//HANDLE_ERROR( cudaMalloc(&image, sizeof(T) * size()) );
			int threads = stim::maxThreadsPerBlock();												//get the maximum number of threads per block for the CUDA device
			dim3 blocks( R[0] * R[1] / threads + 1 );												//create a 1D array of blocks
			cuda_sum_intensity<T><<< blocks, threads >>>(out, E, size());
		}
		else{
			T* image = (T*) malloc( sizeof(T) * size() );				//allocate space for the real image
			stim::intensity(image, E, size());							//calculate the intensity
		
			size_t N = size();											//calculate the number of elements in the field
			for(size_t n = 0; n < N; n++)								//for each point in the field
				out[n] += image[n];										//add the field intensity to the output image

			free(image);												//free the temporary intensity image
		}
	}

};				//end class scalarfield
}

//stream insertion operator
template<typename T>
std::ostream& operator<<(std::ostream& os, stim::scalarfield<T>& rhs){
	os<<rhs.str();
	return os;
}
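
//	A minimal usage sketch for the scalarfield class (illustrative only; the file name and numeric values are placeholders):
//
//		stim::scalarfield<float> field(256, 256, 10.0f);		//256 x 256 field spanning 10 x 10 spatial units
//		float k = stim::TAU / 1.0f;								//wavenumber for a wavelength of 1 unit
//		field.propagate(5.0f, k);								//propagate the field 5 units along its normal
//		field.lowpass(k);										//discard spatial frequencies above the free-space cutoff
//		field.image("field.bmp", stim::complexIntensity);		//write the intensity of the field to a bitmap
//		std::cout<<field<<std::endl;							//print a summary of the field geometry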


#endif