update_dir_bb.cuh
#ifndef STIM_CUDA_UPDATE_DIR_BB_H
#define STIM_CUDA_UPDATE_DIR_BB_H

#include <iostream>
#include <cuda.h>
#include <stim/cuda/cudatools.h>
#include <stim/cuda/sharedmem.cuh>
#include <stim/visualization/aabb2.h>
#include <stim/visualization/colormap.h>
#include <math.h>

//#define RMAX_TEST 8
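
// This header implements the "update direction" step of the stim iterative voting
// pipeline ("bb" presumably refers to the bounding-box variant of the search).  For
// each pixel, the new direction points toward the position with the largest
// accumulated vote inside that pixel's voting cone (radius rmax, half-angle phi,
// oriented along the pixel's gradient direction); this refined direction then
// replaces the gradient direction so it can be used by a subsequent voting pass.
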
namespace stim{
namespace cuda{
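
// cuda_update_dir: one thread per pixel.  The precomputed atan2 look-up table
// (a (2*rmax+1) x (2*rmax+1) grid of angles) is first staged in shared memory.
// Each thread then builds an axis-aligned bounding box around its voting cone,
// tests every pixel inside the box against the cone (distance < rmax and angular
// offset < phi), and records the offset of the largest vote value.  The refined
// direction atan2(max_dy, max_dx) is written to gpuDir.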
template<typename T>
__global__ void cuda_update_dir(T* gpuDir, T* gpuVote, T* gpuGrad, T* gpuTable, T phi, int rmax, int x, int y){

	extern __shared__ T S[];
	T* shared_atan = S;
	size_t n_table = (rmax * 2 + 1) * (rmax * 2 + 1);
	stim::cuda::threadedMemcpy((char*)shared_atan, (char*)gpuTable, sizeof(T) * n_table, threadIdx.x, blockDim.x);
	__syncthreads();									//wait until every thread has finished loading its part of the atan2 table

	//T* shared_vote = &S[n_table];
	//size_t template_size_x = (blockDim.x + 2 * rmax);
	//size_t template_size_y = (blockDim.y + 2 * rmax);
	//stim::cuda::threadedMemcpy2D((char*)shared_vote, (char*)gpuVote, template_size_x, template_size_y, x, threadIdx.y * blockDim.x + threadIdx.x, blockDim.x * blockDim.y);

	int xi = blockIdx.x * blockDim.x + threadIdx.x;		//calculate the 2D coordinates for the current thread
	int yi = blockIdx.y * blockDim.y + threadIdx.y;
	if(xi >= x || yi >= y) return;						//if the index is outside of the image, terminate the kernel
	int i = yi * x + xi;								//convert the 2D coordinates to a 1D index

	float theta = gpuGrad[2*i];							//get the voting direction from the gradient direction - global memory fetch

	stim::aabb2<int> bb(xi, yi);						//initialize a bounding box at the current point
	bb.insert(xi + ceil(rmax * cos(theta)), yi + ceil(rmax * sin(theta)));				//insert the far point along the central voting direction
	bb.insert(xi + ceil(rmax * cos(theta - phi)), yi + ceil(rmax * sin(theta - phi)));	//insert one corner of the voting cone into the bounding box
	bb.insert(xi + ceil(rmax * cos(theta + phi)), yi + ceil(rmax * sin(theta + phi)));	//insert the final corner into the bounding box
	int x_table = 2 * rmax + 1;							//width of the atan2 look-up table
	T rmax_sq = rmax * rmax;
	int lut_i;
	T dx_sq, dy_sq;

	bb.trim_low(0, 0);									//make sure the bounding box doesn't go outside the image
	bb.trim_high(x - 1, y - 1);

	int by, bx;
	int dx, dy;											//coordinates relative to (xi, yi)
	T v;
	T max_v = 0;										//initialize the maximum vote value to zero
	T alpha;
	int max_dx = bb.low[0] - xi;
	int max_dy = bb.low[1] - yi;

	for(by = bb.low[1]; by <= bb.high[1]; by++){			//for each row in the bounding box
		dy = by - yi;										//calculate the y coordinate of the current point relative to yi
		dy_sq = dy * dy;
		for(bx = bb.low[0]; bx <= bb.high[0]; bx++){		//for each column in the bounding box
			dx = bx - xi;
			dx_sq = dx * dx;
			lut_i = (rmax - dy) * x_table + rmax - dx;		//look up the angle to (bx, by) in the shared atan2 table
			alpha = shared_atan[lut_i];
			if(dx_sq + dy_sq < rmax_sq && abs(alpha - theta) < phi){	//if (bx, by) falls inside the voting cone
				v = gpuVote[by * x + bx];					//fetch the vote value at the current position
				if(v > max_v){								//keep track of the position with the largest vote
					max_v = v;
					max_dx = dx;
					max_dy = dy;
				}
			}
		}
	}
	gpuDir[i] = atan2((T)max_dy, (T)max_dx);				//store the direction toward the largest vote
}

// this kernel updates the gradient direction with the calculated voting direction
template<typename T>
__global__ void cuda_update_grad(T* gpuGrad, T* gpuDir, size_t x, size_t y){

	// calculate the 2D coordinates for the current thread
	size_t xi = blockIdx.x * blockDim.x + threadIdx.x;
	size_t yi = blockIdx.y * blockDim.y + threadIdx.y;
	if(xi >= x || yi >= y) return;

	// convert the 2D coordinates to a 1D index
	size_t i = yi * x + xi;

	//update the gradient image with the vote direction
	gpuGrad[2*i] = gpuDir[i];
}
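
// gpu_update_dir: host-side wrapper operating entirely on device pointers.  It allocates
// a temporary direction image (gpuDir), chooses a square thread block based on the
// device's maximum threads per block, verifies that the atan2 table fits in shared
// memory, and then launches cuda_update_dir followed by cuda_update_grad.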
template<typename T>
void gpu_update_dir(T* gpuVote, T* gpuGrad, T* gpuTable, T phi, unsigned int rmax, size_t x, size_t y){

	//calculate the number of bytes in the image
	size_t bytes = x * y * sizeof(T);

	//allocate space on the GPU for the updated vote direction
	T* gpuDir;
	HANDLE_ERROR( cudaMalloc(&gpuDir, bytes) );

	//set up a 2D thread grid covering the image
	unsigned int max_threads = stim::maxThreadsPerBlock();
	dim3 threads( (unsigned int)sqrt(max_threads), (unsigned int)sqrt(max_threads) );
	dim3 blocks((unsigned int)x / threads.x + 1, (unsigned int)y / threads.y + 1);

	//calculate the amount of shared memory needed for the atan2 look-up table
	size_t table_bytes = sizeof(T) * (rmax * 2 + 1) * (rmax * 2 + 1);
	//size_t curtain = 2 * rmax;
	//size_t template_bytes = sizeof(T) * (threads.x + curtain) * (threads.y + curtain);
	size_t shared_mem_req = table_bytes;// + template_bytes;
	if (DEBUG) std::cout << "Shared memory required: " << shared_mem_req << " bytes" << std::endl;

	//make sure the device provides enough shared memory per block
	size_t shared_mem = stim::sharedMemPerBlock();
	if(shared_mem_req > shared_mem){
		std::cout << "Error: insufficient shared memory for this implementation of cuda_update_dir()." << std::endl;
		exit(1);
	}

	//call the kernel to calculate the new voting direction
	cuda_update_dir <<< blocks, threads, shared_mem_req >>>(gpuDir, gpuVote, gpuGrad, gpuTable, phi, rmax, (int)x, (int)y);

	//call the kernel to update the gradient direction
	cuda_update_grad <<< blocks, threads >>>(gpuGrad, gpuDir, x, y);

	//free allocated memory
	HANDLE_ERROR( cudaFree(gpuDir) );
}
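
// cpu_update_dir: convenience wrapper for host-resident data.  It copies the vote image,
// the two-channel gradient image, and the atan2 table to the GPU, calls gpu_update_dir(),
// and copies the updated gradient image back into cpuGrad.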
template<typename T>
void cpu_update_dir(T* cpuVote, T* cpuGrad, T* cpuTable, T phi, unsigned int rmax, unsigned int x, unsigned int y){

	//calculate the number of bytes in the image
	size_t bytes = x * y * sizeof(T);

	//calculate the number of bytes in the atan2 table
	size_t bytes_table = (2*rmax+1) * (2*rmax+1) * sizeof(T);

	//allocate space on the GPU for the vote image
	T* gpuVote;
	HANDLE_ERROR(cudaMalloc(&gpuVote, bytes));

	//copy the input vote image to the GPU
	HANDLE_ERROR(cudaMemcpy(gpuVote, cpuVote, bytes, cudaMemcpyHostToDevice));

	//allocate space on the GPU for the input gradient image (two values per pixel)
	T* gpuGrad;
	HANDLE_ERROR(cudaMalloc(&gpuGrad, bytes*2));

	//copy the gradient data to the GPU
	HANDLE_ERROR(cudaMemcpy(gpuGrad, cpuGrad, bytes*2, cudaMemcpyHostToDevice));

	//allocate space on the GPU for the atan2 look-up table
	T* gpuTable;
	HANDLE_ERROR(cudaMalloc(&gpuTable, bytes_table));

	//copy the atan2 values to the GPU
	HANDLE_ERROR(cudaMemcpy(gpuTable, cpuTable, bytes_table, cudaMemcpyHostToDevice));

	//call the GPU version of the update direction function
	gpu_update_dir<T>(gpuVote, gpuGrad, gpuTable, phi, rmax, x, y);

	//copy the updated gradient image back to the CPU
	HANDLE_ERROR(cudaMemcpy(cpuGrad, gpuGrad, bytes*2, cudaMemcpyDeviceToHost));

	//free allocated memory
	HANDLE_ERROR(cudaFree(gpuTable));
	HANDLE_ERROR(cudaFree(gpuVote));
	HANDLE_ERROR(cudaFree(gpuGrad));
}
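
/* Usage sketch (illustrative only): the image size, parameter values, and the atan2
   table construction below are assumptions inferred from the kernel's indexing
   (lut_i = (rmax - dy) * (2*rmax+1) + (rmax - dx)), not part of this header.

	const unsigned int w = 512, h = 512, rmax = 8;		//hypothetical image size and voting radius
	const float phi = 15.0f * 3.14159265f / 180.0f;		//hypothetical cone half-angle (radians)

	std::vector<float> vote(w * h);						//vote image produced by a prior voting pass
	std::vector<float> grad(2 * w * h);					//interleaved gradient image; grad[2*i] holds the direction

	//build the (2*rmax+1) x (2*rmax+1) atan2 look-up table: entry (r, c) stores atan2(rmax - r, rmax - c)
	int d = 2 * (int)rmax + 1;
	std::vector<float> table(d * d);
	for(int r = 0; r < d; r++)
		for(int c = 0; c < d; c++)
			table[r * d + c] = atan2f((float)((int)rmax - r), (float)((int)rmax - c));

	//refine the gradient directions in place
	stim::cuda::cpu_update_dir<float>(vote.data(), grad.data(), table.data(), phi, rmax, w, h);
*/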
}
}
#endif