@@ -23,7 +23,7 @@ void scale_bias_gpu(float *output, float *biases, int batch, int n, int size)
     dim3 dimGrid((size-1)/BLOCK + 1, n, batch);
     dim3 dimBlock(BLOCK, 1, 1);
 
-    scale_bias_kernel<<<dimGrid, dimBlock>>>(output, biases, n, size);
+    scale_bias_kernel<<<dimGrid, dimBlock, 0, get_cuda_stream()>>>(output, biases, n, size);
     check_error(cudaPeekAtLastError());
 }
 
@@ -67,7 +67,7 @@ void add_bias_gpu(float *output, float *biases, int batch, int n, int size)
     dim3 dimGrid((size-1)/BLOCK + 1, n, batch);
     dim3 dimBlock(BLOCK, 1, 1);
 
-    add_bias_kernel<<<dimGrid, dimBlock>>>(output, biases, n, size);
+    add_bias_kernel<<<dimGrid, dimBlock, 0, get_cuda_stream()>>>(output, biases, n, size);
     check_error(cudaPeekAtLastError());
 }
 
@@ -427,7 +427,7 @@ __global__ void mul_kernel(int N, float *X, int INCX, float *Y, int INCY)
 extern "C" void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial)
 {
     size_t N = batch*filters*spatial;
-    normalize_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, mean, variance, batch, filters, spatial);
+    normalize_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream()>>>(N, x, mean, variance, batch, filters, spatial);
     check_error(cudaPeekAtLastError());
 }
@@ -490,13 +490,13 @@ __global__ void fast_variance_kernel(float *x, float *mean, int batch, int filt
 
 extern "C" void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *mean)
 {
-    fast_mean_kernel<<<filters, BLOCK>>>(x, batch, filters, spatial, mean);
+    fast_mean_kernel<<<filters, BLOCK, 0, get_cuda_stream()>>>(x, batch, filters, spatial, mean);
     check_error(cudaPeekAtLastError());
 }
 
 extern "C" void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
 {
-    fast_variance_kernel<<<filters, BLOCK>>>(x, mean, batch, filters, spatial, variance);
+    fast_variance_kernel<<<filters, BLOCK, 0, get_cuda_stream() >>>(x, mean, batch, filters, spatial, variance);
     check_error(cudaPeekAtLastError());
 }
 
@@ -520,13 +520,13 @@ extern "C" void axpy_ongpu(int N, float ALPHA, float * X, int INCX, float * Y, i
 
 extern "C" void pow_ongpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY)
 {
-    pow_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX, Y, INCY);
+    pow_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream() >>>(N, ALPHA, X, INCX, Y, INCY);
     check_error(cudaPeekAtLastError());
 }
 
 extern "C" void axpy_ongpu_offset(int N, float ALPHA, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
 {
-    axpy_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY);
+    axpy_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream()>>>(N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY);
     check_error(cudaPeekAtLastError());
 }
 
@@ -543,7 +543,7 @@ extern "C" void mul_ongpu(int N, float * X, int INCX, float * Y, int INCY)
 
 extern "C" void copy_ongpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
 {
-    copy_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, OFFX, INCX, Y, OFFY, INCY);
+    copy_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream()>>>(N, X, OFFX, INCX, Y, OFFY, INCY);
     check_error(cudaPeekAtLastError());
 }
 
@@ -567,20 +567,20 @@ __global__ void flatten_kernel(int N, float *x, int spatial, int layers, int bat
 extern "C" void flatten_ongpu(float *x, int spatial, int layers, int batch, int forward, float *out)
 {
     int size = spatial*batch*layers;
-    flatten_kernel<<<cuda_gridsize(size), BLOCK>>>(size, x, spatial, layers, batch, forward, out);
+    flatten_kernel<<<cuda_gridsize(size), BLOCK, 0, get_cuda_stream()>>>(size, x, spatial, layers, batch, forward, out);
     check_error(cudaPeekAtLastError());
 }
 
 extern "C" void reorg_ongpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
 {
     int size = w*h*c*batch;
-    reorg_kernel<<<cuda_gridsize(size), BLOCK>>>(size, x, w, h, c, batch, stride, forward, out);
+    reorg_kernel<<<cuda_gridsize(size), BLOCK, 0, get_cuda_stream()>>>(size, x, w, h, c, batch, stride, forward, out);
     check_error(cudaPeekAtLastError());
 }
 
 extern "C" void mask_ongpu(int N, float * X, float mask_num, float * mask)
 {
-    mask_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, mask_num, mask);
+    mask_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream() >>>(N, X, mask_num, mask);
     check_error(cudaPeekAtLastError());
 }
 
@@ -599,7 +599,7 @@ extern "C" void constrain_ongpu(int N, float ALPHA, float * X, int INCX)
 
 extern "C" void scal_ongpu(int N, float ALPHA, float * X, int INCX)
 {
-    scal_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
+    scal_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream()>>>(N, ALPHA, X, INCX);
     check_error(cudaPeekAtLastError());
 }
 
@@ -611,7 +611,7 @@ extern "C" void supp_ongpu(int N, float ALPHA, float * X, int INCX)
 
 extern "C" void fill_ongpu(int N, float ALPHA, float * X, int INCX)
 {
-    fill_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
+    fill_kernel<<<cuda_gridsize(N), BLOCK, 0, get_cuda_stream()>>>(N, ALPHA, X, INCX);
     check_error(cudaPeekAtLastError());
 }
 
@@ -766,6 +766,6 @@ extern "C" void softmax_gpu(float *input, int n, int offset, int groups, float t
 {
     int inputs = n;
     int batch = groups;
-    softmax_kernel<<<cuda_gridsize(batch), BLOCK>>>(inputs, offset, batch, input, temp, output);
+    softmax_kernel<<<cuda_gridsize(batch), BLOCK, 0, get_cuda_stream()>>>(inputs, offset, batch, input, temp, output);
     check_error(cudaPeekAtLastError());
 }
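The change is the same in every hunk: each kernel launch gains the two optional launch-configuration arguments, the dynamic shared-memory size (left at 0, so nothing changes there) and a stream, moving all of these BLAS kernels off the legacy default stream and onto the stream returned by get_cuda_stream(). The accessor itself is not part of this diff; below is a minimal sketch under the assumption that it lazily creates a single process-wide stream and hands back the same handle on every call. The names and the fallback behavior are illustrative, not the fork's actual implementation.

#include <cuda_runtime.h>

/* Hypothetical sketch of the accessor this patch assumes: one stream,
 * created on first use, reused by every launch in the file. */
static cudaStream_t global_stream;     /* illustrative name, not the real one */
static int global_stream_init = 0;

cudaStream_t get_cuda_stream(void)
{
    if (!global_stream_init) {
        /* Created once; all later launches reuse the handle, so these
         * kernels stay ordered relative to each other but no longer
         * implicitly synchronize with the legacy default stream. */
        if (cudaStreamCreate(&global_stream) != cudaSuccess) {
            global_stream = 0;         /* fall back to the default stream */
        }
        global_stream_init = 1;
    }
    return global_stream;
}

Because every launch shares one stream, the relative ordering of these kernels is preserved; the likely motivation is that they can now overlap with work issued on other streams (asynchronous copies, cuDNN calls), which the legacy default stream would otherwise serialize against.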