Temporary experimental XNOR on GPU (repack channels)

pull/2282/head
AlexeyAB 6 years ago
parent 920d792a0c
commit 4c05166215
  1. src/convolutional_kernels.cu (134)
  2. src/convolutional_layer.c (5)
  3. src/gemm.c (1)
  4. src/http_stream.cpp (3)
  5. src/im2col.h (11)
  6. src/im2col_kernels.cu (259)
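
Note: the "repack channels" idea below takes NCHW input, groups the channels in blocks of 32, and interleaves each block so that the 32 channel values of one pixel sit next to each other; their sign bits can then be packed into a single uint32_t for the binary (XNOR) GEMM. A minimal CPU sketch of that layout transform, mirroring what repack_input_kernel in im2col_kernels.cu computes (helper name is hypothetical; assumes c is a multiple of 32, as required by the new code path):

// NCHW -> blocks of 32 channels interleaved per pixel:
// out[chan*w*h + i*32 + c_pack] = in[(chan + c_pack)*w*h + i]
static void repack_channels_cpu(const float *in, float *out, int w, int h, int c)
{
    const int items_per_channel = w * h;
    for (int chan = 0; chan < c; chan += 32)
        for (int i = 0; i < items_per_channel; ++i)
            for (int c_pack = 0; c_pack < 32; ++c_pack)
                out[chan*items_per_channel + i*32 + c_pack] =
                    in[(chan + c_pack)*items_per_channel + i];
}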

@@ -190,7 +190,7 @@ void forward_convolutional_layer_gpu(convolutional_layer l, network_state state)
//state.input = l.binary_input_gpu;
//cudaDeviceSynchronize();
-if (l.align_bit_weights_gpu && !state.train && l.c >= 256 && l.size > 1)
+if (l.align_bit_weights_gpu && !state.train && l.c >= 64)// && l.size > 1)
{
//return;
cudaError_t status = cudaSuccess;
@@ -207,10 +207,135 @@ void forward_convolutional_layer_gpu(convolutional_layer l, network_state state)
size_t t_bit_input_size = t_intput_size / 8;// +1;
//if(0)
if (l.c % 32 == 0)
//if (l.stride == 1 && l.pad == 1 && l.c % 32 == 0)
//if(1)
{
//printf("\n\n l.index = %d, l.w = %d, l.c = %d, l.n = %d, l.stride = %d, l.pad = %d - new XNOR \n", l.index, l.w, l.c, l.n, l.stride, l.pad);
//printf("l.align_workspace_size = %d, (l.c * l.w * l.h) = %d \n", l.align_workspace_size, (l.c * l.w * l.h));
//float *intput_cpu = (float *)calloc(l.inputs, sizeof(float));
// state.input
//cudaMemcpy(intput_cpu, state.input, l.inputs * sizeof(float), cudaMemcpyDefault);
int ldb_align = l.lda_align;
size_t new_ldb = k + (ldb_align - k%ldb_align); // (k / 8 + 1) * 8;
size_t t_intput_size = new_ldb * l.bit_align;// n;
size_t t_bit_input_size = t_intput_size / 8;// +1;
const int new_c = l.c / 32;
//float *re_packed_input = (float *)calloc(l.c * l.w * l.h, sizeof(float));
//uint32_t *bin_re_packed_input = (uint32_t *)calloc(new_c * l.w * l.h + 1, sizeof(uint32_t));
// float32x4 by channel (as in cuDNN)
//repack_input(intput_cpu, re_packed_input, l.w, l.h, l.c);
// 32 x floats -> 1 x uint32_t
//float_to_bit(re_packed_input, (uint8_t *)bin_re_packed_input, l.c * l.w * l.h);
//cudaDeviceSynchronize();
//start_timer();
repack_input_gpu_bin(state.input, (uint32_t *)l.align_workspace_gpu, l.w, l.h, l.c);
//repack_input_gpu(state.input, state.workspace, l.w, l.h, l.c);
// 32 x floats -> 1 x uint32_t
//float_to_bit_gpu(state.workspace, (unsigned char *)l.align_workspace_gpu, l.c * l.w * l.h);// l.align_workspace_size);
//cudaDeviceSynchronize();
//stop_timer_and_show_name("repack_input_gpu + float_to_bit_gpu");
//free(re_packed_input);
// slow - convolution the packed inputs and weights: float x 32 by channel (as in cuDNN)
//convolution_repacked((uint32_t *)bin_re_packed_input, (uint32_t *)l.align_bit_weights, l.output,
// l.w, l.h, l.c, l.n, l.size, l.pad, l.new_lda, l.mean_arr);
// // then exit from if()
//float *b = state.workspace;
//float *b = (float *)calloc(100 * 1024 * 1024, sizeof(float));
//float *c = l.output;
//memset(c, 0, l.outputs * sizeof(float));
//im2col_cpu_custom((float *)bin_re_packed_input, new_c, l.h, l.w, l.size, l.stride, l.pad, b);
//cudaMemcpy(l.align_workspace_gpu, bin_re_packed_input, (new_c * l.w * l.h + 1) * sizeof(uint32_t), cudaMemcpyDefault);
//start_timer();
im2col_ongpu(l.align_workspace_gpu, new_c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
//cudaDeviceSynchronize();
//stop_timer_and_show_name("im2col_ongpu");
//free(bin_re_packed_input);
int new_k = l.size*l.size*l.c / 32;
// good for (l.c == 64)
//gemm_nn_bin_32bit_packed(m, n, new_k, 1,
// l.align_bit_weights, l.new_lda/32,
// b, n,
// c, n, l.mean_arr);
// // then exit from if()
//size_t new_ldb = k + (ldb_align - k%ldb_align); // (k / 8 + 1) * 8;
//size_t t_intput_size = new_ldb * l.bit_align;// n;
//size_t t_bit_input_size = t_intput_size / 8;// +1;
//char *t_bit_input = (char *)calloc(t_bit_input_size, sizeof(char));
//transpose_uint32((uint32_t *)b, (uint32_t *)t_bit_input, new_k, n, n, new_ldb);
//cudaMemcpy(l.transposed_align_workspace_gpu, t_bit_input, t_bit_input_size * sizeof(char), cudaMemcpyDefault);
//cudaMemcpy(state.workspace, b, t_bit_input_size * sizeof(char), cudaMemcpyDefault);
//printf("\n n = %d, n % 32 = %d, new_ldb = %d, new_ldb % 32 = %d \n", n, n % 32, new_ldb, new_ldb % 32);
//start_timer();
transpose_uint32_gpu_2((uint32_t *)state.workspace, (uint32_t *)l.transposed_align_workspace_gpu, new_k, n, n, new_ldb);
//cudaDeviceSynchronize();
//stop_timer_and_show_name("transpose_uint32_gpu");
//cudaDeviceSynchronize();
//stop_timer_and_show_name("repack_input_gpu_bin + im2col_ongpu + transpose_uint32_gpu_2");
//start_timer();
gemm_nn_custom_bin_mean_transposed_gpu(m, n, k,
(unsigned char *)l.align_bit_weights_gpu, new_ldb, (unsigned char *)l.transposed_align_workspace_gpu,
new_ldb, l.output_gpu, n, l.mean_arr_gpu, l.biases_gpu);
//cudaDeviceSynchronize();
//stop_timer_and_show_name("gemm_nn_custom_bin_mean_transposed_gpu");
// the main GEMM function
//gemm_nn_custom_bin_mean_transposed(m, n, k, 1, (uint8_t *)l.align_bit_weights, new_ldb, (uint8_t *)t_bit_input, new_ldb, c, n, l.mean_arr);
//add_bias(l.output, l.biases, l.batch, l.n, l.out_h*l.out_w);
//cudaMemcpy(l.output_gpu, l.output, l.outputs * sizeof(float), cudaMemcpyDefault);
// // alternative GEMM
//gemm_nn_bin_transposed_32bit_packed(m, n, new_k, 1,
// l.align_bit_weights, l.new_lda/32,
// t_bit_input, new_ldb / 32,
// c, n, l.mean_arr);
//free(t_bit_input);
//free(b);
}
else
{
//printf("\n\n l.index = %d, l.w = %d, l.c = %d, l.n = %d, l.stride = %d, l.pad = %d - old XNOR \n", l.index, l.w, l.c, l.n, l.stride, l.pad);
//cudaDeviceSynchronize();
int i = 0;
/*
// if (l.stride == 1 && l.c >= 256 && l.size > 1)
if (l.stride == 1 && l.c >= 1024 && l.size > 1 && 0)// && l.w >= 13) // disabled
{
@@ -220,9 +345,9 @@ void forward_convolutional_layer_gpu(convolutional_layer l, network_state state)
//cudaDeviceSynchronize();
//stop_timer_and_show_name("im2col_align_bin_ongpu");
}
-else
+else*/
{
-//start_timer();
+start_timer();
im2col_align_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, l.align_workspace_gpu, l.bit_align);
//cudaDeviceSynchronize();
//stop_timer_and_show_name("im2col_align_ongpu");
@@ -239,6 +364,9 @@ void forward_convolutional_layer_gpu(convolutional_layer l, network_state state)
//cudaDeviceSynchronize();
//stop_timer_and_show_name("transpose_bin_gpu");
//cudaDeviceSynchronize();
//stop_timer_and_show_name("im2col_align_ongpu + float_to_bit_gpu + transpose_bin_gpu");
// should be optimized
//if(0) {//if (k > 1000) { // sequentially input-shared - BAD
// gemm_nn_custom_bin_mean_transposed_sequentially_gpu(m, n, k,
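
In short, the new l.c % 32 == 0 branch above replaces the per-layer float im2col + bit-packing sequence with a packed pipeline. A simplified sketch of the call order (same calls and parameters as in the hunk above, with m, n, k and new_ldb as defined there; error handling omitted):

// 1) pack the sign bits of each group of 32 input channels into one uint32_t per pixel
repack_input_gpu_bin(state.input, (uint32_t *)l.align_workspace_gpu, l.w, l.h, l.c);
// 2) ordinary im2col on the 32x-smaller packed "channels" (new_c = l.c / 32)
im2col_ongpu(l.align_workspace_gpu, l.c / 32, l.h, l.w, l.size, l.stride, l.pad, state.workspace);
// 3) transpose the packed columns into the layout the binary GEMM expects
transpose_uint32_gpu_2((uint32_t *)state.workspace, (uint32_t *)l.transposed_align_workspace_gpu,
    l.size*l.size*l.c / 32, n, n, new_ldb);
// 4) XNOR + popcount GEMM, scaled by l.mean_arr_gpu and biased by l.biases_gpu
gemm_nn_custom_bin_mean_transposed_gpu(m, n, k,
    (unsigned char *)l.align_bit_weights_gpu, new_ldb,
    (unsigned char *)l.transposed_align_workspace_gpu, new_ldb,
    l.output_gpu, n, l.mean_arr_gpu, l.biases_gpu);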

@@ -704,8 +704,9 @@ void binary_align_weights(convolutional_layer *l)
}
-//if (l->c % 32 == 0)
+if (l->c % 32 == 0)
-if(gpu_index < 0 && l->stride == 1 && l->pad == 1 && l->c % 32 == 0)
+//if(gpu_index < 0 && l->stride == 1 && l->pad == 1 && l->c % 32 == 0)
//if (l->stride == 1 && l->pad == 1 && l->c % 32 == 0)
{
int fil, chan;
const int items_per_filter = l->c * l->size * l->size;

@@ -755,6 +755,7 @@ void gemm_nn_bin_32bit_packed(int M, int N, int K, float ALPHA,
__m256i all_1 = _mm256_set1_epi8(255);
__m256i xnor256 = _mm256_andnot_si256(xor256, all_1); // xnor = not(xor(a,b))
// waiting for - CPUID Flags: AVX512VPOPCNTDQ: __m512i _mm512_popcnt_epi32(__m512i a)
__m256 count = _mm256_setr_ps(
popcnt_32(_mm256_extract_epi32(xnor256, 0)),
popcnt_32(_mm256_extract_epi32(xnor256, 1)),
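
The AVX2 block above emulates XNOR as andnot(xor(a,b), all_1) and then takes per-lane popcounts. A scalar sketch of the binary dot product being accumulated (not the library function; the bits encode the signs +1/-1 of the original values):

// Matching bits contribute +1, differing bits contribute -1.
static inline int xnor_dot32(uint32_t a, uint32_t b)
{
    uint32_t xnor = ~(a ^ b);             // 1 where the signs agree
    int pop = __builtin_popcount(xnor);   // number of agreeing positions
    return 2 * pop - 32;                  // agreements minus disagreements
}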

@@ -631,8 +631,9 @@ void stop_timer_and_show() {
}
void stop_timer_and_show_name(char *name) {
stop_timer();
std::cout << " " << name;
-stop_timer_and_show();
+std::cout << " " << get_time() * 1000 << " msec" << std::endl;
}
void show_total_time() {
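
The timing helpers patched above are used in the CUDA path in this pattern (a sketch based on the commented-out calls in convolutional_kernels.cu; the synchronize is needed so the measured time includes the kernel):

start_timer();
im2col_align_ongpu(state.input, l.c, l.h, l.w, l.size, l.stride, l.pad, l.align_workspace_gpu, l.bit_align);
cudaDeviceSynchronize();
stop_timer_and_show_name("im2col_align_ongpu");   // now prints "<name> <elapsed> msec"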

@@ -2,6 +2,7 @@
#define IM2COL_H
#include <stddef.h>
#include <stdint.h>
void im2col_cpu(float* data_im,
int channels, int height, int width,
@@ -26,6 +27,16 @@ void float_to_bit_gpu(float *src, unsigned char *dst, size_t size);
void transpose_bin_gpu(unsigned char *A, unsigned char *B, const int n, const int m,
const int lda, const int ldb, const int block_size);
void transpose_uint32_gpu(uint32_t *src, uint32_t *dst, int src_h, int src_w, int src_align, int dst_align);
void transpose_uint32_gpu_2(uint32_t *src, uint32_t *dst, int src_h, int src_w, int src_align, int dst_align);
void repack_input_gpu(float *input, float *re_packed_input, int w, int h, int c);
void repack_input_gpu_2(float *input, float *re_packed_input, int w, int h, int c);
void repack_input_gpu_bin(float *input, uint32_t *re_packed_input_bin, int w, int h, int c);
void fill_int8_gpu(unsigned char *src, unsigned char val, size_t size);
// shared_memory + partial coalescing = GOOD

@@ -571,6 +571,7 @@ __global__ void float_to_bit_gpu_kernel(float *src, unsigned char *dst, size_t size)
}
*/
/*
__global__ void float_to_bit_gpu_kernel(float *src, unsigned char *dst, size_t size)
{
//const int size_aligned = size + (WARP_SIZE - size % WARP_SIZE);
@@ -591,6 +592,7 @@ __global__ void float_to_bit_gpu_kernel(float *src, unsigned char *dst, size_t size)
const int lane_id = threadIdx.x % WARP_SIZE;
uint32_t bit_mask = __ballot(src_val > 0);
if (lane_id == 0) tmp[warp_id] = bit_mask;
__syncthreads();
@@ -602,11 +604,38 @@ __global__ void float_to_bit_gpu_kernel(float *src, unsigned char *dst, size_t size)
__syncthreads();
}
}
*/
__global__ void float_to_bit_gpu_kernel(float *src, unsigned char *dst, size_t size)
{
__shared__ uint32_t tmp[WARP_SIZE*32];
int index = 32*blockIdx.x*blockDim.x + threadIdx.x;
float src_val;
uint32_t *dst32_ptr = ((unsigned int*)dst);
int i;
for(i = 0; i < 32; ++i)
{
if ((index + i * 1024) < size) src_val = src[index + i*1024];
else src_val = 0;
//unsigned int bit_mask = __ballot_sync(0xffffffff, src_val > 0);
const int num_of_warps = blockDim.x / WARP_SIZE;
const int warp_id = threadIdx.x / WARP_SIZE;
const int lane_id = threadIdx.x % WARP_SIZE;
uint32_t bit_mask = __ballot(src_val > 0);
if (lane_id == 0) tmp[i * 32 + warp_id] = bit_mask;
}
__syncthreads();
dst32_ptr[blockIdx.x*blockDim.x + threadIdx.x] = tmp[threadIdx.x];
}
void float_to_bit_gpu(float *src, unsigned char *dst, size_t size)
{
-const int num_blocks = size / 1024 + 1;
+//const int num_blocks = size / 1024 + 1;
const int num_blocks = size / (32*1024) + 1;
float_to_bit_gpu_kernel<<<num_blocks, 1024, 0, get_cuda_stream()>>>(src, dst, size);
}
// --------------------------------
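// A CPU reference sketch (not part of this commit; helper name is hypothetical) for what
// float_to_bit_gpu produces: bit b of output word j is 1 iff src[32*j + b] > 0.
static void float_to_bit_cpu_ref(const float *src, uint32_t *dst, size_t size)
{
    for (size_t i = 0; i < size; i += 32) {
        uint32_t word = 0;
        for (int b = 0; b < 32 && (i + b) < size; ++b)
            if (src[i + b] > 0) word |= (1u << b);
        dst[i / 32] = word;
    }
}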
@@ -828,6 +857,234 @@ void transpose_bin_gpu(unsigned char *A, unsigned char *B, const int n, const int m,
}
// --------------------------------
__global__ void transpose_uint32_kernel(uint32_t *src, uint32_t *dst, int src_h, int src_w, int src_align, int dst_align)
{
//l.bit_align - aligned (n) by 32
//new_ldb - aligned (k) by 256
int index = blockIdx.x*blockDim.x + threadIdx.x;
//for (i = 0; i < src_h; i += 1)
int i = index % src_h; // l.size*l.size*l.c;
{
//for (j = 0; j < src_w; j += 1)
int j = index / src_h; // out_h*out_w;
if(j < src_w)
{
((uint32_t *)dst)[j*dst_align / 32 + i] = ((uint32_t *)src)[i*src_align + j];
}
}
}
void transpose_uint32_gpu(uint32_t *src, uint32_t *dst, int src_h, int src_w, int src_align, int dst_align)
{
int size = src_w * src_h;
const int num_blocks = size / BLOCK + 1;
transpose_uint32_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(src, dst, src_h, src_w, src_align, dst_align);
}
// --------------------------------
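// A CPU reference sketch (not part of this commit; helper name is hypothetical) for the
// uint32 transpose semantics. Both transpose_uint32 kernels compute
// dst[j*(dst_align/32) + i] = src[i*src_align + j] for i < src_h, j < src_w.
static void transpose_uint32_cpu_ref(const uint32_t *src, uint32_t *dst,
    int src_h, int src_w, int src_align, int dst_align)
{
    for (int i = 0; i < src_h; ++i)
        for (int j = 0; j < src_w; ++j)
            dst[j*(dst_align / 32) + i] = src[i*src_align + j];
}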
//#define TRANS_LOOP 10
__global__ void transpose_uint32_kernel_2(uint32_t *src, uint32_t *dst, int src_h, int src_w, int src_align, int dst_align)
{
__shared__ uint32_t tmp[33 * 32]; // misaligned_array[32x32]
const int w_align = 33;
//const int shared_size = w_align * 32;
//l.bit_align - aligned (n) by 32
//new_ldb - aligned (k) by 256
const int src_w_align = src_w + (32 - src_w % 32);
const int src_h_align = src_h + (32 - src_h % 32);
const int warps_in_width = src_w_align / 32;
const int warps_in_height = src_h_align / 32;
const int local_x = threadIdx.x % 32; // index % 32;
const int local_x_index = threadIdx.x / 32; // index / 32;
const int local_y = local_x_index % 32;
//#pragma unroll TRANS_LOOP
//for (int i = 0; i < TRANS_LOOP; ++i)
{
const int global_index = blockIdx.x;// blockIdx.x*TRANS_LOOP + i;// local_x_index / 32;
const int global_x_index = global_index % warps_in_width;
const int global_y_index = global_index / warps_in_width;
const int global_x = global_x_index * 32 + local_x;
const int global_y = global_y_index * 32 + local_y;
uint32_t val = 0;
if (global_x < src_w && global_y < src_h) {
val = src[global_y * src_align + global_x];
}
//dst[global_x * dst_align / 32 + global_y] = val;
//tmp[local_y * 32 + local_x] = val;
tmp[local_x * w_align + local_y] = val;
__syncthreads();
val = tmp[local_y * w_align + local_x];
const int new_global_x = global_y_index * 32 + local_x;
const int new_global_y = global_x_index * 32 + local_y;
if (new_global_x < src_h && new_global_y < src_w) {
dst[new_global_y * (dst_align / 32) + new_global_x] = val;
}
}
}
#define TRANS_BLOCK 1024
void transpose_uint32_gpu_2(uint32_t *src, uint32_t *dst, int src_h, int src_w, int src_align, int dst_align)
{
int src_w_align = src_w + (32 - src_w % 32);
int src_h_align = src_h + (32 - src_h % 32);
int size = src_w_align * src_h_align;
int num_blocks = size / TRANS_BLOCK;
transpose_uint32_kernel_2 << <num_blocks, TRANS_BLOCK, 0, get_cuda_stream() >> >(src, dst, src_h, src_w, src_align, dst_align);
}
// --------------------------------
// 32 channels -> 1 channel (with 32 floats)
// 256 channels -> 8 channels (with 32 floats)
__global__ void repack_input_kernel(float *input, float *re_packed_input, int w, int h, int c)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
const int items_per_channel = w * h;
int c_pack = index % 32;
int chan_index = index / 32;
int chan = (chan_index * 32) % c;
int i = (chan_index * 32) / c;
//for (chan = 0; chan < c; chan += 32)
{
//for (i = 0; i < items_per_channel; ++i)
if(i < items_per_channel)
{
//for (c_pack = 0; c_pack < 32; ++c_pack)
{
float src = input[(chan + c_pack)*items_per_channel + i];
re_packed_input[chan*items_per_channel + i * 32 + c_pack] = src;
}
}
}
}
void repack_input_gpu(float *input, float *re_packed_input, int w, int h, int c)
{
int size = w * h * c;
const int num_blocks = size / BLOCK + 1;
repack_input_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(input, re_packed_input, w, h, c);
}
// --------------------------------
// 32 channels -> 1 channel (with 32 floats)
// 256 channels -> 8 channels (with 32 floats)
__global__ void repack_input_kernel_2(float *input, float *re_packed_input, int w, int h, int c)
{
__shared__ uint32_t tmp[33 * 32]; // 33x32 is misaligned 32 x 32 to avoid bank conflicts
int index = blockIdx.x*blockDim.x + threadIdx.x;
const int items_per_channel = w * h;
int c_pack = index % 32;
int chan_index = index / 32;
int chan = (chan_index * 32) % c;
int i = (chan_index * 32) / c;
//for (chan = 0; chan < c; chan += 32)
{
//for (i = 0; i < items_per_channel; ++i)
if (i < items_per_channel)
{
//for (c_pack = 0; c_pack < 32; ++c_pack)
{
float src = input[(chan + c_pack)*items_per_channel + i];
re_packed_input[chan*items_per_channel + i * 32 + c_pack] = src;
}
}
}
}
void repack_input_gpu_2(float *input, float *re_packed_input, int w, int h, int c)
{
int size = w * h * c;
const int num_blocks = size / BLOCK + 1;
repack_input_kernel_2 << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(input, re_packed_input, w, h, c);
}
// --------------------------------
// 32 channels -> 1 channel (with 32 floats)
// 256 channels -> 8 channels (with 32 floats)
__global__ void repack_input_kernel_bin(float *input, uint32_t *re_packed_input_bin, int w, int h, int c)
{
__shared__ uint32_t tmp[32];
int index = blockIdx.x*blockDim.x + threadIdx.x;
const int num_of_warps = blockDim.x / WARP_SIZE;
const int warp_id = threadIdx.x / WARP_SIZE;
const int lane_id = threadIdx.x % WARP_SIZE;
const int items_per_channel = w * h;
int c_pack = index % 32;
int chan_index = index / 32;
//int chan = (chan_index * 32) % c;
//int i = (chan_index * 32) / c;
int i = (chan_index) % items_per_channel;
int chan = ((chan_index ) / items_per_channel)*32;
//for (chan = 0; chan < c; chan += 32)
if(chan < c)
{
//for (i = 0; i < items_per_channel; ++i)
//if (i < items_per_channel)
{
//for (c_pack = 0; c_pack < 32; ++c_pack)
{
float src = input[(chan + c_pack)*items_per_channel + i];
uint32_t bit_mask = __ballot(src > 0);
//if (threadIdx.x % 32 == 0)
// re_packed_input_bin[chan*items_per_channel/32 + i + c_pack/32] = bit_mask;
if (lane_id == 0) tmp[warp_id] = bit_mask;
__syncthreads();
if (warp_id == 0) {
if (lane_id < num_of_warps) {
re_packed_input_bin[chan*items_per_channel / 32 + i + lane_id] = tmp[lane_id];
}
}
__syncthreads();
}
}
}
}
void repack_input_gpu_bin(float *input, uint32_t *re_packed_input_bin, int w, int h, int c)
{
int size = w * h * c;
const int num_blocks = size / BLOCK + 1;
repack_input_kernel_bin << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(input, re_packed_input_bin, w, h, c);
}
// --------------------------------
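// A CPU reference sketch (not part of this commit; helper name is hypothetical) for the fused
// repack + binarization performed by repack_input_kernel_bin. For every pixel i and every
// block of 32 channels, the sign bits of those channels are packed into one word:
// out[(chan/32)*w*h + i], bit c_pack = (input[(chan + c_pack)*w*h + i] > 0).
static void repack_input_bin_cpu_ref(const float *input, uint32_t *out, int w, int h, int c)
{
    const int items_per_channel = w * h;
    for (int chan = 0; chan < c; chan += 32) {
        for (int i = 0; i < items_per_channel; ++i) {
            uint32_t word = 0;
            for (int c_pack = 0; c_pack < 32; ++c_pack)
                if (input[(chan + c_pack)*items_per_channel + i] > 0) word |= (1u << c_pack);
            out[(chan / 32)*items_per_channel + i] = word;
        }
    }
}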
__global__ void fill_int8_gpu_kernel(unsigned char *src, unsigned char val, size_t size) {
int index = blockIdx.x*blockDim.x + threadIdx.x;
