@@ -169,7 +169,51 @@ void forward_convolutional_layer_gpu(convolutional_layer l, network_state state)
        l.dstTensorDesc,
        output16);

    cuda_convert_f16_to_f32(output16, output16_size, l.output_gpu);

    if (l.batch_normalize)
    {
        if (state.train) // Training
        {
            copy_ongpu(l.outputs*l.batch / 2, output16, 1, l.x_gpu, 1);
            //cudaMemcpyAsync(l.x_gpu, output16, l.outputs*l.batch*sizeof(half), cudaMemcpyDefault, get_cuda_stream());
            float one = 1;
            float zero = 0;
            // Batch-normalization can still take FP16 inputs and outputs, saving half the bandwidth
            // compared to FP32, it's just that the statistics and value adjustment should be done in FP32.
            cudnnBatchNormalizationForwardTraining(cudnn_handle(),
                CUDNN_BATCHNORM_SPATIAL,
                &one,
                &zero,
                l.normDstTensorDescF16,
                l.x_gpu,                // input
                l.normDstTensorDescF16,
                output16,               // output
                l.normTensorDesc,
                l.scales_gpu,
                l.biases_gpu,
                .01,
                l.rolling_mean_gpu,     // output (should be FP32)
                l.rolling_variance_gpu, // output (should be FP32)
                .00001,
                l.mean_gpu,             // output (should be FP32)
                l.variance_gpu);        // output (should be FP32)

            cuda_convert_f16_to_f32(output16, output16_size, l.output_gpu);
            //forward_batchnorm_layer_gpu(l, state);
        }
        else // Detection
        {
            cuda_convert_f16_to_f32(output16, output16_size, l.output_gpu);
            normalize_gpu(l.output_gpu, l.rolling_mean_gpu, l.rolling_variance_gpu, l.batch, l.out_c, l.out_h*l.out_w);
            scale_bias_gpu(l.output_gpu, l.scales_gpu, l.batch, l.out_c, l.out_h*l.out_w);
            add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.out_c, l.out_w*l.out_h);
        }
    }
    else // BIAS only
    {
        cuda_convert_f16_to_f32(output16, output16_size, l.output_gpu);
        add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
    }

#else
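
// --- Illustrative sketch (not part of the diff) -----------------------------
// The FP16 path above repeatedly widens output16 back to FP32 with
// cuda_convert_f16_to_f32, and copy_ongpu(l.outputs*l.batch / 2, ...) pushes
// FP16 data through a float-element copy, which is presumably why only half
// as many elements are requested (two halves occupy one float slot).
// The conversion helpers themselves are defined elsewhere in the repo; a
// minimal sketch of such element-wise kernels (the _sketch names, block size
// and default-stream launch are assumptions) could look like:

#include <cuda_fp16.h>

__global__ void float_to_half_kernel(const float *src, half *dst, size_t n)
{
    size_t i = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) dst[i] = __float2half(src[i]);
}

__global__ void half_to_float_kernel(const half *src, float *dst, size_t n)
{
    size_t i = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) dst[i] = __half2float(src[i]);
}

void cuda_convert_f32_to_f16_sketch(const float *input_f32, size_t size, half *output_f16)
{
    const int block = 256;
    float_to_half_kernel<<<(size + block - 1) / block, block>>>(input_f32, output_f16, size);
}

void cuda_convert_f16_to_f32_sketch(const half *input_f16, size_t size, float *output_f32)
{
    const int block = 256;
    half_to_float_kernel<<<(size + block - 1) / block, block>>>(input_f16, output_f32, size);
}
// -----------------------------------------------------------------------------
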
@@ -186,7 +230,7 @@ void forward_convolutional_layer_gpu(convolutional_layer l, network_state state)
        &one,
        l.dstTensorDesc,
        l.output_gpu);
#endif // CUDNN_HALF

#else
@@ -203,12 +247,14 @@ void forward_convolutional_layer_gpu(convolutional_layer l, network_state state)
    }
#endif

#ifndef CUDNN_HALF
    if (l.batch_normalize) {
        forward_batchnorm_layer_gpu(l, state);
    }
    else {
        add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
    }
#endif // no CUDNN_HALF

    activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
    //if(l.dot > 0) dot_error_gpu(l);
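
// --- Illustrative sketch (not part of the diff) -----------------------------
// The bias-only path adds one bias value per output channel across an NCHW
// tensor. A naive kernel with the semantics implied by the
// add_bias_gpu(output, biases, batch, n, size) calls above (the kernel name is
// an assumption, not the repo's implementation) could look like:

__global__ void add_bias_kernel_sketch(float *output, const float *biases,
                                       int batch, int n, int size)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= batch * n * size) return;
    int c = (idx / size) % n;   // channel index in NCHW layout
    output[idx] += biases[c];
}
// -----------------------------------------------------------------------------
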
@@ -222,12 +268,13 @@ void backward_convolutional_layer_gpu(convolutional_layer l, network_state state)
    backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);

#ifndef CUDNN_HALF
    if(l.batch_normalize){
        backward_batchnorm_layer_gpu(l, state);
        //axpy_ongpu(l.outputs*l.batch, -state.net.decay, l.x_gpu, 1, l.delta_gpu, 1);
    } else {
        //axpy_ongpu(l.outputs*l.batch, -state.net.decay, l.output_gpu, 1, l.delta_gpu, 1);
        //backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
    }
#endif // no CUDNN_HALF

    float *original_input = state.input;

    if(l.xnor) state.input = l.binary_input_gpu;
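
// --- Illustrative sketch (not part of the diff) -----------------------------
// backward_bias_gpu above reduces the incoming delta into a per-channel bias
// gradient: db[c] += sum over batch and spatial positions of delta. A naive
// one-thread-per-channel kernel with those semantics (the kernel name is an
// assumption; the repo's real kernel is far more parallel) could look like:

__global__ void backward_bias_kernel_sketch(float *bias_updates, const float *delta,
                                            int batch, int n, int size)
{
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (c >= n) return;
    float sum = 0.f;
    for (int b = 0; b < batch; ++b) {
        for (int i = 0; i < size; ++i) {
            sum += delta[(b * n + c) * size + i];   // NCHW indexing
        }
    }
    bias_updates[c] += sum;   // accumulate into the update buffer
}
// -----------------------------------------------------------------------------
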
@@ -257,6 +304,40 @@ void backward_convolutional_layer_gpu(convolutional_layer l, network_state state)
    cuda_convert_f32_to_f16(state.input, input16_size, input16);
    cuda_convert_f32_to_f16(l.delta_gpu, delta16_size, delta16);

    if (l.batch_normalize) {
        //if (!state.train) {
        //    l.mean_gpu = l.rolling_mean_gpu;
        //    l.variance_gpu = l.rolling_variance_gpu;
        //}
        float one = 1;
        float zero = 0;
        cudnnBatchNormalizationBackward(cudnn_handle(),
            CUDNN_BATCHNORM_SPATIAL,
            &one,
            &zero,
            &one,
            &one,
            l.normDstTensorDescF16,
            l.x_gpu,                // input
            l.normDstTensorDescF16,
            delta16,                // input
            l.normDstTensorDescF16,
            l.x_norm_gpu,           // output
            l.normTensorDesc,
            l.scales_gpu,           // input (should be FP32)
            l.scale_updates_gpu,    // output (should be FP32)
            l.bias_updates_gpu,     // output (should be FP32)
            .00001,
            l.mean_gpu,             // input (should be FP32)
            l.variance_gpu);        // input (should be FP32)
        copy_ongpu(l.outputs*l.batch / 2, l.x_norm_gpu, 1, delta16, 1);
        //cudaMemcpyAsync(delta16, l.x_norm_gpu, l.outputs*l.batch * sizeof(half), cudaMemcpyDefault, get_cuda_stream());
    }
    else
    {
        //backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
    }

    // convert input: state.input (x), l.delta_gpu (y) from fp32 to fp16
    // get output: l.weight_updates_gpu (dw) and convert it to fp32 (ONLY if it is fp16)
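
// --- Illustrative sketch (not part of the diff) -----------------------------
// Note on the scaling factors above: passing &one, &zero, &one, &one asks cuDNN
// to overwrite dx (l.x_norm_gpu) but to accumulate the FP32 scale/bias gradients
// into l.scale_updates_gpu / l.bias_updates_gpu.
// The trailing comments describe the next step: x and dy stay in FP16 for the
// filter-gradient convolution, and dw is widened back to FP32 only when it was
// actually produced in FP16. Assuming the hypothetical names below and reusing
// the conversion sketch from the forward pass, the guard is just:

void widen_weight_updates_if_f16(int dw_is_f16, const half *weight_updates16,
                                 size_t n, float *weight_updates_f32)
{
    if (dw_is_f16) {
        // dw came out of the FP16 convolution; widen it so the FP32 optimizer
        // state keeps full precision.
        cuda_convert_f16_to_f32_sketch(weight_updates16, n, weight_updates_f32);
    }
    // otherwise dw is already FP32 and nothing needs to be converted
}
// -----------------------------------------------------------------------------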