Minor fix for mAP calculation during training

pull/2160/head
AlexeyAB 6 years ago
parent 95773cfb42
commit dc827f4c1c
1. src/detector.c (30 changed lines)
2. src/image.c (8 changed lines)
3. src/network.c (32 changed lines)
4. src/network.h (1 changed line)

src/detector.c
@@ -67,6 +67,7 @@ void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, i
}
#ifdef GPU
cuda_free(net_map.workspace);
cuda_free(net_map.input_state_gpu);
if (*net_map.input16_gpu) cuda_free(*net_map.input16_gpu);
if (*net_map.output16_gpu) cuda_free(*net_map.output16_gpu);
#else
@@ -245,7 +246,7 @@ void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, i
#ifdef OPENCV
if (!dont_show) {
int draw_precision = 0;
int calc_map_for_each = 4 * train_images_num / (net.batch * net.subdivisions);
int calc_map_for_each = 4 * train_images_num / (net.batch * net.subdivisions); // calculate mAP for each 4 Epochs
if (calc_map && (i >= (iter_map + calc_map_for_each) || i == net.max_batches) && i >= net.burn_in && i >= 1000) {
if (l.random) {
printf("Resizing to initial size: %d x %d \n", init_w, init_h);
@@ -262,32 +263,7 @@ void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, i
}
// combine Training and Validation networks
network net_combined = make_network(net.n);
layer *old_layers = net_combined.layers;
net_combined = net;
net_combined.layers = old_layers;
net_combined.batch = 1;
int k;
for (k = 0; k < net.n; ++k) {
layer *l = &(net.layers[k]);
net_combined.layers[k] = net.layers[k];
net_combined.layers[k].batch = 1;
if (l->type == CONVOLUTIONAL) {
#ifdef CUDNN
net_combined.layers[k].normTensorDesc = net_map.layers[k].normTensorDesc;
net_combined.layers[k].normDstTensorDesc = net_map.layers[k].normDstTensorDesc;
net_combined.layers[k].normDstTensorDescF16 = net_map.layers[k].normDstTensorDescF16;
net_combined.layers[k].srcTensorDesc = net_map.layers[k].srcTensorDesc;
net_combined.layers[k].dstTensorDesc = net_map.layers[k].dstTensorDesc;
net_combined.layers[k].srcTensorDesc16 = net_map.layers[k].srcTensorDesc16;
net_combined.layers[k].dstTensorDesc16 = net_map.layers[k].dstTensorDesc16;
#endif // CUDNN
}
}
network net_combined = combine_train_valid_networks(net, net_map);
iter_map = i;
mean_average_precision = validate_detector_map(datacfg, cfgfile, weightfile, 0.25, 0.5, &net_combined);

src/image.c
@@ -710,7 +710,7 @@ IplImage* draw_train_chart(float max_img_loss, int max_batches, int number_of_li
char max_batches_buff[100];
sprintf(max_batches_buff, "in cfg max_batches=%d", max_batches);
cvPutText(img, max_batches_buff, cvPoint(draw_size - 195, img_size - 10), &font, CV_RGB(0, 0, 0));
cvPutText(img, "Press 's' to save: chart.jpg", cvPoint(5, img_size - 10), &font, CV_RGB(0, 0, 0));
cvPutText(img, "Press 's' to save: chart.png", cvPoint(5, img_size - 10), &font, CV_RGB(0, 0, 0));
printf(" If error occurs - run training with flag: -dont_show \n");
cvNamedWindow("average loss", CV_WINDOW_NORMAL);
cvMoveWindow("average loss", 0, 0);
@@ -770,7 +770,11 @@ void draw_train_loss(IplImage* img, int img_size, float avg_loss, float max_img_
cvShowImage("average loss", img);
int k = cvWaitKey(20);
if (k == 's' || current_batch == (max_batches - 1) || current_batch % 100 == 0) {
cvSaveImage("chart.jpg", img, 0);
//cvSaveImage("chart.jpg", img, 0);
IplImage* img_rgb = cvCreateImage(cvSize(img->width, img->height), 8, 3);
cvCvtColor(img, img_rgb, CV_RGB2BGR);
stbi_write_png("chart.png", img_rgb->width, img_rgb->height, 3, (char *)img_rgb->imageData, 0);
cvRelease(&img_rgb);
cvPutText(img, "- Saved", cvPoint(250, img_size - 10), &font, CV_RGB(255, 0, 0));
}
else

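For reference, the chart-saving change in a self-contained form: the IplImage that OpenCV draws is in BGR order and stb_image_write expects RGB, so the channels are swapped before stbi_write_png is called. save_chart_png below is an illustrative name, not part of the commit, and it passes widthStep as the row stride as a precaution against padded rows, whereas the committed code passes 0 (tightly packed):

```c
/* Sketch only, assuming OpenCV's C API and stb_image_write.h are available. */
#include <stdio.h>
#include <opencv2/core/core_c.h>
#include <opencv2/imgproc/imgproc_c.h>
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"

static void save_chart_png(IplImage *img, const char *filename)
{
    /* stb expects RGB byte order; OpenCV images are BGR, so swap channels. */
    IplImage *rgb = cvCreateImage(cvSize(img->width, img->height), IPL_DEPTH_8U, 3);
    cvCvtColor(img, rgb, CV_BGR2RGB);

    /* widthStep is the real row stride; IplImage rows may be padded, so this
       is safer than assuming a tightly packed buffer (stride 0). */
    if (!stbi_write_png(filename, rgb->width, rgb->height, 3, rgb->imageData, rgb->widthStep)) {
        fprintf(stderr, "Failed to write %s\n", filename);
    }
    cvReleaseImage(&rgb);
}
```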
src/network.c
@@ -878,4 +878,36 @@ void calculate_binary_weights(network net)
}
//printf("\n calculate_binary_weights Done! \n");
}
// combine Training and Validation networks
network combine_train_valid_networks(network net_train, network net_map)
{
network net_combined = make_network(net_train.n);
layer *old_layers = net_combined.layers;
net_combined = net_train;
net_combined.layers = old_layers;
net_combined.batch = 1;
int k;
for (k = 0; k < net_train.n; ++k) {
layer *l = &(net_train.layers[k]);
net_combined.layers[k] = net_train.layers[k];
net_combined.layers[k].batch = 1;
if (l->type == CONVOLUTIONAL) {
#ifdef CUDNN
net_combined.layers[k].normTensorDesc = net_map.layers[k].normTensorDesc;
net_combined.layers[k].normDstTensorDesc = net_map.layers[k].normDstTensorDesc;
net_combined.layers[k].normDstTensorDescF16 = net_map.layers[k].normDstTensorDescF16;
net_combined.layers[k].srcTensorDesc = net_map.layers[k].srcTensorDesc;
net_combined.layers[k].dstTensorDesc = net_map.layers[k].dstTensorDesc;
net_combined.layers[k].srcTensorDesc16 = net_map.layers[k].srcTensorDesc16;
net_combined.layers[k].dstTensorDesc16 = net_map.layers[k].dstTensorDesc16;
#endif // CUDNN
}
}
return net_combined;
}

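A note on the new helper: it builds a shallow copy of net_train, so the weight pointers in net_combined.layers still alias the training network; only the per-layer batch is forced to 1 and, under CUDNN, the tensor descriptors are taken from the batch-1 net_map. A minimal sketch of the intended call pattern, mirroring the call site in train_detector; the validate_detector_map prototype is inferred from that call site rather than taken from a header:

```c
#include "network.h"   /* darknet's src/network.h, which now declares the helper */

/* Prototype inferred from the call in train_detector(); the real declaration
   lives with the detector code. */
float validate_detector_map(char *datacfg, char *cfgfile, char *weightfile,
                            float thresh, float iou_thresh, network *existing_net);

/* Hypothetical wrapper: evaluate mAP on the current training weights without
   copying them, by combining the training net with the batch-1 net_map. */
static float map_on_current_weights(char *datacfg, char *cfgfile, char *weightfile,
                                    network net_train, network net_map)
{
    network net_combined = combine_train_valid_networks(net_train, net_map);
    return validate_detector_map(datacfg, cfgfile, weightfile, 0.25, 0.5, &net_combined);
}
```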
src/network.h
@@ -156,6 +156,7 @@ int get_network_nuisance(network net);
int get_network_background(network net);
YOLODLL_API void fuse_conv_batchnorm(network net);
YOLODLL_API void calculate_binary_weights(network net);
network combine_train_valid_networks(network net_train, network net_map);
#ifdef __cplusplus
}
