pull/4540/head
AlexeyAB 6 years ago
parent f831835125
commit 1626f9d495
Changed files:
  1. include/darknet.h (3)
  2. src/classifier.c (6)
  3. src/demo.c (13)
  4. src/detector.c (8)
  5. src/image_opencv.cpp (14)
  6. src/image_opencv.h (4)
  7. src/layer.c (49)

include/darknet.h
@@ -905,7 +905,8 @@ LIB_API image load_image_color(char *filename, int w, int h);
LIB_API void free_image(image m);
// layer.h
LIB_API void free_layer(layer);
LIB_API void free_layer_custom(layer l, int keep_cudnn_desc);
LIB_API void free_layer(layer l);
// data.c
LIB_API void free_data(data d);
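For orientation, a minimal caller-side sketch of the new entry point (illustration only, not part of the commit; the loop mirrors the detector.c hunk further down, where net_map is a secondary network used only for mAP evaluation): free_layer() keeps its old behaviour by delegating to free_layer_custom(l, 0), while keep_cudnn_desc = 1 releases the layer's buffers but leaves its cuDNN descriptors untouched.

    // Free all but the last layer of the mAP-evaluation network while keeping
    // the cuDNN tensor/filter descriptors alive (keep_cudnn_desc = 1).
    int k;
    for (k = 0; k < net_map.n - 1; ++k) {
        free_layer_custom(net_map.layers[k], 1);
    }

    // Existing callers are unaffected: free_layer(l) is now a thin wrapper
    // that calls free_layer_custom(l, 0), which also destroys the descriptors.
    free_layer(l);   // 'l' is a hypothetical layer variable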

src/classifier.c
@@ -110,7 +110,9 @@ void train_classifier(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
float max_img_loss = 10;
int number_of_lines = 100;
int img_size = 1000;
img = draw_train_chart(max_img_loss, net.max_batches, number_of_lines, img_size, dont_show);
char windows_name[100];
sprintf(windows_name, "average loss (id:%d)", random_gen());
img = draw_train_chart(windows_name, max_img_loss, net.max_batches, number_of_lines, img_size, dont_show);
#endif //OPENCV
data train;
@@ -174,7 +176,7 @@ void train_classifier(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
printf("%d, %.3f: %f, %f avg, %f rate, %lf seconds, %ld images\n", get_current_batch(net), (float)(*net.seen)/ train_images_num, loss, avg_loss, get_current_rate(net), sec(clock()-time), *net.seen);
#ifdef OPENCV
draw_train_loss(img, img_size, avg_loss, max_img_loss, i, net.max_batches, topk, draw_precision, topk_buff, dont_show, mjpeg_port);
draw_train_loss(windows_name, img, img_size, avg_loss, max_img_loss, i, net.max_batches, topk, draw_precision, topk_buff, dont_show, mjpeg_port);
#endif // OPENCV
if (i >= (iter_save + 1000)) {

src/demo.c
@@ -42,7 +42,6 @@ static int demo_json_port = -1;
static float* predictions[NFRAMES];
static int demo_index = 0;
static image images[NFRAMES];
static mat_cv* cv_images[NFRAMES];
static float *avg;
@@ -147,7 +146,6 @@ void demo(char *cfgfile, char *weightfile, float thresh, float hier_thresh, int
avg = (float *) calloc(l.outputs, sizeof(float));
for(j = 0; j < NFRAMES; ++j) predictions[j] = (float *) calloc(l.outputs, sizeof(float));
for(j = 0; j < NFRAMES; ++j) images[j] = make_image(1,1,3);
if (l.classes != demo_classes) {
printf("Parameters don't match: in cfg-file classes=%d, in data-file classes=%d \n", l.classes, demo_classes);
@@ -171,6 +169,7 @@ void demo(char *cfgfile, char *weightfile, float thresh, float hier_thresh, int
det_s = in_s;
for (j = 0; j < NFRAMES / 2; ++j) {
free_detections(dets, nboxes);
fetch_in_thread(0);
detect_in_thread(0);
det_img = in_img;
@@ -283,7 +282,6 @@ void demo(char *cfgfile, char *weightfile, float thresh, float hier_thresh, int
printf("\n cvWriteFrame \n");
}
release_mat(&show_img);
pthread_join(fetch_thread, 0);
pthread_join(detect_thread, 0);
@@ -296,6 +294,7 @@ void demo(char *cfgfile, char *weightfile, float thresh, float hier_thresh, int
if (flag_exit == 1) break;
if(delay == 0){
release_mat(&show_img);
show_img = det_img;
}
det_img = in_img;
@@ -320,13 +319,15 @@ void demo(char *cfgfile, char *weightfile, float thresh, float hier_thresh, int
}
// free memory
release_mat(&show_img);
release_mat(&in_img);
free_image(in_s);
free_detections(dets, nboxes);
free(avg);
for (j = 0; j < NFRAMES; ++j) free(predictions[j]);
for (j = 0; j < NFRAMES; ++j) free_image(images[j]);
demo_index = (NFRAMES + demo_index - 1) % NFRAMES;
for (j = 0; j < NFRAMES; ++j) {
release_mat(&cv_images[j]);
}
free_ptrs((void **)names, net.layers[net.n - 1].classes);

src/detector.c
@@ -45,7 +45,7 @@ void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, i
const int net_classes = net_map.layers[net_map.n - 1].classes;
int k; // free memory unnecessary arrays
for (k = 0; k < net_map.n - 1; ++k) free_layer(net_map.layers[k]);
for (k = 0; k < net_map.n - 1; ++k) free_layer_custom(net_map.layers[k], 1);
char *name_list = option_find_str(options, "names", "data/names.list");
int names_size = 0;
@@ -147,7 +147,9 @@ void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, i
float max_img_loss = 5;
int number_of_lines = 100;
int img_size = 1000;
img = draw_train_chart(max_img_loss, net.max_batches, number_of_lines, img_size, dont_show);
char windows_name[100];
sprintf(windows_name, "average loss (id:%d)", random_gen());
img = draw_train_chart(windows_name, max_img_loss, net.max_batches, number_of_lines, img_size, dont_show);
#endif //OPENCV
if (net.track) {
args.track = net.track;
@@ -292,7 +294,7 @@ void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, i
draw_precision = 1;
}
#ifdef OPENCV
draw_train_loss(img, img_size, avg_loss, max_img_loss, i, net.max_batches, mean_average_precision, draw_precision, "mAP%", dont_show, mjpeg_port);
draw_train_loss(windows_name, img, img_size, avg_loss, max_img_loss, i, net.max_batches, mean_average_precision, draw_precision, "mAP%", dont_show, mjpeg_port);
#endif // OPENCV
//if (i % 1000 == 0 || (i < 1000 && i % 100 == 0)) {

src/image_opencv.cpp
@@ -1002,7 +1002,7 @@ extern "C" void draw_detections_cv_v3(mat_cv* mat, detection *dets, int num, flo
// ====================================================================
// Draw Loss & Accuracy chart
// ====================================================================
extern "C" mat_cv* draw_train_chart(float max_img_loss, int max_batches, int number_of_lines, int img_size, int dont_show)
extern "C" mat_cv* draw_train_chart(char *windows_name, float max_img_loss, int max_batches, int number_of_lines, int img_size, int dont_show)
{
int img_offset = 60;
int draw_size = img_size - img_offset;
@@ -1047,10 +1047,10 @@ extern "C" mat_cv* draw_train_chart(float max_img_loss, int max_batches, int num
cv::putText(img, "Press 's' to save : chart.png", cv::Point(5, img_size - 10), cv::FONT_HERSHEY_COMPLEX_SMALL, 0.7, CV_RGB(0, 0, 0), 1, CV_AA);
if (!dont_show) {
printf(" If error occurs - run training with flag: -dont_show \n");
cv::namedWindow("average loss", cv::WINDOW_NORMAL);
cv::moveWindow("average loss", 0, 0);
cv::resizeWindow("average loss", img_size, img_size);
cv::imshow("average loss", img);
cv::namedWindow(windows_name, cv::WINDOW_NORMAL);
cv::moveWindow(windows_name, 0, 0);
cv::resizeWindow(windows_name, img_size, img_size);
cv::imshow(windows_name, img);
cv::waitKey(20);
}
}
@@ -1061,7 +1061,7 @@ extern "C" mat_cv* draw_train_chart(float max_img_loss, int max_batches, int num
}
// ----------------------------------------
extern "C" void draw_train_loss(mat_cv* img_src, int img_size, float avg_loss, float max_img_loss, int current_batch, int max_batches,
extern "C" void draw_train_loss(char *windows_name, mat_cv* img_src, int img_size, float avg_loss, float max_img_loss, int current_batch, int max_batches,
float precision, int draw_precision, char *accuracy_name, int dont_show, int mjpeg_port)
{
try {
@@ -1113,7 +1113,7 @@ extern "C" void draw_train_loss(mat_cv* img_src, int img_size, float avg_loss, f
int k = 0;
if (!dont_show) {
cv::imshow("average loss", img);
cv::imshow(windows_name, img);
k = cv::waitKey(20);
}
static int old_batch = 0;

src/image_opencv.h
@@ -92,8 +92,8 @@ void save_cv_jpg(mat_cv *img, const char *name);
void draw_detections_cv_v3(mat_cv* show_img, detection *dets, int num, float thresh, char **names, image **alphabet, int classes, int ext_output);
// Draw Loss & Accuracy chart
mat_cv* draw_train_chart(float max_img_loss, int max_batches, int number_of_lines, int img_size, int dont_show);
void draw_train_loss(mat_cv* img, int img_size, float avg_loss, float max_img_loss, int current_batch, int max_batches,
mat_cv* draw_train_chart(char *windows_name, float max_img_loss, int max_batches, int number_of_lines, int img_size, int dont_show);
void draw_train_loss(char *windows_name, mat_cv* img, int img_size, float avg_loss, float max_img_loss, int current_batch, int max_batches,
float precision, int draw_precision, char *accuracy_name, int dont_show, int mjpeg_port);
// Data augmentation
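A minimal sketch of how the updated charting API is meant to be called (pieced together from the classifier.c and detector.c hunks above; the surrounding variables are theirs, not new code): the caller builds a unique window title once, then passes that same title to both functions so each training run draws and updates its loss chart in its own OpenCV window.

    #ifdef OPENCV
    // One window title per training process, so parallel runs don't collide.
    char windows_name[100];
    sprintf(windows_name, "average loss (id:%d)", random_gen());
    mat_cv* img = draw_train_chart(windows_name, max_img_loss, net.max_batches,
                                   number_of_lines, img_size, dont_show);

    // Later, inside the training loop, update the same window by name.
    draw_train_loss(windows_name, img, img_size, avg_loss, max_img_loss,
                    i, net.max_batches, mean_average_precision, draw_precision,
                    "mAP%", dont_show, mjpeg_port);
    #endif // OPENCV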

src/layer.c
@@ -11,6 +11,11 @@ void free_sublayer(layer *l)
}
void free_layer(layer l)
{
free_layer_custom(l, 0);
}
void free_layer_custom(layer l, int keep_cudnn_desc)
{
if (l.share_layer != NULL) return; // don't free shared layers
if (l.antialiasing) {
@@ -204,31 +209,33 @@ void free_layer(layer l)
if (l.last_prev_state_gpu) cuda_free(l.last_prev_state_gpu);
if (l.last_prev_cell_gpu) cuda_free(l.last_prev_cell_gpu);
if (l.cell_gpu) cuda_free(l.cell_gpu);
#ifdef CUDNN_DISABLED // shouldn't be used for -map
if (l.srcTensorDesc) CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.srcTensorDesc));
if (l.dstTensorDesc) CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.dstTensorDesc));
if (l.srcTensorDesc16) CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.srcTensorDesc16));
if (l.dstTensorDesc16) CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.dstTensorDesc16));
if (l.dsrcTensorDesc) CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.dsrcTensorDesc));
if (l.ddstTensorDesc) CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.ddstTensorDesc));
if (l.dsrcTensorDesc16) CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.dsrcTensorDesc16));
if (l.ddstTensorDesc16) CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.ddstTensorDesc16));
if (l.normTensorDesc) CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.normTensorDesc));
if (l.normDstTensorDesc) CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.normDstTensorDesc));
if (l.normDstTensorDescF16) CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.normDstTensorDescF16));
#ifdef CUDNN // shouldn't be used for -map
if (!keep_cudnn_desc) {
if (l.srcTensorDesc) CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.srcTensorDesc));
if (l.dstTensorDesc) CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.dstTensorDesc));
if (l.srcTensorDesc16) CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.srcTensorDesc16));
if (l.dstTensorDesc16) CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.dstTensorDesc16));
if (l.dsrcTensorDesc) CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.dsrcTensorDesc));
if (l.ddstTensorDesc) CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.ddstTensorDesc));
if (l.dsrcTensorDesc16) CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.dsrcTensorDesc16));
if (l.ddstTensorDesc16) CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.ddstTensorDesc16));
if (l.normTensorDesc) CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.normTensorDesc));
if (l.normDstTensorDesc) CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.normDstTensorDesc));
if (l.normDstTensorDescF16) CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.normDstTensorDescF16));
if (l.weightDesc) CHECK_CUDNN(cudnnDestroyFilterDescriptor(l.weightDesc));
if (l.weightDesc16) CHECK_CUDNN(cudnnDestroyFilterDescriptor(l.weightDesc16));
if (l.dweightDesc) CHECK_CUDNN(cudnnDestroyFilterDescriptor(l.dweightDesc));
if (l.dweightDesc16) CHECK_CUDNN(cudnnDestroyFilterDescriptor(l.dweightDesc16));
if (l.weightDesc) CHECK_CUDNN(cudnnDestroyFilterDescriptor(l.weightDesc));
if (l.weightDesc16) CHECK_CUDNN(cudnnDestroyFilterDescriptor(l.weightDesc16));
if (l.dweightDesc) CHECK_CUDNN(cudnnDestroyFilterDescriptor(l.dweightDesc));
if (l.dweightDesc16) CHECK_CUDNN(cudnnDestroyFilterDescriptor(l.dweightDesc16));
if (l.convDesc) CHECK_CUDNN(cudnnDestroyConvolutionDescriptor(l.convDesc));
if (l.convDesc) CHECK_CUDNN(cudnnDestroyConvolutionDescriptor(l.convDesc));
if (l.poolingDesc) CHECK_CUDNN(cudnnDestroyPoolingDescriptor(l.poolingDesc));
if (l.poolingDesc) CHECK_CUDNN(cudnnDestroyPoolingDescriptor(l.poolingDesc));
//cudnnConvolutionFwdAlgo_t fw_algo, fw_algo16;
//cudnnConvolutionBwdDataAlgo_t bd_algo, bd_algo16;
//cudnnConvolutionBwdFilterAlgo_t bf_algo, bf_algo16;
//cudnnConvolutionFwdAlgo_t fw_algo, fw_algo16;
//cudnnConvolutionBwdDataAlgo_t bd_algo, bd_algo16;
//cudnnConvolutionBwdFilterAlgo_t bf_algo, bf_algo16;
}
#endif // CUDNN
#endif // GPU
