Merge pull request #1 from AlexeyAB/master

upstream
pull/4302/head
DongChan Cho authored this merge 6 years ago; committed via the GitHub web interface
commit fc3287e443
17 changed files (changed lines in parentheses):
  1. build/darknet/x64/cfg/Gaussian_yolov3_BDD.cfg (807)
  2. cfg/Gaussian_yolov3_BDD.cfg (807)
  3. include/darknet.h (2)
  4. src/box.c (2)
  5. src/classifier.c (2)
  6. src/coco.c (10)
  7. src/convolutional_layer.c (5)
  8. src/detector.c (18)
  9. src/gaussian_yolo_layer.c (284)
  10. src/gemm.c (68)
  11. src/go.c (1)
  12. src/image_opencv.cpp (4)
  13. src/network.c (7)
  14. src/parser.c (20)
  15. src/utils.c (6)
  16. src/yolo.c (8)
  17. src/yolo_layer.c (148)

build/darknet/x64/cfg/Gaussian_yolov3_BDD.cfg
@@ -0,0 +1,807 @@
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=64
subdivisions=16
width=512
height=512
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.0001
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1
max_epochs = 300
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
# Downsample
[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=32
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
# Downsample
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=2
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky
[shortcut]
from=-3
activation=linear
######################
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=57
activation=linear
[Gaussian_yolo]
mask = 6,7,8
anchors = 7,10, 14,24, 27,43, 32,97, 57,64, 92,109, 73,175, 141,178, 144,291
classes=10
num=9
jitter=.3
ignore_thresh = .5
truth_thresh = 1
iou_thresh=0.213
uc_normalizer=1.0
cls_normalizer=1.0
iou_normalizer=0.5
iou_loss=giou
scale_x_y=1.0
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 61
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=57
activation=linear
[Gaussian_yolo]
mask = 3,4,5
anchors = 7,10, 14,24, 27,43, 32,97, 57,64, 92,109, 73,175, 141,178, 144,291
classes=10
num=9
jitter=.3
ignore_thresh = .5
truth_thresh = 1
iou_thresh=0.213
uc_normalizer=1.0
cls_normalizer=1.0
iou_normalizer=0.5
iou_loss=giou
scale_x_y=1.0
random=1
[route]
layers = -4
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 36
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky
[convolutional]
size=1
stride=1
pad=1
filters=57
activation=linear
[Gaussian_yolo]
mask = 0,1,2
anchors = 7,10, 14,24, 27,43, 32,97, 57,64, 92,109, 73,175, 141,178, 144,291
classes=10
num=9
jitter=.3
ignore_thresh = .5
truth_thresh = 1
iou_thresh=0.213
uc_normalizer=1.0
cls_normalizer=1.0
iou_normalizer=0.5
iou_loss=giou
scale_x_y=1.0
random=1
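
Note on the head dimensions in this cfg: each [Gaussian_yolo] scale predicts, per anchor, 4 box coordinates plus 4 uncertainty (sigma) values, 1 objectness score and classes class scores, so the [convolutional] layer feeding it needs filters = anchors_per_scale * (8 + 1 + classes). A minimal sketch of that arithmetic (the helper name is illustrative, not part of darknet):

/* Filters required by the [convolutional] layer in front of a [Gaussian_yolo]
 * head: 8 = 4 box coordinates + 4 uncertainties, 1 = objectness. */
static int gaussian_yolo_filters(int anchors_per_scale, int classes)
{
    return anchors_per_scale * (8 + 1 + classes);
}
/* gaussian_yolo_filters(3, 10) == 57, matching filters=57 before each
 * [Gaussian_yolo] block above (mask selects 3 anchors, classes=10 for BDD). */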

cfg/Gaussian_yolov3_BDD.cfg
@@ -0,0 +1,807 @@
# (file contents identical to build/darknet/x64/cfg/Gaussian_yolov3_BDD.cfg above)
include/darknet.h
@@ -280,6 +280,7 @@ struct layer {
int random;
float ignore_thresh;
float truth_thresh;
float iou_thresh;
float thresh;
float focus;
int classfix;
@@ -329,6 +330,7 @@ struct layer {
float *weight_updates;
float scale_x_y;
float uc_normalizer;
float iou_normalizer;
float cls_normalizer;
IOU_LOSS iou_loss;

src/box.c
@@ -424,7 +424,7 @@ int nms_comparator_v3(const void *pa, const void *pb)
detection b = *(detection *)pb;
float diff = 0;
if (b.sort_class >= 0) {
diff = a.prob[b.sort_class] - b.prob[b.sort_class];
diff = a.prob[b.sort_class] - b.prob[b.sort_class]; // there is already: prob = objectness*prob
}
else {
diff = a.objectness - b.objectness;

src/classifier.c
@@ -1288,4 +1288,6 @@ void run_classifier(int argc, char **argv)
else if(0==strcmp(argv[2], "valid10")) validate_classifier_10(data, cfg, weights);
else if(0==strcmp(argv[2], "validcrop")) validate_classifier_crop(data, cfg, weights);
else if(0==strcmp(argv[2], "validfull")) validate_classifier_full(data, cfg, weights);
if (gpus && gpu_list && ngpus > 1) free(gpus);
}

src/coco.c
@@ -226,6 +226,12 @@ void validate_coco(char *cfgfile, char *weightfile)
fprintf(fp, "\n]\n");
fclose(fp);
if (val) free(val);
if (val_resized) free(val_resized);
if (buf) free(buf);
if (buf_resized) free(buf_resized);
if (thr) free(thr);
fprintf(stderr, "Total Detection Time: %f Seconds\n", (double)(time(0) - start));
}
@@ -307,7 +313,9 @@ void validate_coco_recall(char *cfgfile, char *weightfile)
}
fprintf(stderr, "%5d %5d %5d\tRPs/Img: %.2f\tIOU: %.2f%%\tRecall:%.2f%%\n", i, correct, total, (float)proposals/(i+1), avg_iou*100/total, 100.*correct/total);
free(id);
if (fps) free(fps);
if (id) free(id);
free_image(orig);
free_image(sized);
}

src/convolutional_layer.c
@@ -405,6 +405,11 @@ convolutional_layer make_convolutional_layer(int batch, int steps, int h, int w,
l.nweights = (c / groups) * n * size * size;
if (l.share_layer) {
if (l.size != l.share_layer->size || l.nweights != l.share_layer->nweights || l.c != l.share_layer->c || l.n != l.share_layer->n) {
printf("Layer size, nweights, channels or filters don't match for the share_layer");
getchar();
}
l.weights = l.share_layer->weights;
l.weight_updates = l.share_layer->weight_updates;

src/detector.c
@@ -556,6 +556,7 @@ void validate_detector(char *datacfg, char *cfgfile, char *weightfile, char *out
for (j = 0; j < classes; ++j) {
if (fps) fclose(fps[j]);
}
if (fps) free(fps);
if (coco) {
#ifdef WIN32
fseek(fp, -3, SEEK_CUR);
@@ -563,8 +564,15 @@ void validate_detector(char *datacfg, char *cfgfile, char *weightfile, char *out
fseek(fp, -2, SEEK_CUR);
#endif
fprintf(fp, "\n]\n");
fclose(fp);
}
if (fp) fclose(fp);
if (val) free(val);
if (val_resized) free(val_resized);
if (thr) free(thr);
if (buf) free(buf);
if (buf_resized) free(buf_resized);
fprintf(stderr, "Total Detection Time: %f Seconds\n", (double)time(0) - start);
}
@@ -793,6 +801,7 @@ float validate_detector_map(char *datacfg, char *cfgfile, char *weightfile, floa
}
//detection *dets = get_network_boxes(&net, val[t].w, val[t].h, thresh, hier_thresh, 0, 1, &nboxes, letter_box); // for letter_box=1
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
//if (nms) do_nms_obj(dets, nboxes, l.classes, nms);
char labelpath[4096];
replace_image_to_label(path, labelpath);
@@ -1099,6 +1108,11 @@ float validate_detector_map(char *datacfg, char *cfgfile, char *weightfile, floa
else {
free_network(net);
}
if (val) free(val);
if (val_resized) free(val_resized);
if (thr) free(thr);
if (buf) free(buf);
if (buf_resized) free(buf_resized);
return mean_average_precision;
}
@@ -1505,4 +1519,6 @@ void run_detector(int argc, char **argv)
free_list(options);
}
else printf(" There isn't such command: %s", argv[2]);
if (gpus && gpu_list && ngpus > 1) free(gpus);
}

src/gaussian_yolo_layer.c
@@ -81,7 +81,7 @@ layer make_gaussian_yolo_layer(int batch, int w, int h, int n, int total, int *m
*/
#endif
fprintf(stderr, "Gaussian_yolo\n");
//fprintf(stderr, "Gaussian_yolo\n");
srand(time(0));
return l;
@@ -140,32 +140,48 @@ box get_gaussian_yolo_box(float *x, float *biases, int n, int index, int i, int
return b;
}
float delta_gaussian_yolo_box(box truth, float *x, float *biases, int n, int index, int i, int j, int lw, int lh, int w, int h, float *delta, float scale, int stride)
float delta_gaussian_yolo_box(box truth, float *x, float *biases, int n, int index, int i, int j, int lw, int lh, int w, int h, float *delta, float scale, int stride, float iou_normalizer, IOU_LOSS iou_loss, float uc_normalizer, int accumulate)
{
box pred = get_gaussian_yolo_box(x, biases, n, index, i, j, lw, lh, w, h, stride);
float iou = box_iou(pred, truth);
float tx = (truth.x*lw - i);
float ty = (truth.y*lh - j);
float tw = log(truth.w*w / biases[2*n]);
float th = log(truth.h*h / biases[2*n + 1]);
float iou;
ious all_ious = { 0 };
all_ious.iou = box_iou(pred, truth);
all_ious.giou = box_giou(pred, truth);
if (pred.w == 0) { pred.w = 1.0; }
if (pred.h == 0) { pred.h = 1.0; }
float sigma_const = 0.3;
float epsi = pow(10,-9);
float in_exp_x = (tx - x[index + 0*stride])/x[index+1*stride];
float dx, dy, dw, dh;
iou = all_ious.iou;
float tx = (truth.x*lw - i);
float ty = (truth.y*lh - j);
float tw = log(truth.w*w / biases[2 * n]);
float th = log(truth.h*h / biases[2 * n + 1]);
dx = (tx - x[index + 0 * stride]);
dy = (ty - x[index + 2 * stride]);
dw = (tw - x[index + 4 * stride]);
dh = (th - x[index + 6 * stride]);
// Gaussian
float in_exp_x = dx / x[index+1*stride];
float in_exp_x_2 = pow(in_exp_x, 2);
float normal_dist_x = exp(in_exp_x_2*(-1./2.))/(sqrt(M_PI * 2.0)*(x[index+1*stride]+sigma_const));
float in_exp_y = (ty - x[index + 2*stride])/x[index+3*stride];
float in_exp_y = dy / x[index+3*stride];
float in_exp_y_2 = pow(in_exp_y, 2);
float normal_dist_y = exp(in_exp_y_2*(-1./2.))/(sqrt(M_PI * 2.0)*(x[index+3*stride]+sigma_const));
float in_exp_w = (tw - x[index + 4*stride])/x[index+5*stride];
float in_exp_w = dw / x[index+5*stride];
float in_exp_w_2 = pow(in_exp_w, 2);
float normal_dist_w = exp(in_exp_w_2*(-1./2.))/(sqrt(M_PI * 2.0)*(x[index+5*stride]+sigma_const));
float in_exp_h = (th - x[index + 6*stride])/x[index+7*stride];
float in_exp_h = dh / x[index+7*stride];
float in_exp_h_2 = pow(in_exp_h, 2);
float normal_dist_h = exp(in_exp_h_2*(-1./2.))/(sqrt(M_PI * 2.0)*(x[index+7*stride]+sigma_const));
@@ -174,18 +190,96 @@ float delta_gaussian_yolo_box(box truth, float *x, float *biases, int n, int ind
float temp_w = (1./2.) * 1./(normal_dist_w+epsi) * normal_dist_w * scale;
float temp_h = (1./2.) * 1./(normal_dist_h+epsi) * normal_dist_h * scale;
delta[index + 0*stride] = temp_x * in_exp_x * (1./x[index+1*stride]);
delta[index + 2*stride] = temp_y * in_exp_y * (1./x[index+3*stride]);
delta[index + 4*stride] = temp_w * in_exp_w * (1./x[index+5*stride]);
delta[index + 6*stride] = temp_h * in_exp_h * (1./x[index+7*stride]);
if (!accumulate) {
delta[index + 0 * stride] = 0;
delta[index + 1 * stride] = 0;
delta[index + 2 * stride] = 0;
delta[index + 3 * stride] = 0;
delta[index + 4 * stride] = 0;
delta[index + 5 * stride] = 0;
delta[index + 6 * stride] = 0;
delta[index + 7 * stride] = 0;
}
delta[index + 1*stride] = temp_x * (in_exp_x_2/x[index+1*stride] - 1./(x[index+1*stride]+sigma_const));
delta[index + 3*stride] = temp_y * (in_exp_y_2/x[index+3*stride] - 1./(x[index+3*stride]+sigma_const));
delta[index + 5*stride] = temp_w * (in_exp_w_2/x[index+5*stride] - 1./(x[index+5*stride]+sigma_const));
delta[index + 7*stride] = temp_h * (in_exp_h_2/x[index+7*stride] - 1./(x[index+7*stride]+sigma_const));
float delta_x = temp_x * in_exp_x * (1. / x[index + 1 * stride]);
float delta_y = temp_y * in_exp_y * (1. / x[index + 3 * stride]);
float delta_w = temp_w * in_exp_w * (1. / x[index + 5 * stride]);
float delta_h = temp_h * in_exp_h * (1. / x[index + 7 * stride]);
float delta_ux = temp_x * (in_exp_x_2 / x[index + 1 * stride] - 1. / (x[index + 1 * stride] + sigma_const));
float delta_uy = temp_y * (in_exp_y_2 / x[index + 3 * stride] - 1. / (x[index + 3 * stride] + sigma_const));
float delta_uw = temp_w * (in_exp_w_2 / x[index + 5 * stride] - 1. / (x[index + 5 * stride] + sigma_const));
float delta_uh = temp_h * (in_exp_h_2 / x[index + 7 * stride] - 1. / (x[index + 7 * stride] + sigma_const));
if (iou_loss != MSE) {
// GIoU
iou = all_ious.giou;
// https://github.com/generalized-iou/g-darknet
// https://arxiv.org/abs/1902.09630v2
// https://giou.stanford.edu/
all_ious.dx_iou = dx_box_iou(pred, truth, iou_loss);
// jacobian^t (transpose)
float dx = (all_ious.dx_iou.dl + all_ious.dx_iou.dr);
float dy = (all_ious.dx_iou.dt + all_ious.dx_iou.db);
float dw = ((-0.5 * all_ious.dx_iou.dl) + (0.5 * all_ious.dx_iou.dr));
float dh = ((-0.5 * all_ious.dx_iou.dt) + (0.5 * all_ious.dx_iou.db));
// predict exponential, apply gradient of e^delta_t ONLY for w,h
dw *= exp(x[index + 4 * stride]);
dh *= exp(x[index + 6 * stride]);
// normalize iou weight, for GIoU
dx *= iou_normalizer;
dy *= iou_normalizer;
dw *= iou_normalizer;
dh *= iou_normalizer;
delta_x = dx;
delta_y = dy;
delta_w = dw;
delta_h = dh;
}
// normalize Uncertainty weight
delta_ux *= uc_normalizer;
delta_uy *= uc_normalizer;
delta_uw *= uc_normalizer;
delta_uh *= uc_normalizer;
delta[index + 0 * stride] += delta_x;
delta[index + 2 * stride] += delta_y;
delta[index + 4 * stride] += delta_w;
delta[index + 6 * stride] += delta_h;
delta[index + 1 * stride] += delta_ux;
delta[index + 3 * stride] += delta_uy;
delta[index + 5 * stride] += delta_uw;
delta[index + 7 * stride] += delta_uh;
return iou;
}
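
For reference, the mu and sigma deltas above are the negative gradients of a per-coordinate Gaussian negative log-likelihood, sketched below (t is the target offset, mu the predicted coordinate, sigma the predicted uncertainty, sigma_c = sigma_const; the (2 - w*h) scale factor and the normal_dist/(normal_dist + epsi) ratio, which is close to 1, are folded in by the code):

\mathcal{L}(\mu,\sigma) = \frac{(t-\mu)^2}{2\sigma^2} + \log\!\left(\sqrt{2\pi}\,(\sigma+\sigma_c)\right)

-\frac{\partial\mathcal{L}}{\partial\mu} = \frac{t-\mu}{\sigma^{2}},
\qquad
-\frac{\partial\mathcal{L}}{\partial\sigma} = \frac{(t-\mu)^{2}}{\sigma^{3}} - \frac{1}{\sigma+\sigma_c}

When iou_loss=giou the four coordinate deltas are replaced by the GIoU gradient scaled by iou_normalizer, while the four sigma deltas keep this NLL form and are scaled by uc_normalizer.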
void averages_gaussian_yolo_deltas(int class_index, int box_index, int stride, int classes, float *delta)
{
int classes_in_one_box = 0;
int c;
for (c = 0; c < classes; ++c) {
if (delta[class_index + stride*c] > 0) classes_in_one_box++;
}
if (classes_in_one_box > 0) {
delta[box_index + 0 * stride] /= classes_in_one_box;
delta[box_index + 1 * stride] /= classes_in_one_box;
delta[box_index + 2 * stride] /= classes_in_one_box;
delta[box_index + 3 * stride] /= classes_in_one_box;
delta[box_index + 4 * stride] /= classes_in_one_box;
delta[box_index + 5 * stride] /= classes_in_one_box;
delta[box_index + 6 * stride] /= classes_in_one_box;
delta[box_index + 7 * stride] /= classes_in_one_box;
}
}
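
averages_gaussian_yolo_deltas (and the matching averages_yolo_deltas in yolo_layer.c below) appears to exist because, with accumulation and the iou_thresh assignment, one anchor cell can collect box deltas from several matched truths while each matched class leaves a positive class delta; dividing the eight accumulated box entries by the number of positive classes turns that sum back into an average so the box gradient is not over-counted. A tiny illustration with made-up numbers:

/* Two truths of different classes matched the same anchor, so one box
 * component accumulated two deltas:
 *   accumulated sum       = 0.40 + (-0.10) = 0.30   (stored in l.delta)
 *   classes_in_one_box    = 2                       (positive class deltas here)
 *   value backpropagated  = 0.30 / 2       = 0.15   (sum turned into an average)
 */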
void delta_gaussian_yolo_class(float *output, float *delta, int index, int class_id, int classes, int stride, float *avg_cat)
{
@@ -201,6 +295,19 @@ void delta_gaussian_yolo_class(float *output, float *delta, int index, int class
}
}
int compare_gaussian_yolo_class(float *output, int classes, int class_index, int stride, float objectness, int class_id, float conf_thresh)
{
int j;
for (j = 0; j < classes; ++j) {
//float prob = objectness * output[class_index + stride*j];
float prob = output[class_index + stride*j];
if (prob > conf_thresh) {
return 1;
}
}
return 0;
}
static int entry_gaussian_index(layer l, int batch, int location, int entry)
{
int n = location / (l.w*l.h);
@@ -254,12 +361,31 @@ void forward_gaussian_yolo_layer(const layer l, network_state state)
for (n = 0; n < l.n; ++n) {
int box_index = entry_gaussian_index(l, b, n*l.w*l.h + j*l.w + i, 0);
box pred = get_gaussian_yolo_box(l.output, l.biases, l.mask[n], box_index, i, j, l.w, l.h, state.net.w, state.net.h, l.w*l.h);
float best_match_iou = 0;
int best_match_t = 0;
float best_iou = 0;
int best_t = 0;
for(t = 0; t < l.max_boxes; ++t){
box truth = float_to_box_stride(state.truth + t*(4 + 1) + b*l.truths, 1);
int class_id = state.truth[t*(4 + 1) + b*l.truths + 4];
if (class_id >= l.classes) {
printf(" Warning: in txt-labels class_id=%d >= classes=%d in cfg-file. In txt-labels class_id should be [from 0 to %d] \n", class_id, l.classes, l.classes - 1);
printf(" truth.x = %f, truth.y = %f, truth.w = %f, truth.h = %f, class_id = %d \n", truth.x, truth.y, truth.w, truth.h, class_id);
getchar();
continue; // if label contains class_id more than number of classes in the cfg-file
}
if(!truth.x) break;
int class_index = entry_gaussian_index(l, b, n*l.w*l.h + j*l.w + i, 9);
int obj_index = entry_gaussian_index(l, b, n*l.w*l.h + j*l.w + i, 8);
float objectness = l.output[obj_index];
int class_id_match = compare_gaussian_yolo_class(l.output, l.classes, class_index, l.w*l.h, objectness, class_id, 0.25f);
float iou = box_iou(pred, truth);
if (iou > best_match_iou && class_id_match == 1) {
best_match_iou = iou;
best_match_t = t;
}
if (iou > best_iou) {
best_iou = iou;
best_t = t;
@@ -267,19 +393,19 @@ void forward_gaussian_yolo_layer(const layer l, network_state state)
}
int obj_index = entry_gaussian_index(l, b, n*l.w*l.h + j*l.w + i, 8);
avg_anyobj += l.output[obj_index];
l.delta[obj_index] = 0 - l.output[obj_index];
if (best_iou > l.ignore_thresh) {
l.delta[obj_index] = l.cls_normalizer * (0 - l.output[obj_index]);
if (best_match_iou > l.ignore_thresh) {
l.delta[obj_index] = 0;
}
if (best_iou > l.truth_thresh) {
l.delta[obj_index] = 1 - l.output[obj_index];
l.delta[obj_index] = l.cls_normalizer * (1 - l.output[obj_index]);
int class_id = state.truth[best_t*(4 + 1) + b*l.truths + 4];
if (l.map) class_id = l.map[class_id];
int class_index = entry_gaussian_index(l, b, n*l.w*l.h + j*l.w + i, 9);
delta_gaussian_yolo_class(l.output, l.delta, class_index, class_id, l.classes, l.w*l.h, 0);
box truth = float_to_box_stride(state.truth + best_t*(4 + 1) + b*l.truths, 1);
delta_gaussian_yolo_box(truth, l.output, l.biases, l.mask[n], box_index, i, j, l.w, l.h, state.net.w, state.net.h, l.delta, (2-truth.w*truth.h), l.w*l.h);
delta_gaussian_yolo_box(truth, l.output, l.biases, l.mask[n], box_index, i, j, l.w, l.h, state.net.w, state.net.h, l.delta, (2-truth.w*truth.h), l.w*l.h, l.iou_normalizer, l.iou_loss, l.uc_normalizer, 1);
}
}
}
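
Two maxima are now tracked per prediction: best_iou, the plain best overlap, still used for the truth_thresh promotion, and best_match_iou, the best overlap restricted to truths whose class the cell also predicts above 0.25 (via compare_gaussian_yolo_class). Only best_match_iou is compared against ignore_thresh, so a prediction that overlaps an object of a different class keeps its negative objectness gradient instead of being ignored. A self-contained restatement of that targeting rule (illustrative only; the real code writes into l.delta in place):

/* Sketch of the objectness delta chosen above. */
static float objectness_delta(float pred_obj, float best_iou, float best_match_iou,
                              float ignore_thresh, float truth_thresh, float cls_normalizer)
{
    float d = cls_normalizer * (0.0f - pred_obj);     /* background by default        */
    if (best_match_iou > ignore_thresh) d = 0.0f;     /* overlaps a same-class truth  */
    if (best_iou > truth_thresh)                      /* very confident match         */
        d = cls_normalizer * (1.0f - pred_obj);
    return d;
}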
@@ -308,11 +434,11 @@ void forward_gaussian_yolo_layer(const layer l, network_state state)
int mask_n = int_index(l.mask, best_n, l.n);
if(mask_n >= 0){
int box_index = entry_gaussian_index(l, b, mask_n*l.w*l.h + j*l.w + i, 0);
float iou = delta_gaussian_yolo_box(truth, l.output, l.biases, best_n, box_index, i, j, l.w, l.h, state.net.w, state.net.h, l.delta, (2-truth.w*truth.h), l.w*l.h);
float iou = delta_gaussian_yolo_box(truth, l.output, l.biases, best_n, box_index, i, j, l.w, l.h, state.net.w, state.net.h, l.delta, (2-truth.w*truth.h), l.w*l.h, l.iou_normalizer, l.iou_loss, l.uc_normalizer, 1);
int obj_index = entry_gaussian_index(l, b, mask_n*l.w*l.h + j*l.w + i, 8);
avg_obj += l.output[obj_index];
l.delta[obj_index] = 1 - l.output[obj_index];
l.delta[obj_index] = l.cls_normalizer * (1 - l.output[obj_index]);
int class_id = state.truth[t*(4 + 1) + b*l.truths + 4];
if (l.map) class_id = l.map[class_id];
@@ -325,10 +451,116 @@ void forward_gaussian_yolo_layer(const layer l, network_state state)
if(iou > .75) recall75 += 1;
avg_iou += iou;
}
// iou_thresh
for (n = 0; n < l.total; ++n) {
int mask_n = int_index(l.mask, n, l.n);
if (mask_n >= 0 && n != best_n && l.iou_thresh < 1.0f) {
box pred = { 0 };
pred.w = l.biases[2 * n] / state.net.w;
pred.h = l.biases[2 * n + 1] / state.net.h;
float iou = box_iou(pred, truth_shift);
// iou, n
if (iou > l.iou_thresh) {
int box_index = entry_gaussian_index(l, b, mask_n*l.w*l.h + j*l.w + i, 0);
float iou = delta_gaussian_yolo_box(truth, l.output, l.biases, n, box_index, i, j, l.w, l.h, state.net.w, state.net.h, l.delta, (2 - truth.w*truth.h), l.w*l.h, l.iou_normalizer, l.iou_loss, l.uc_normalizer, 1);
int obj_index = entry_gaussian_index(l, b, mask_n*l.w*l.h + j*l.w + i, 8);
avg_obj += l.output[obj_index];
l.delta[obj_index] = l.cls_normalizer * (1 - l.output[obj_index]);
int class_id = state.truth[t*(4 + 1) + b*l.truths + 4];
if (l.map) class_id = l.map[class_id];
int class_index = entry_gaussian_index(l, b, mask_n*l.w*l.h + j*l.w + i, 9);
delta_gaussian_yolo_class(l.output, l.delta, class_index, class_id, l.classes, l.w*l.h, &avg_cat);
++count;
++class_count;
if (iou > .5) recall += 1;
if (iou > .75) recall75 += 1;
avg_iou += iou;
}
}
}
}
// averages the deltas obtained by the function: delta_yolo_box()_accumulate
for (j = 0; j < l.h; ++j) {
for (i = 0; i < l.w; ++i) {
for (n = 0; n < l.n; ++n) {
int box_index = entry_gaussian_index(l, b, n*l.w*l.h + j*l.w + i, 0);
int class_index = entry_gaussian_index(l, b, n*l.w*l.h + j*l.w + i, 9);
const int stride = l.w*l.h;
averages_gaussian_yolo_deltas(class_index, box_index, stride, l.classes, l.delta);
}
}
}
}
// calculate: Classification-loss, IoU-loss and Uncertainty-loss
const int stride = l.w*l.h;
float* classification_lost = (float *)calloc(l.batch * l.outputs, sizeof(float));
memcpy(classification_lost, l.delta, l.batch * l.outputs * sizeof(float));
for (b = 0; b < l.batch; ++b) {
for (j = 0; j < l.h; ++j) {
for (i = 0; i < l.w; ++i) {
for (n = 0; n < l.n; ++n) {
int box_index = entry_gaussian_index(l, b, n*l.w*l.h + j*l.w + i, 0);
classification_lost[box_index + 0 * stride] = 0;
classification_lost[box_index + 1 * stride] = 0;
classification_lost[box_index + 2 * stride] = 0;
classification_lost[box_index + 3 * stride] = 0;
classification_lost[box_index + 4 * stride] = 0;
classification_lost[box_index + 5 * stride] = 0;
classification_lost[box_index + 6 * stride] = 0;
classification_lost[box_index + 7 * stride] = 0;
}
}
}
}
float class_loss = pow(mag_array(classification_lost, l.outputs * l.batch), 2);
free(classification_lost);
float* except_uncertainty_lost = (float *)calloc(l.batch * l.outputs, sizeof(float));
memcpy(except_uncertainty_lost, l.delta, l.batch * l.outputs * sizeof(float));
for (b = 0; b < l.batch; ++b) {
for (j = 0; j < l.h; ++j) {
for (i = 0; i < l.w; ++i) {
for (n = 0; n < l.n; ++n) {
int box_index = entry_gaussian_index(l, b, n*l.w*l.h + j*l.w + i, 0);
except_uncertainty_lost[box_index + 4 * stride] = 0;
except_uncertainty_lost[box_index + 5 * stride] = 0;
except_uncertainty_lost[box_index + 6 * stride] = 0;
except_uncertainty_lost[box_index + 7 * stride] = 0;
}
}
}
}
float except_uc_loss = pow(mag_array(except_uncertainty_lost, l.outputs * l.batch), 2);
free(except_uncertainty_lost);
*(l.cost) = pow(mag_array(l.delta, l.outputs * l.batch), 2);
printf("Region %d Avg IOU: %f, Class: %f, Obj: %f, No Obj: %f, .5R: %f, .75R: %f, count: %d\n", state.index, avg_iou/count, avg_cat/class_count, avg_obj/count, avg_anyobj/(l.w*l.h*l.n*l.batch), recall/count, recall75/count, count);
float loss = pow(mag_array(l.delta, l.outputs * l.batch), 2);
float uc_loss = loss - except_uc_loss;
float iou_loss = except_uc_loss - class_loss;
loss /= l.batch;
class_loss /= l.batch;
uc_loss /= l.batch;
iou_loss /= l.batch;
printf("Region %d Avg IOU: %f, Class: %f, Obj: %f, No Obj: %f, .5R: %f, .75R: %f, count: %d, loss = %.2f, class_loss = %.2f, iou_loss = %.2f, uc_loss = %.2f \n",
state.index, avg_iou/count, avg_cat/class_count, avg_obj/count, avg_anyobj/(l.w*l.h*l.n*l.batch), recall/count, recall75/count, count,
loss, class_loss, iou_loss, uc_loss);
}
void backward_gaussian_yolo_layer(const layer l, network_state state)
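
The extra loss figures printed at the end of forward_gaussian_yolo_layer are carved out of squared L2 norms of copies of l.delta with different entry groups zeroed; since squared magnitudes add over disjoint index sets, differences of those norms isolate each group's contribution:

\|\delta\|^{2} = \sum_{i \in \text{zeroed set}} \delta_i^{2} + \sum_{i \notin \text{zeroed set}} \delta_i^{2}
\;\Rightarrow\;
\text{uc\_loss} = \text{loss} - \text{except\_uc\_loss},
\qquad
\text{iou\_loss} = \text{except\_uc\_loss} - \text{class\_loss}

each term is then divided by l.batch before printing. One caveat: with the entry layout used here (x, sigma_x, y, sigma_y, w, sigma_w, h, sigma_h at offsets 0..7), the offsets 4..7 zeroed for except_uncertainty_lost are the w/h means and w/h sigmas rather than the four sigma entries at 1, 3, 5, 7, so the printed iou_loss/uc_loss split is best read as approximate.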

src/gemm.c
@@ -324,7 +324,7 @@ void transpose_32x32_bits_my(uint32_t *A, uint32_t *B, int lda, int ldb)
unsigned int x, y;
for (y = 0; y < 32; ++y) {
for (x = 0; x < 32; ++x) {
if (A[y * lda] & (1 << x)) B[x * ldb] |= (uint32_t)1 << y;
if (A[y * lda] & ((uint32_t)1 << x)) B[x * ldb] |= (uint32_t)1 << y;
}
}
}
@@ -636,48 +636,48 @@ void check_cpu_features(void) {
// Detect Features
if (nIds >= 0x00000001) {
cpuid(info, 0x00000001);
HW_MMX = (info[3] & ((int)1 << 23)) != 0;
HW_SSE = (info[3] & ((int)1 << 25)) != 0;
HW_SSE2 = (info[3] & ((int)1 << 26)) != 0;
HW_SSE3 = (info[2] & ((int)1 << 0)) != 0;
HW_MMX = (info[3] & ((uint32_t)1 << 23)) != 0;
HW_SSE = (info[3] & ((uint32_t)1 << 25)) != 0;
HW_SSE2 = (info[3] & ((uint32_t)1 << 26)) != 0;
HW_SSE3 = (info[2] & ((uint32_t)1 << 0)) != 0;
HW_SSSE3 = (info[2] & ((int)1 << 9)) != 0;
HW_SSE41 = (info[2] & ((int)1 << 19)) != 0;
HW_SSE42 = (info[2] & ((int)1 << 20)) != 0;
HW_AES = (info[2] & ((int)1 << 25)) != 0;
HW_SSSE3 = (info[2] & ((uint32_t)1 << 9)) != 0;
HW_SSE41 = (info[2] & ((uint32_t)1 << 19)) != 0;
HW_SSE42 = (info[2] & ((uint32_t)1 << 20)) != 0;
HW_AES = (info[2] & ((uint32_t)1 << 25)) != 0;
HW_AVX = (info[2] & ((int)1 << 28)) != 0;
HW_FMA3 = (info[2] & ((int)1 << 12)) != 0;
HW_AVX = (info[2] & ((uint32_t)1 << 28)) != 0;
HW_FMA3 = (info[2] & ((uint32_t)1 << 12)) != 0;
HW_RDRAND = (info[2] & ((int)1 << 30)) != 0;
HW_RDRAND = (info[2] & ((uint32_t)1 << 30)) != 0;
}
if (nIds >= 0x00000007) {
cpuid(info, 0x00000007);
HW_AVX2 = (info[1] & ((int)1 << 5)) != 0;
HW_BMI1 = (info[1] & ((int)1 << 3)) != 0;
HW_BMI2 = (info[1] & ((int)1 << 8)) != 0;
HW_ADX = (info[1] & ((int)1 << 19)) != 0;
HW_SHA = (info[1] & ((int)1 << 29)) != 0;
HW_PREFETCHWT1 = (info[2] & ((int)1 << 0)) != 0;
HW_AVX512F = (info[1] & ((int)1 << 16)) != 0;
HW_AVX512CD = (info[1] & ((int)1 << 28)) != 0;
HW_AVX512PF = (info[1] & ((int)1 << 26)) != 0;
HW_AVX512ER = (info[1] & ((int)1 << 27)) != 0;
HW_AVX512VL = (info[1] & ((int)1 << 31)) != 0;
HW_AVX512BW = (info[1] & ((int)1 << 30)) != 0;
HW_AVX512DQ = (info[1] & ((int)1 << 17)) != 0;
HW_AVX512IFMA = (info[1] & ((int)1 << 21)) != 0;
HW_AVX512VBMI = (info[2] & ((int)1 << 1)) != 0;
HW_AVX2 = (info[1] & ((uint32_t)1 << 5)) != 0;
HW_BMI1 = (info[1] & ((uint32_t)1 << 3)) != 0;
HW_BMI2 = (info[1] & ((uint32_t)1 << 8)) != 0;
HW_ADX = (info[1] & ((uint32_t)1 << 19)) != 0;
HW_SHA = (info[1] & ((uint32_t)1 << 29)) != 0;
HW_PREFETCHWT1 = (info[2] & ((uint32_t)1 << 0)) != 0;
HW_AVX512F = (info[1] & ((uint32_t)1 << 16)) != 0;
HW_AVX512CD = (info[1] & ((uint32_t)1 << 28)) != 0;
HW_AVX512PF = (info[1] & ((uint32_t)1 << 26)) != 0;
HW_AVX512ER = (info[1] & ((uint32_t)1 << 27)) != 0;
HW_AVX512VL = (info[1] & ((uint32_t)1 << 31)) != 0;
HW_AVX512BW = (info[1] & ((uint32_t)1 << 30)) != 0;
HW_AVX512DQ = (info[1] & ((uint32_t)1 << 17)) != 0;
HW_AVX512IFMA = (info[1] & ((uint32_t)1 << 21)) != 0;
HW_AVX512VBMI = (info[2] & ((uint32_t)1 << 1)) != 0;
}
if (nExIds >= 0x80000001) {
cpuid(info, 0x80000001);
HW_x64 = (info[3] & ((int)1 << 29)) != 0;
HW_ABM = (info[2] & ((int)1 << 5)) != 0;
HW_SSE4a = (info[2] & ((int)1 << 6)) != 0;
HW_FMA4 = (info[2] & ((int)1 << 16)) != 0;
HW_XOP = (info[2] & ((int)1 << 11)) != 0;
HW_x64 = (info[3] & ((uint32_t)1 << 29)) != 0;
HW_ABM = (info[2] & ((uint32_t)1 << 5)) != 0;
HW_SSE4a = (info[2] & ((uint32_t)1 << 6)) != 0;
HW_FMA4 = (info[2] & ((uint32_t)1 << 16)) != 0;
HW_XOP = (info[2] & ((uint32_t)1 << 11)) != 0;
}
}
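
Both gemm.c changes replace a signed 1 in a left shift with (uint32_t)1. For bit 31, (int)1 << 31 shifts into the sign bit, which is undefined behaviour in C, so tests such as the AVX512VL check (bit 31 of info[1]) and the x == 31 iterations of the transpose loop were not well defined; an unsigned shift is valid for every position 0..31. A minimal sketch of the safe pattern:

#include <stdint.h>

/* Test bit `pos` of a 32-bit CPUID word without invoking signed-shift UB:
 * (uint32_t)1 << 31 is well defined, (int)1 << 31 is not. */
static int test_bit(uint32_t word, unsigned pos)
{
    return (word & ((uint32_t)1 << pos)) != 0;
}
/* e.g. HW_AVX512VL corresponds to test_bit(info[1], 31) in the code above. */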

src/go.c
@@ -47,6 +47,7 @@ moves load_go_moves(char *filename)
printf("%d\n", count);
m.n = count;
m.data = (char**)realloc(m.data, count * sizeof(char*));
fclose(fp);
return m;
}

src/image_opencv.cpp
@@ -703,11 +703,12 @@ int set_capture_position_frame_cv(cap_cv *cap, int index)
image get_image_from_stream_cpp(cap_cv *cap)
{
cv::Mat *src = new cv::Mat();
cv::Mat *src = NULL;
static int once = 1;
if (once) {
once = 0;
do {
if (src) delete src;
src = get_capture_frame_cv(cap);
if (!src) return make_empty_image(0, 0, 0);
} while (src->cols < 1 || src->rows < 1 || src->channels() < 1);
@@ -719,6 +720,7 @@ image get_image_from_stream_cpp(cap_cv *cap)
if (!src) return make_empty_image(0, 0, 0);
image im = mat_to_image(*src);
rgbgr_image(im);
if (src) delete src;
return im;
}
// ----------------------------------------

src/network.c
@@ -820,6 +820,7 @@ char *detection_to_json(detection *dets, int nboxes, int classes, char **names,
const float thresh = 0.005; // function get_network_boxes() has already filtred dets by actual threshold
char *send_buf = (char *)calloc(1024, sizeof(char));
if (!send_buf) return 0;
if (filename) {
sprintf(send_buf, "{\n \"frame_id\":%lld, \n \"filename\":\"%s\", \n \"objects\": [ \n", frame_id, filename);
}
@@ -837,6 +838,7 @@ char *detection_to_json(detection *dets, int nboxes, int classes, char **names,
if (class_id != -1) strcat(send_buf, ", \n");
class_id = j;
char *buf = (char *)calloc(2048, sizeof(char));
if (!buf) return 0;
//sprintf(buf, "{\"image_id\":%d, \"category_id\":%d, \"bbox\":[%f, %f, %f, %f], \"score\":%f}",
// image_id, j, dets[i].bbox.x, dets[i].bbox.y, dets[i].bbox.w, dets[i].bbox.h, dets[i].prob[j]);
@@ -847,7 +849,10 @@ char *detection_to_json(detection *dets, int nboxes, int classes, char **names,
int buf_len = strlen(buf);
int total_len = send_buf_len + buf_len + 100;
send_buf = (char *)realloc(send_buf, total_len * sizeof(char));
if (!send_buf) return 0;// exit(-1);
if (!send_buf) {
if (buf) free(buf);
return 0;// exit(-1);
}
strcat(send_buf, buf);
free(buf);
}
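
The detection_to_json changes add NULL checks after calloc/realloc and free the temporary buf when the realloc fails. One remaining caveat of the p = realloc(p, n) pattern is that a failure overwrites the only copy of the old pointer, so the original allocation can no longer be freed; a common alternative keeps the old pointer until the call succeeds. A hedged sketch of that idiom (not the committed code):

#include <stdlib.h>

/* Grow a heap buffer safely: returns 0 on failure and leaves *bufp untouched,
 * so the caller can still use or free the original allocation. */
static int grow_buffer(char **bufp, size_t new_len)
{
    char *tmp = (char *)realloc(*bufp, new_len);
    if (!tmp) return 0;
    *bufp = tmp;
    return 1;
}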

src/parser.c
@@ -380,6 +380,7 @@ layer parse_yolo(list *options, size_params params)
l.ignore_thresh = option_find_float(options, "ignore_thresh", .5);
l.truth_thresh = option_find_float(options, "truth_thresh", 1);
l.iou_thresh = option_find_float_quiet(options, "iou_thresh", 1); // recommended to use iou_thresh=0.213 in [yolo]
l.random = option_find_int_quiet(options, "random", 0);
char *map_file = option_find_str(options, "map", 0);
@@ -435,14 +436,29 @@ layer parse_gaussian_yolo(list *options, size_params params) // Gaussian_YOLOv3
char *a = option_find_str(options, "mask", 0);
int *mask = parse_gaussian_yolo_mask(a, &num);
layer l = make_gaussian_yolo_layer(params.batch, params.w, params.h, num, total, mask, classes, max_boxes);
assert(l.outputs == params.inputs);
if (l.outputs != params.inputs) {
printf("Error: l.outputs == params.inputs \n");
printf("filters= in the [convolutional]-layer doesn't correspond to classes= or mask= in [Gaussian_yolo]-layer \n");
exit(EXIT_FAILURE);
}
//assert(l.outputs == params.inputs);
l.scale_x_y = option_find_float_quiet(options, "scale_x_y", 1);
l.max_boxes = option_find_int_quiet(options, "max", 90);
l.uc_normalizer = option_find_float_quiet(options, "uc_normalizer", 1.0);
l.iou_normalizer = option_find_float_quiet(options, "iou_normalizer", 0.75);
l.cls_normalizer = option_find_float_quiet(options, "cls_normalizer", 1.0);
char *iou_loss = option_find_str_quiet(options, "iou_loss", "mse"); // "iou");
if (strcmp(iou_loss, "mse") == 0) l.iou_loss = MSE;
else if (strcmp(iou_loss, "giou") == 0) l.iou_loss = GIOU;
else l.iou_loss = IOU;
fprintf(stderr, "[Gaussian_yolo] iou loss: %s, iou_norm: %2.2f, cls_norm: %2.2f, scale: %2.2f\n", (l.iou_loss == MSE ? "mse" : (l.iou_loss == GIOU ? "giou" : "iou")), l.iou_normalizer, l.cls_normalizer, l.scale_x_y);
l.jitter = option_find_float(options, "jitter", .2);
l.ignore_thresh = option_find_float(options, "ignore_thresh", .5);
l.truth_thresh = option_find_float(options, "truth_thresh", 1);
l.iou_thresh = option_find_float_quiet(options, "iou_thresh", 1); // recommended to use iou_thresh=0.213 in [yolo]
l.random = option_find_int_quiet(options, "random", 0);
char *map_file = option_find_str(options, "map", 0);
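
For quick reference, this is how the new [Gaussian_yolo] keys parsed here feed into the training code changed elsewhere in this commit (a summary of the diffs above, not new behaviour):

/* cfg key         -> effect in gaussian_yolo_layer.c
 * iou_loss        :  mse keeps the per-coordinate Gaussian deltas, giou swaps
 *                    the four coordinate deltas for the GIoU gradient
 * iou_normalizer  :  scales the GIoU box gradients
 * uc_normalizer   :  scales the four uncertainty (sigma) deltas
 * cls_normalizer  :  scales the objectness deltas
 * iou_thresh      :  anchors other than the best one whose shape-IoU with the
 *                    truth exceeds this value also receive positive deltas
 */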

src/utils.c
@@ -41,6 +41,7 @@ int *read_map(char *filename)
map = (int*)realloc(map, n * sizeof(int));
map[n-1] = atoi(str);
}
if (file) fclose(file);
return map;
}
@@ -65,6 +66,7 @@ void shuffle(void *arr, size_t n, size_t size)
memcpy((char*)arr+(j*size), (char*)arr+(i*size), size);
memcpy((char*)arr+(i*size), swp, size);
}
free(swp);
}
void del_arg(int argc, char **argv, int index)
@@ -216,7 +218,7 @@ void find_replace_extension(char *str, char *orig, char *rep, char *output)
int offset = (p - buffer);
int chars_from_end = strlen(buffer) - offset;
if (!p || chars_from_end != strlen(orig)) { // Is 'orig' even in 'str' AND is 'orig' found at the end of 'str'?
sprintf(output, "%s", str);
sprintf(output, "%s", buffer);
free(buffer);
return;
}
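
The find_replace_extension change copies from buffer instead of str when orig is not found at the end. This matters because call sites such as replace_image_to_label() pass the same pointer for str and output, and sprintf with overlapping source and destination is undefined behaviour; buffer is a private heap copy of str, so copying from it is always safe. A minimal illustration of the pattern (not the darknet function itself):

#include <stdio.h>
#include <stdlib.h>

/* Writing an aliased input back to itself: go through a private copy. */
static void copy_via_scratch(char *str_and_output)
{
    char *buffer = (char *)calloc(8192, sizeof(char));
    if (!buffer) return;
    sprintf(buffer, "%s", str_and_output);   /* private copy of the input    */
    sprintf(str_and_output, "%s", buffer);   /* well defined: no overlap     */
    free(buffer);
}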
@@ -685,9 +687,9 @@ int max_index(float *a, int n)
int top_max_index(float *a, int n, int k)
{
if (n <= 0) return -1;
float *values = (float*)calloc(k, sizeof(float));
int *indexes = (int*)calloc(k, sizeof(int));
if (n <= 0) return -1;
int i, j;
for (i = 0; i < n; ++i) {
for (j = 0; j < k; ++j) {

src/yolo.c
@@ -189,6 +189,14 @@ void validate_yolo(char *cfgfile, char *weightfile)
free_image(val_resized[t]);
}
}
if (fps) free(fps);
if (val) free(val);
if (val_resized) free(val_resized);
if (buf) free(buf);
if (buf_resized) free(buf_resized);
if (thr) free(thr);
fprintf(stderr, "Total Detection Time: %f Seconds\n", (double)(time(0) - start));
}

src/yolo_layer.c
@@ -128,22 +128,7 @@ box get_yolo_box(float *x, float *biases, int n, int index, int i, int j, int lw
return b;
}
int compare_yolo_class(float *output, int classes, int class_index, int stride, float objectness, int class_id)
{
const float conf_thresh = 0.25;
int j;
for (j = 0; j < classes; ++j) {
float prob = objectness * output[class_index + stride*j];
if (prob > conf_thresh) {
return 1;
}
}
return 0;
}
ious delta_yolo_box(box truth, float *x, float *biases, int n, int index, int i, int j, int lw, int lh, int w, int h, float *delta, float scale, int stride, float iou_normalizer, IOU_LOSS iou_loss)
ious delta_yolo_box(box truth, float *x, float *biases, int n, int index, int i, int j, int lw, int lh, int w, int h, float *delta, float scale, int stride, float iou_normalizer, IOU_LOSS iou_loss, int accumulate)
{
ious all_ious = { 0 };
// i - step in layer width
@@ -162,10 +147,11 @@ ious delta_yolo_box(box truth, float *x, float *biases, int n, int index, int i,
float tw = log(truth.w*w / biases[2 * n]);
float th = log(truth.h*h / biases[2 * n + 1]);
delta[index + 0 * stride] = scale * (tx - x[index + 0 * stride]);
delta[index + 1 * stride] = scale * (ty - x[index + 1 * stride]);
delta[index + 2 * stride] = scale * (tw - x[index + 2 * stride]);
delta[index + 3 * stride] = scale * (th - x[index + 3 * stride]);
// accumulate delta
delta[index + 0 * stride] += scale * (tx - x[index + 0 * stride]);
delta[index + 1 * stride] += scale * (ty - x[index + 1 * stride]);
delta[index + 2 * stride] += scale * (tw - x[index + 2 * stride]);
delta[index + 3 * stride] += scale * (th - x[index + 3 * stride]);
}
else {
// https://github.com/generalized-iou/g-darknet
@@ -174,25 +160,55 @@ ious delta_yolo_box(box truth, float *x, float *biases, int n, int index, int i,
all_ious.dx_iou = dx_box_iou(pred, truth, iou_loss);
// jacobian^t (transpose)
delta[index + 0 * stride] = (all_ious.dx_iou.dl + all_ious.dx_iou.dr);
delta[index + 1 * stride] = (all_ious.dx_iou.dt + all_ious.dx_iou.db);
delta[index + 2 * stride] = ((-0.5 * all_ious.dx_iou.dl) + (0.5 * all_ious.dx_iou.dr));
delta[index + 3 * stride] = ((-0.5 * all_ious.dx_iou.dt) + (0.5 * all_ious.dx_iou.db));
float dx = (all_ious.dx_iou.dl + all_ious.dx_iou.dr);
float dy = (all_ious.dx_iou.dt + all_ious.dx_iou.db);
float dw = ((-0.5 * all_ious.dx_iou.dl) + (0.5 * all_ious.dx_iou.dr));
float dh = ((-0.5 * all_ious.dx_iou.dt) + (0.5 * all_ious.dx_iou.db));
// predict exponential, apply gradient of e^delta_t ONLY for w,h
delta[index + 2 * stride] *= exp(x[index + 2 * stride]);
delta[index + 3 * stride] *= exp(x[index + 3 * stride]);
dw *= exp(x[index + 2 * stride]);
dh *= exp(x[index + 3 * stride]);
// normalize iou weight
delta[index + 0 * stride] *= iou_normalizer;
delta[index + 1 * stride] *= iou_normalizer;
delta[index + 2 * stride] *= iou_normalizer;
delta[index + 3 * stride] *= iou_normalizer;
dx *= iou_normalizer;
dy *= iou_normalizer;
dw *= iou_normalizer;
dh *= iou_normalizer;
if (!accumulate) {
delta[index + 0 * stride] = 0;
delta[index + 1 * stride] = 0;
delta[index + 2 * stride] = 0;
delta[index + 3 * stride] = 0;
}
// accumulate delta
delta[index + 0 * stride] += dx;
delta[index + 1 * stride] += dy;
delta[index + 2 * stride] += dw;
delta[index + 3 * stride] += dh;
}
return all_ious;
}
void averages_yolo_deltas(int class_index, int box_index, int stride, int classes, float *delta)
{
int classes_in_one_box = 0;
int c;
for (c = 0; c < classes; ++c) {
if (delta[class_index + stride*c] > 0) classes_in_one_box++;
}
if (classes_in_one_box > 0) {
delta[box_index + 0 * stride] /= classes_in_one_box;
delta[box_index + 1 * stride] /= classes_in_one_box;
delta[box_index + 2 * stride] /= classes_in_one_box;
delta[box_index + 3 * stride] /= classes_in_one_box;
}
}
void delta_yolo_class(float *output, float *delta, int index, int class_id, int classes, int stride, float *avg_cat, int focal_loss)
{
int n;
@@ -230,6 +246,19 @@ void delta_yolo_class(float *output, float *delta, int index, int class_id, int
}
}
int compare_yolo_class(float *output, int classes, int class_index, int stride, float objectness, int class_id, float conf_thresh)
{
int j;
for (j = 0; j < classes; ++j) {
//float prob = objectness * output[class_index + stride*j];
float prob = output[class_index + stride*j];
if (prob > conf_thresh) {
return 1;
}
}
return 0;
}
static int entry_index(layer l, int batch, int location, int entry)
{
int n = location / (l.w*l.h);
@@ -254,6 +283,7 @@ void forward_yolo_layer(const layer l, network_state state)
}
#endif
// delta is zeroed
memset(l.delta, 0, l.outputs * l.batch * sizeof(float));
if (!state.train) return;
//float avg_iou = 0;
@@ -293,7 +323,7 @@ void forward_yolo_layer(const layer l, network_state state)
int class_index = entry_index(l, b, n*l.w*l.h + j*l.w + i, 4 + 1);
int obj_index = entry_index(l, b, n*l.w*l.h + j*l.w + i, 4);
float objectness = l.output[obj_index];
int class_id_match = compare_yolo_class(l.output, l.classes, class_index, l.w*l.h, objectness, class_id);
int class_id_match = compare_yolo_class(l.output, l.classes, class_index, l.w*l.h, objectness, class_id, 0.25f);
float iou = box_iou(pred, truth);
if (iou > best_match_iou && class_id_match == 1) {
@@ -319,7 +349,7 @@ void forward_yolo_layer(const layer l, network_state state)
int class_index = entry_index(l, b, n*l.w*l.h + j*l.w + i, 4 + 1);
delta_yolo_class(l.output, l.delta, class_index, class_id, l.classes, l.w*l.h, 0, l.focal_loss);
box truth = float_to_box_stride(state.truth + best_t*(4 + 1) + b*l.truths, 1);
delta_yolo_box(truth, l.output, l.biases, l.mask[n], box_index, i, j, l.w, l.h, state.net.w, state.net.h, l.delta, (2 - truth.w*truth.h), l.w*l.h, l.iou_normalizer, l.iou_loss);
delta_yolo_box(truth, l.output, l.biases, l.mask[n], box_index, i, j, l.w, l.h, state.net.w, state.net.h, l.delta, (2 - truth.w*truth.h), l.w*l.h, l.iou_normalizer, l.iou_loss, 1);
}
}
}
@@ -353,7 +383,7 @@ void forward_yolo_layer(const layer l, network_state state)
int mask_n = int_index(l.mask, best_n, l.n);
if (mask_n >= 0) {
int box_index = entry_index(l, b, mask_n*l.w*l.h + j*l.w + i, 0);
ious all_ious = delta_yolo_box(truth, l.output, l.biases, best_n, box_index, i, j, l.w, l.h, state.net.w, state.net.h, l.delta, (2 - truth.w*truth.h), l.w*l.h, l.iou_normalizer, l.iou_loss);
ious all_ious = delta_yolo_box(truth, l.output, l.biases, best_n, box_index, i, j, l.w, l.h, state.net.w, state.net.h, l.delta, (2 - truth.w*truth.h), l.w*l.h, l.iou_normalizer, l.iou_loss, 1);
// range is 0 <= 1
tot_iou += all_ious.iou;
@@ -376,8 +406,60 @@ void forward_yolo_layer(const layer l, network_state state)
if (all_ious.iou > .5) recall += 1;
if (all_ious.iou > .75) recall75 += 1;
}
// iou_thresh
for (n = 0; n < l.total; ++n) {
int mask_n = int_index(l.mask, n, l.n);
if (mask_n >= 0 && n != best_n && l.iou_thresh < 1.0f) {
box pred = { 0 };
pred.w = l.biases[2 * n] / state.net.w;
pred.h = l.biases[2 * n + 1] / state.net.h;
float iou = box_iou(pred, truth_shift);
// iou, n
if (iou > l.iou_thresh) {
int box_index = entry_index(l, b, mask_n*l.w*l.h + j*l.w + i, 0);
ious all_ious = delta_yolo_box(truth, l.output, l.biases, n, box_index, i, j, l.w, l.h, state.net.w, state.net.h, l.delta, (2 - truth.w*truth.h), l.w*l.h, l.iou_normalizer, l.iou_loss, 1);
// range is 0 <= 1
tot_iou += all_ious.iou;
tot_iou_loss += 1 - all_ious.iou;
// range is -1 <= giou <= 1
tot_giou += all_ious.giou;
tot_giou_loss += 1 - all_ious.giou;
int obj_index = entry_index(l, b, mask_n*l.w*l.h + j*l.w + i, 4);
avg_obj += l.output[obj_index];
l.delta[obj_index] = l.cls_normalizer * (1 - l.output[obj_index]);
int class_id = state.truth[t*(4 + 1) + b*l.truths + 4];
if (l.map) class_id = l.map[class_id];
int class_index = entry_index(l, b, mask_n*l.w*l.h + j*l.w + i, 4 + 1);
delta_yolo_class(l.output, l.delta, class_index, class_id, l.classes, l.w*l.h, &avg_cat, l.focal_loss);
++count;
++class_count;
if (all_ious.iou > .5) recall += 1;
if (all_ious.iou > .75) recall75 += 1;
}
}
}
}
// averages the deltas obtained by the function: delta_yolo_box()_accumulate
for (j = 0; j < l.h; ++j) {
for (i = 0; i < l.w; ++i) {
for (n = 0; n < l.n; ++n) {
int box_index = entry_index(l, b, n*l.w*l.h + j*l.w + i, 0);
int class_index = entry_index(l, b, n*l.w*l.h + j*l.w + i, 4 + 1);
const int stride = l.w*l.h;
averages_yolo_deltas(class_index, box_index, stride, l.classes, l.delta);
}
}
}
}
//*(l.cost) = pow(mag_array(l.delta, l.outputs * l.batch), 2);
//printf("Region %d Avg IOU: %f, Class: %f, Obj: %f, No Obj: %f, .5R: %f, .75R: %f, count: %d\n", state.index, avg_iou / count, avg_cat / class_count, avg_obj / count, avg_anyobj / (l.w*l.h*l.n*l.batch), recall / count, recall75 / count, count);
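
The new iou_thresh block above assigns extra positive anchors: besides best_n, any other anchor in this layer's mask whose shape-only IoU with the truth exceeds iou_thresh also receives box, objectness and class deltas, which accumulate and are then averaged by averages_yolo_deltas(). Shape-only means both boxes are compared centred at the origin (that is what truth_shift does), so only widths and heights matter. A self-contained sketch of that comparison:

/* Shape-only IoU as used for the iou_thresh assignment: both boxes are treated
 * as centred at the origin, so only width and height matter. */
static float shape_iou(float w1, float h1, float w2, float h2)
{
    float iw = w1 < w2 ? w1 : w2;
    float ih = h1 < h2 ? h1 : h2;
    float inter = iw * ih;
    float uni = w1 * h1 + w2 * h2 - inter;
    return inter / uni;
}
/* Example: an anchor twice the truth's linear size gives shape_iou(1,1,2,2) = 0.25,
 * above the recommended iou_thresh=0.213, so it would also be trained as a
 * positive for that truth. */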
