pull/5299/head
commit 9942d48412 (parent c7b10ceadb)
Joseph Redmon, 9 years ago
  1. Makefile (6)
  2. src/batchnorm_layer.c (14)
  3. src/batchnorm_layer.h (2)
  4. src/data.c (97)
  5. src/data.h (6)
  6. src/parser.c (29)
  7. src/rnn.c (61)

@@ -1,5 +1,5 @@
GPU=0
OPENCV=0
GPU=1
OPENCV=1
DEBUG=0
ARCH= --gpu-architecture=compute_20 --gpu-code=compute_20
@@ -34,7 +34,7 @@ CFLAGS+= -DGPU
LDFLAGS+= -L/usr/local/cuda/lib64 -lcuda -lcudart -lcublas -lcurand
endif
OBJ=gemm.o utils.o cuda.o deconvolutional_layer.o convolutional_layer.o list.o image.o activations.o im2col.o col2im.o blas.o crop_layer.o dropout_layer.o maxpool_layer.o softmax_layer.o data.o matrix.o network.o connected_layer.o cost_layer.o parser.o option_list.o darknet.o detection_layer.o imagenet.o captcha.o route_layer.o writing.o box.o nightmare.o normalization_layer.o avgpool_layer.o coco.o dice.o yolo.o layer.o compare.o classifier.o local_layer.o swag.o shortcut_layer.o activation_layer.o rnn_layer.o gru_layer.o rnn.o rnn_vid.o crnn_layer.o coco_demo.o tag.o cifar.o yolo_demo.o go.o batchnorm_layer.o
OBJ=gemm.o utils.o cuda.o deconvolutional_layer.o convolutional_layer.o list.o image.o activations.o im2col.o col2im.o blas.o crop_layer.o dropout_layer.o maxpool_layer.o softmax_layer.o data.o matrix.o network.o connected_layer.o cost_layer.o parser.o option_list.o darknet.o detection_layer.o imagenet.o captcha.o route_layer.o writing.o box.o nightmare.o normalization_layer.o avgpool_layer.o coco.o dice.o yolo2.o layer.o compare.o classifier.o local_layer.o swag.o shortcut_layer.o activation_layer.o rnn_layer.o gru_layer.o rnn.o rnn_vid.o crnn_layer.o coco_demo.o tag.o cifar.o yolo_demo.o go.o batchnorm_layer.o
ifeq ($(GPU), 1)
LDFLAGS+= -lstdc++
OBJ+=convolutional_kernels.o deconvolutional_kernels.o activation_kernels.o im2col_kernels.o col2im_kernels.o blas_kernels.o crop_layer_kernels.o dropout_layer_kernels.o maxpool_layer_kernels.o softmax_layer_kernels.o network_kernels.o avgpool_layer_kernels.o

@@ -135,6 +135,20 @@ void backward_batchnorm_layer(const layer layer, network_state state)
}
#ifdef GPU
void pull_batchnorm_layer(layer l)
{
cuda_pull_array(l.scales_gpu, l.scales, l.c);
cuda_pull_array(l.rolling_mean_gpu, l.rolling_mean, l.c);
cuda_pull_array(l.rolling_variance_gpu, l.rolling_variance, l.c);
}
void push_batchnorm_layer(layer l)
{
cuda_push_array(l.scales_gpu, l.scales, l.c);
cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.c);
cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.c);
}
void forward_batchnorm_layer_gpu(layer l, network_state state)
{
if(l.type == BATCHNORM) copy_ongpu(l.outputs*l.batch, state.input, 1, l.output_gpu, 1);

@@ -12,6 +12,8 @@ void backward_batchnorm_layer(layer l, network_state state);
#ifdef GPU
void forward_batchnorm_layer_gpu(layer l, network_state state);
void backward_batchnorm_layer_gpu(layer l, network_state state);
void pull_batchnorm_layer(layer l);
void push_batchnorm_layer(layer l);
#endif
#endif

@@ -271,78 +271,37 @@ void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int
free(boxes);
}
void fill_truth_detection(char *path, float *truth, int classes, int num_boxes, int flip, int background, float dx, float dy, float sx, float sy)
void fill_truth_detection(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
char *labelpath = find_replace(path, "JPEGImages", "labels");
char *labelpath = find_replace(path, "images", "labels");
labelpath = find_replace(labelpath, "JPEGImages", "labels");
labelpath = find_replace(labelpath, ".jpg", ".txt");
labelpath = find_replace(labelpath, ".JPG", ".txt");
labelpath = find_replace(labelpath, ".JPEG", ".txt");
int count = 0;
box_label *boxes = read_boxes(labelpath, &count);
randomize_boxes(boxes, count);
correct_boxes(boxes, count, dx, dy, sx, sy, flip);
if(count > 17) count = 17;
float x,y,w,h;
float left, top, right, bot;
int id;
int i;
if(background){
for(i = 0; i < num_boxes*num_boxes*(4+classes+background); i += 4+classes+background){
truth[i] = 1;
}
}
for (i = 0; i < count; ++i) {
left = boxes[i].left * sx - dx;
right = boxes[i].right * sx - dx;
top = boxes[i].top * sy - dy;
bot = boxes[i].bottom* sy - dy;
x = boxes[i].x;
y = boxes[i].y;
w = boxes[i].w;
h = boxes[i].h;
id = boxes[i].id;
if(flip){
float swap = left;
left = 1. - right;
right = 1. - swap;
}
left = constrain(0, 1, left);
right = constrain(0, 1, right);
top = constrain(0, 1, top);
bot = constrain(0, 1, bot);
x = (left+right)/2;
y = (top+bot)/2;
w = (right - left);
h = (bot - top);
if (x <= 0 || x >= 1 || y <= 0 || y >= 1) continue;
int col = (int)(x*num_boxes);
int row = (int)(y*num_boxes);
x = x*num_boxes - col;
y = y*num_boxes - row;
/*
float maxwidth = distance_from_edge(i, num_boxes);
float maxheight = distance_from_edge(j, num_boxes);
w = w/maxwidth;
h = h/maxheight;
*/
w = constrain(0, 1, w);
h = constrain(0, 1, h);
if (w < .01 || h < .01) continue;
if(1){
w = pow(w, 1./2.);
h = pow(h, 1./2.);
}
int index = (col+row*num_boxes)*(4+classes+background);
if(truth[index+classes+background+2]) continue;
if(background) truth[index++] = 0;
truth[index+id] = 1;
index += classes;
truth[index++] = x;
truth[index++] = y;
truth[index++] = w;
truth[index++] = h;
truth[i*5+0] = id;
truth[i*5+1] = x;
truth[i*5+2] = y;
truth[i*5+3] = w;
truth[i*5+4] = h;
}
free(boxes);
}
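The hunk above switches to a flat truth layout: 5 floats per ground-truth box in the order id, x, y, w, h, with at most 17 boxes kept per image. A minimal sketch of unpacking one box from that layout (the gt_box type and get_truth_box helper are hypothetical, not part of the commit):

typedef struct { int id; float x, y, w, h; } gt_box;

/* Read box i back out of the flat row written by fill_truth_detection(). */
gt_box get_truth_box(const float *truth, int i)
{
    gt_box b;
    b.id = (int) truth[i*5 + 0];
    b.x  = truth[i*5 + 1];
    b.y  = truth[i*5 + 2];
    b.w  = truth[i*5 + 3];
    b.h  = truth[i*5 + 4];
    return b;
}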
@@ -485,6 +444,7 @@ data load_data_region(int n, char **paths, int m, int w, int h, int size, int cl
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
int k = size*size*(5+classes);
d.y = make_matrix(n, k);
for(i = 0; i < n; ++i){
@@ -641,7 +601,7 @@ data load_data_swag(char **paths, int n, int classes, float jitter)
return d;
}
data load_data_detection(int n, char **paths, int m, int classes, int w, int h, int num_boxes, int background)
data load_data_detection(int n, int boxes, char **paths, int m, int w, int h, int classes, float jitter)
{
char **random_paths = get_random_paths(paths, n, m);
int i;
@@ -652,16 +612,15 @@ data load_data_detection(int n, char **paths, int m, int classes, int w, int h,
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
int k = num_boxes*num_boxes*(4+classes+background);
d.y = make_matrix(n, k);
d.y = make_matrix(n, 5*boxes);
for(i = 0; i < n; ++i){
image orig = load_image_color(random_paths[i], 0, 0);
int oh = orig.h;
int ow = orig.w;
int dw = ow/10;
int dh = oh/10;
int dw = (ow*jitter);
int dh = (oh*jitter);
int pleft = rand_uniform(-dw, dw);
int pright = rand_uniform(-dw, dw);
@@ -674,13 +633,6 @@ data load_data_detection(int n, char **paths, int m, int classes, int w, int h,
float sx = (float)swidth / ow;
float sy = (float)sheight / oh;
/*
float angle = rand_uniform()*.1 - .05;
image rot = rotate_image(orig, angle);
free_image(orig);
orig = rot;
*/
int flip = rand_r(&data_seed)%2;
image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
@@ -691,7 +643,7 @@ data load_data_detection(int n, char **paths, int m, int classes, int w, int h,
if(flip) flip_image(sized);
d.X.vals[i] = sized.data;
fill_truth_detection(random_paths[i], d.y.vals[i], classes, num_boxes, flip, background, dx, dy, 1./sx, 1./sy);
fill_truth_detection(random_paths[i], d.y.vals[i], classes, flip, dx, dy, 1./sx, 1./sy);
free_image(orig);
free_image(cropped);
@@ -700,6 +652,7 @@ data load_data_detection(int n, char **paths, int m, int classes, int w, int h,
return d;
}
void *load_thread(void *ptr)
{
@@ -717,7 +670,7 @@ void *load_thread(void *ptr)
} else if (a.type == STUDY_DATA){
*a.d = load_data_study(a.paths, a.n, a.m, a.labels, a.classes, a.min, a.max, a.size);
} else if (a.type == DETECTION_DATA){
*a.d = load_data_detection(a.n, a.paths, a.m, a.classes, a.w, a.h, a.num_boxes, a.background);
*a.d = load_data_detection(a.n, a.num_boxes, a.paths, a.m, a.classes, a.w, a.h, a.background);
} else if (a.type == WRITING_DATA){
*a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h);
} else if (a.type == REGION_DATA){

@@ -25,10 +25,12 @@ typedef struct{
matrix y;
int *indexes;
int shallow;
int *num_boxes;
box **boxes;
} data;
typedef enum {
CLASSIFICATION_DATA, DETECTION_DATA, CAPTCHA_DATA, REGION_DATA, IMAGE_DATA, COMPARE_DATA, WRITING_DATA, SWAG_DATA, TAG_DATA, OLD_CLASSIFICATION_DATA, STUDY_DATA
CLASSIFICATION_DATA, DETECTION_DATA, CAPTCHA_DATA, REGION_DATA, IMAGE_DATA, COMPARE_DATA, WRITING_DATA, SWAG_DATA, TAG_DATA, OLD_CLASSIFICATION_DATA, STUDY_DATA, DET_DATA
} data_type;
typedef struct load_args{
@@ -68,7 +70,7 @@ void print_letters(float *pred, int n);
data load_data_captcha(char **paths, int n, int m, int k, int w, int h);
data load_data_captcha_encode(char **paths, int n, int m, int w, int h);
data load_data(char **paths, int n, int m, char **labels, int k, int w, int h);
data load_data_detection(int n, char **paths, int m, int classes, int w, int h, int num_boxes, int background);
data load_data_detection(int n, int boxes, char **paths, int m, int w, int h, int classes, float jitter);
data load_data_tag(char **paths, int n, int m, int k, int min, int max, int size);
data load_data_augment(char **paths, int n, int m, char **labels, int k, int min, int max, int size);
data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size);
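A hedged usage sketch of the new load_data_detection() prototype above (the wrapper function, batch size, input size, and class count are illustrative assumptions, not from the commit):

/* Sketch: load one detection batch with the new argument order. */
data load_detection_batch(char **paths, int m)
{
    int n = 64;          /* images in this batch */
    int boxes = 17;      /* matches the per-image cap in fill_truth_detection */
    int classes = 20;
    float jitter = .1;   /* .1 reproduces the old fixed ow/10, oh/10 crop window */
    return load_data_detection(n, boxes, paths, m, 448, 448, classes, jitter);
}

The returned d.y then has n rows of 5*boxes floats, laid out id, x, y, w, h per box.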

@@ -852,6 +852,18 @@ void save_convolutional_weights(layer l, FILE *fp)
fwrite(l.filters, sizeof(float), num, fp);
}
void save_batchnorm_weights(layer l, FILE *fp)
{
#ifdef GPU
if(gpu_index >= 0){
pull_batchnorm_layer(l);
}
#endif
fwrite(l.scales, sizeof(float), l.c, fp);
fwrite(l.rolling_mean, sizeof(float), l.c, fp);
fwrite(l.rolling_variance, sizeof(float), l.c, fp);
}
void save_connected_weights(layer l, FILE *fp)
{
#ifdef GPU
@@ -889,6 +901,8 @@ void save_weights_upto(network net, char *filename, int cutoff)
save_convolutional_weights(l, fp);
} if(l.type == CONNECTED){
save_connected_weights(l, fp);
} if(l.type == BATCHNORM){
save_batchnorm_weights(l, fp);
} if(l.type == RNN){
save_connected_weights(*(l.input_layer), fp);
save_connected_weights(*(l.self_layer), fp);
@@ -960,6 +974,18 @@ void load_connected_weights(layer l, FILE *fp, int transpose)
#endif
}
void load_batchnorm_weights(layer l, FILE *fp)
{
fread(l.scales, sizeof(float), l.c, fp);
fread(l.rolling_mean, sizeof(float), l.c, fp);
fread(l.rolling_variance, sizeof(float), l.c, fp);
#ifdef GPU
if(gpu_index >= 0){
push_batchnorm_layer(l);
}
#endif
}
void load_convolutional_weights_binary(layer l, FILE *fp)
{
fread(l.biases, sizeof(float), l.n, fp);
@@ -1053,6 +1079,9 @@ void load_weights_upto(network *net, char *filename, int cutoff)
if(l.type == CONNECTED){
load_connected_weights(l, fp, transpose);
}
if(l.type == BATCHNORM){
load_batchnorm_weights(l, fp);
}
if(l.type == CRNN){
load_convolutional_weights(*(l.input_layer), fp);
load_convolutional_weights(*(l.self_layer), fp);

@@ -183,7 +183,7 @@ void test_char_rnn(char *cfgfile, char *weightfile, int num, char *seed, float t
printf("\n");
}
void valid_char_rnn(char *cfgfile, char *weightfile)
void valid_char_rnn(char *cfgfile, char *weightfile, char *seed)
{
char *base = basecfg(cfgfile);
fprintf(stderr, "%s\n", base);
@@ -196,18 +196,22 @@ void valid_char_rnn(char *cfgfile, char *weightfile)
int count = 0;
int c;
int len = strlen(seed);
float *input = calloc(inputs, sizeof(float));
int i;
for(i = 0; i < 100; ++i){
for(i = 0; i < len; ++i){
c = seed[i];
input[(int)c] = 1;
network_predict(net, input);
input[(int)c] = 0;
}
float sum = 0;
c = getc(stdin);
float log2 = log(2);
while(c != EOF){
int next = getc(stdin);
if(next < 0 || next >= 255) error("Out of range character");
if(next == EOF) break;
if(next < 0 || next >= 255) error("Out of range character");
++count;
input[c] = 1;
float *out = network_predict(net, input);
@@ -218,6 +222,52 @@ void valid_char_rnn(char *cfgfile, char *weightfile)
}
}
void vec_char_rnn(char *cfgfile, char *weightfile, char *seed)
{
char *base = basecfg(cfgfile);
fprintf(stderr, "%s\n", base);
network net = parse_network_cfg(cfgfile);
if(weightfile){
load_weights(&net, weightfile);
}
int inputs = get_network_input_size(net);
int c;
int seed_len = strlen(seed);
float *input = calloc(inputs, sizeof(float));
int i;
char *line;
while((line=fgetl(stdin)) != 0){
reset_rnn_state(net, 0);
for(i = 0; i < seed_len; ++i){
c = seed[i];
input[(int)c] = 1;
network_predict(net, input);
input[(int)c] = 0;
}
strip(line);
int str_len = strlen(line);
for(i = 0; i < str_len; ++i){
c = line[i];
input[(int)c] = 1;
network_predict(net, input);
input[(int)c] = 0;
}
c = ' ';
input[(int)c] = 1;
network_predict(net, input);
input[(int)c] = 0;
layer l = net.layers[0];
cuda_pull_array(l.output_gpu, l.output, l.outputs);
printf("%s", line);
for(i = 0; i < l.outputs; ++i){
printf(",%g", l.output[i]);
}
printf("\n");
}
}
void run_char_rnn(int argc, char **argv)
{
@@ -226,7 +276,7 @@ void run_char_rnn(int argc, char **argv)
return;
}
char *filename = find_char_arg(argc, argv, "-file", "data/shakespeare.txt");
char *seed = find_char_arg(argc, argv, "-seed", "\n");
char *seed = find_char_arg(argc, argv, "-seed", "\n\n");
int len = find_int_arg(argc, argv, "-len", 1000);
float temp = find_float_arg(argc, argv, "-temp", .7);
int rseed = find_int_arg(argc, argv, "-srand", time(0));
@@ -235,6 +285,7 @@ void run_char_rnn(int argc, char **argv)
char *cfg = argv[3];
char *weights = (argc > 4) ? argv[4] : 0;
if(0==strcmp(argv[2], "train")) train_char_rnn(cfg, weights, filename, clear);
else if(0==strcmp(argv[2], "valid")) valid_char_rnn(cfg, weights);
else if(0==strcmp(argv[2], "valid")) valid_char_rnn(cfg, weights, seed);
else if(0==strcmp(argv[2], "vec")) vec_char_rnn(cfg, weights, seed);
else if(0==strcmp(argv[2], "generate")) test_char_rnn(cfg, weights, len, seed, temp, rseed);
}
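Assuming the usual darknet front end dispatches the rnn command to run_char_rnn (that dispatch is not part of this diff), the changed entry points would be exercised roughly like this, with placeholder cfg/weights paths:

./darknet rnn valid cfg/rnn.cfg shakespeare.weights < heldout.txt
./darknet rnn vec cfg/rnn.cfg shakespeare.weights < lines.txt

valid now primes the network with -seed (default is two newlines) before scoring stdin, and vec prints each input line followed by the output vector of net.layers[0].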
