Mirror of https://github.com/AlexeyAB/darknet.git
parent dc0d7bb8a8
commit 516f019ba6
31 changed files with 1263 additions and 1832 deletions
src/cost_layer.h
@@ -1,33 +1,19 @@
 #ifndef COST_LAYER_H
 #define COST_LAYER_H
-#include "params.h"
+#include "layer.h"

-typedef enum{
-    SSE, MASKED
-} COST_TYPE;
-
-typedef struct {
-    int inputs;
-    int batch;
-    int coords;
-    int classes;
-    float *delta;
-    float *output;
-    COST_TYPE type;
-    #ifdef GPU
-    float * delta_gpu;
-    #endif
-} cost_layer;
+typedef layer cost_layer;

 COST_TYPE get_cost_type(char *s);
 char *get_cost_string(COST_TYPE a);
-cost_layer *make_cost_layer(int batch, int inputs, COST_TYPE type);
-void forward_cost_layer(const cost_layer layer, network_state state);
-void backward_cost_layer(const cost_layer layer, network_state state);
+cost_layer make_cost_layer(int batch, int inputs, COST_TYPE type);
+void forward_cost_layer(const cost_layer l, network_state state);
+void backward_cost_layer(const cost_layer l, network_state state);

 #ifdef GPU
-void forward_cost_layer_gpu(cost_layer layer, network_state state);
-void backward_cost_layer_gpu(const cost_layer layer, network_state state);
+void forward_cost_layer_gpu(cost_layer l, network_state state);
+void backward_cost_layer_gpu(const cost_layer l, network_state state);
 #endif

 #endif
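The change repeated across these headers is the heart of the commit: each per-layer struct is deleted in favor of a typedef of the shared `layer` struct from the new layer.h, and each `make_*_layer` constructor now returns the struct by value instead of a pointer to a heap allocation. Below is a minimal, self-contained sketch of the pattern using toy stand-in types; darknet's real `layer` has far more fields, and the `main` here is purely illustrative.

/* Toy sketch of the unification pattern in this commit; not the real
 * darknet types. One generic struct replaces the per-layer structs,
 * and constructors return it by value. */
#include <stdio.h>
#include <stdlib.h>

typedef enum { SSE, MASKED } COST_TYPE;

typedef struct layer {       /* stand-in for darknet's shared `layer` */
    int batch;
    int inputs;
    int outputs;
    COST_TYPE cost_type;
    float *output;
    float *delta;
} layer;

typedef layer cost_layer;    /* same trick as the headers above */

/* By-value constructor, mirroring `cost_layer make_cost_layer(...)`. */
cost_layer make_cost_layer(int batch, int inputs, COST_TYPE type)
{
    cost_layer l = {0};
    l.batch = batch;
    l.inputs = inputs;
    l.outputs = inputs;
    l.cost_type = type;
    l.output = calloc((size_t)batch * inputs, sizeof(float));
    l.delta = calloc((size_t)batch * inputs, sizeof(float));
    return l;                /* the struct itself is never heap-allocated */
}

int main(void)
{
    layer layers[1];         /* layers can now live in a plain array */
    layers[0] = make_cost_layer(4, 10, SSE);
    printf("cost layer: batch=%d inputs=%d\n", layers[0].batch, layers[0].inputs);
    free(layers[0].output);
    free(layers[0].delta);
    return 0;
}

Returning by value means a network can keep all of its layers in one flat `layer` array, with no per-layer struct types for callers to track.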
src/dropout_layer.h
@@ -1,27 +1,20 @@
 #ifndef DROPOUT_LAYER_H
 #define DROPOUT_LAYER_H

-#include "params.h"
+#include "layer.h"

-typedef struct{
-    int batch;
-    int inputs;
-    float probability;
-    float scale;
-    float *rand;
-    #ifdef GPU
-    float * rand_gpu;
-    #endif
-} dropout_layer;
+typedef layer dropout_layer;

-dropout_layer *make_dropout_layer(int batch, int inputs, float probability);
+dropout_layer make_dropout_layer(int batch, int inputs, float probability);

-void forward_dropout_layer(dropout_layer layer, network_state state);
-void backward_dropout_layer(dropout_layer layer, network_state state);
-void resize_dropout_layer(dropout_layer *layer, int inputs);
+void forward_dropout_layer(dropout_layer l, network_state state);
+void backward_dropout_layer(dropout_layer l, network_state state);
+void resize_dropout_layer(dropout_layer *l, int inputs);

 #ifdef GPU
-void forward_dropout_layer_gpu(dropout_layer layer, network_state state);
-void backward_dropout_layer_gpu(dropout_layer layer, network_state state);
+void forward_dropout_layer_gpu(dropout_layer l, network_state state);
+void backward_dropout_layer_gpu(dropout_layer l, network_state state);

 #endif
 #endif
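For context on the fields being folded into `layer` here: `scale` is the inverted-dropout factor 1/(1 - probability), and `rand` caches the per-unit draws so the backward pass can zero the same units. A hedged sketch of the forward logic these declarations imply; the actual implementation lives in dropout_layer.c and may differ in details.

#include <stdlib.h>

/* Sketch of inverted dropout over n activations, assuming
 * scale == 1.f/(1.f - probability) as the fields above suggest. */
void dropout_forward_sketch(float *input, float *rand_vals, int n,
                            float probability, float scale)
{
    int i;
    for(i = 0; i < n; ++i){
        float r = (float)rand() / RAND_MAX;   /* uniform draw in [0,1] */
        rand_vals[i] = r;                     /* saved for the backward pass */
        if(r < probability) input[i] = 0;     /* drop this unit */
        else input[i] *= scale;               /* rescale the survivors */
    }
}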
src/normalization_layer.c (deleted)
@@ -1,96 +0,0 @@
-#include "normalization_layer.h"
-#include <stdio.h>
-
-image get_normalization_image(normalization_layer layer)
-{
-    int h = layer.h;
-    int w = layer.w;
-    int c = layer.c;
-    return float_to_image(w,h,c,layer.output);
-}
-
-image get_normalization_delta(normalization_layer layer)
-{
-    int h = layer.h;
-    int w = layer.w;
-    int c = layer.c;
-    return float_to_image(w,h,c,layer.delta);
-}
-
-normalization_layer *make_normalization_layer(int batch, int h, int w, int c, int size, float alpha, float beta, float kappa)
-{
-    fprintf(stderr, "Local Response Normalization Layer: %d x %d x %d image, %d size\n", h,w,c,size);
-    normalization_layer *layer = calloc(1, sizeof(normalization_layer));
-    layer->batch = batch;
-    layer->h = h;
-    layer->w = w;
-    layer->c = c;
-    layer->kappa = kappa;
-    layer->size = size;
-    layer->alpha = alpha;
-    layer->beta = beta;
-    layer->output = calloc(h * w * c * batch, sizeof(float));
-    layer->delta = calloc(h * w * c * batch, sizeof(float));
-    layer->sums = calloc(h*w, sizeof(float));
-    return layer;
-}
-
-void resize_normalization_layer(normalization_layer *layer, int h, int w)
-{
-    layer->h = h;
-    layer->w = w;
-    layer->output = realloc(layer->output, h * w * layer->c * layer->batch * sizeof(float));
-    layer->delta = realloc(layer->delta, h * w * layer->c * layer->batch * sizeof(float));
-    layer->sums = realloc(layer->sums, h*w * sizeof(float));
-}
-
-void add_square_array(float *src, float *dest, int n)
-{
-    int i;
-    for(i = 0; i < n; ++i){
-        dest[i] += src[i]*src[i];
-    }
-}
-void sub_square_array(float *src, float *dest, int n)
-{
-    int i;
-    for(i = 0; i < n; ++i){
-        dest[i] -= src[i]*src[i];
-    }
-}
-
-void forward_normalization_layer(const normalization_layer layer, network_state state)
-{
-    int i,j,k;
-    memset(layer.sums, 0, layer.h*layer.w*sizeof(float));
-    int imsize = layer.h*layer.w;
-    for(j = 0; j < layer.size/2; ++j){
-        if(j < layer.c) add_square_array(state.input+j*imsize, layer.sums, imsize);
-    }
-    for(k = 0; k < layer.c; ++k){
-        int next = k+layer.size/2;
-        int prev = k-layer.size/2-1;
-        if(next < layer.c) add_square_array(state.input+next*imsize, layer.sums, imsize);
-        if(prev > 0) sub_square_array(state.input+prev*imsize, layer.sums, imsize);
-        for(i = 0; i < imsize; ++i){
-            layer.output[k*imsize + i] = state.input[k*imsize+i] / pow(layer.kappa + layer.alpha * layer.sums[i], layer.beta);
-        }
-    }
-}
-
-void backward_normalization_layer(const normalization_layer layer, network_state state)
-{
-    // TODO!
-    // OR NOT TODO!!
-}
-
-void visualize_normalization_layer(normalization_layer layer, char *window)
-{
-    image delta = get_normalization_image(layer);
-    image dc = collapse_image_layers(delta, 1);
-    char buff[256];
-    sprintf(buff, "%s: Output", window);
-    show_image(dc, buff);
-    save_image(dc, buff);
-    free_image(dc);
-}
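For the record, the deleted forward pass implements cross-channel local response normalization: for channel k and pixel i, output[k][i] = input[k][i] / (kappa + alpha * S[k][i])^beta, where S[k][i] sums the squared inputs over a window of `size` channels centered on k. The sum is maintained incrementally as k advances, adding channel k + size/2 and subtracting channel k - size/2 - 1. Note that the guard `if(prev > 0)` never subtracts channel 0's squares once they enter the window; `if(prev >= 0)` looks like the intended condition, though the point is moot since the file is deleted in this commit.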
src/normalization_layer.h (deleted)
@@ -1,27 +0,0 @@
-#ifndef NORMALIZATION_LAYER_H
-#define NORMALIZATION_LAYER_H
-
-#include "image.h"
-#include "params.h"
-
-typedef struct {
-    int batch;
-    int h,w,c;
-    int size;
-    float alpha;
-    float beta;
-    float kappa;
-    float *delta;
-    float *output;
-    float *sums;
-} normalization_layer;
-
-image get_normalization_image(normalization_layer layer);
-normalization_layer *make_normalization_layer(int batch, int h, int w, int c, int size, float alpha, float beta, float kappa);
-void resize_normalization_layer(normalization_layer *layer, int h, int w);
-void forward_normalization_layer(const normalization_layer layer, network_state state);
-void backward_normalization_layer(const normalization_layer layer, network_state state);
-void visualize_normalization_layer(normalization_layer layer, char *window);
-
-#endif
src/route_layer.h
@@ -1,28 +1,17 @@
 #ifndef ROUTE_LAYER_H
 #define ROUTE_LAYER_H
 #include "network.h"
+#include "layer.h"

-typedef struct {
-    int batch;
-    int outputs;
-    int n;
-    int * input_layers;
-    int * input_sizes;
-    float * delta;
-    float * output;
-    #ifdef GPU
-    float * delta_gpu;
-    float * output_gpu;
-    #endif
-} route_layer;
+typedef layer route_layer;

-route_layer *make_route_layer(int batch, int n, int *input_layers, int *input_size);
-void forward_route_layer(const route_layer layer, network net);
-void backward_route_layer(const route_layer layer, network net);
+route_layer make_route_layer(int batch, int n, int *input_layers, int *input_size);
+void forward_route_layer(const route_layer l, network net);
+void backward_route_layer(const route_layer l, network net);

 #ifdef GPU
-void forward_route_layer_gpu(const route_layer layer, network net);
-void backward_route_layer_gpu(const route_layer layer, network net);
+void forward_route_layer_gpu(const route_layer l, network net);
+void backward_route_layer_gpu(const route_layer l, network net);
 #endif

 #endif
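A route layer concatenates the outputs of the `n` earlier layers named in `input_layers`, with their per-layer lengths in `input_sizes`. A hedged sketch of that copy loop, ignoring batching (which the real forward_route_layer in route_layer.c handles per batch item):

#include <string.h>

/* Sketch: concatenate n earlier layers' outputs into one buffer.
 * layer_outputs[index] points at the output of layer `index`. */
void route_forward_sketch(float *out, float **layer_outputs,
                          int *input_layers, int *input_sizes, int n)
{
    int i, offset = 0;
    for(i = 0; i < n; ++i){
        int index = input_layers[i];     /* which earlier layer to copy */
        int size = input_sizes[i];       /* length of that layer's output */
        memcpy(out + offset, layer_outputs[index], size * sizeof(float));
        offset += size;
    }
}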
src/softmax_layer.h
@@ -1,28 +1,19 @@
 #ifndef SOFTMAX_LAYER_H
 #define SOFTMAX_LAYER_H
-#include "params.h"
+#include "layer.h"

-typedef struct {
-    int inputs;
-    int batch;
-    int groups;
-    float *delta;
-    float *output;
-    #ifdef GPU
-    float * delta_gpu;
-    float * output_gpu;
-    #endif
-} softmax_layer;
+typedef layer softmax_layer;

 void softmax_array(float *input, int n, float *output);
-softmax_layer *make_softmax_layer(int batch, int inputs, int groups);
-void forward_softmax_layer(const softmax_layer layer, network_state state);
-void backward_softmax_layer(const softmax_layer layer, network_state state);
+softmax_layer make_softmax_layer(int batch, int inputs, int groups);
+void forward_softmax_layer(const softmax_layer l, network_state state);
+void backward_softmax_layer(const softmax_layer l, network_state state);

 #ifdef GPU
-void pull_softmax_layer_output(const softmax_layer layer);
-void forward_softmax_layer_gpu(const softmax_layer layer, network_state state);
-void backward_softmax_layer_gpu(const softmax_layer layer, network_state state);
+void pull_softmax_layer_output(const softmax_layer l);
+void forward_softmax_layer_gpu(const softmax_layer l, network_state state);
+void backward_softmax_layer_gpu(const softmax_layer l, network_state state);
 #endif

 #endif
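softmax_array is the one free function kept in this header. A hedged sketch of a numerically stable version matching its signature, subtracting the max before exponentiating so expf cannot overflow; the real body lives in softmax_layer.c:

#include <math.h>

/* Sketch: stable softmax over n values into output. */
void softmax_array(float *input, int n, float *output)
{
    int i;
    float sum = 0;
    float largest = input[0];
    for(i = 1; i < n; ++i){
        if(input[i] > largest) largest = input[i];
    }
    for(i = 0; i < n; ++i){
        output[i] = expf(input[i] - largest);  /* shift by max for stability */
        sum += output[i];
    }
    for(i = 0; i < n; ++i) output[i] /= sum;   /* normalize to probabilities */
}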