Merge branch 'dev/alexey/master' into dev/cenit/opencv_fix

pull/3685/head
Stefano Sinigardi 6 years ago
commit b2b769c099
  1. Makefile (2 lines changed)
  2. build/darknet/darknet.vcxproj (2 lines changed)
  3. build/darknet/darknet_no_gpu.vcxproj (2 lines changed)
  4. build/darknet/yolo_cpp_dll.vcxproj (2 lines changed)
  5. build/darknet/yolo_cpp_dll_no_gpu.vcxproj (2 lines changed)
  6. include/darknet.h (1 line changed)
  7. src/network.c (8 lines changed)
  8. src/parser.c (24 lines changed)
  9. src/sam_layer.c (118 lines changed)
  10. src/sam_layer.h (23 lines changed)

@@ -118,7 +118,7 @@ LDFLAGS+= -L/usr/local/zed/lib -lsl_core -lsl_input -lsl_zed
#-lstdc++ -D_GLIBCXX_USE_CXX11_ABI=0
endif
-OBJ=image_opencv.o http_stream.o gemm.o utils.o dark_cuda.o convolutional_layer.o list.o image.o activations.o im2col.o col2im.o blas.o crop_layer.o dropout_layer.o maxpool_layer.o softmax_layer.o data.o matrix.o network.o connected_layer.o cost_layer.o parser.o option_list.o darknet.o detection_layer.o captcha.o route_layer.o writing.o box.o nightmare.o normalization_layer.o avgpool_layer.o coco.o dice.o yolo.o detector.o layer.o compare.o classifier.o local_layer.o swag.o shortcut_layer.o activation_layer.o rnn_layer.o gru_layer.o rnn.o rnn_vid.o crnn_layer.o demo.o tag.o cifar.o go.o batchnorm_layer.o art.o region_layer.o reorg_layer.o reorg_old_layer.o super.o voxel.o tree.o yolo_layer.o upsample_layer.o lstm_layer.o conv_lstm_layer.o scale_channels_layer.o
+OBJ=image_opencv.o http_stream.o gemm.o utils.o dark_cuda.o convolutional_layer.o list.o image.o activations.o im2col.o col2im.o blas.o crop_layer.o dropout_layer.o maxpool_layer.o softmax_layer.o data.o matrix.o network.o connected_layer.o cost_layer.o parser.o option_list.o darknet.o detection_layer.o captcha.o route_layer.o writing.o box.o nightmare.o normalization_layer.o avgpool_layer.o coco.o dice.o yolo.o detector.o layer.o compare.o classifier.o local_layer.o swag.o shortcut_layer.o activation_layer.o rnn_layer.o gru_layer.o rnn.o rnn_vid.o crnn_layer.o demo.o tag.o cifar.o go.o batchnorm_layer.o art.o region_layer.o reorg_layer.o reorg_old_layer.o super.o voxel.o tree.o yolo_layer.o upsample_layer.o lstm_layer.o conv_lstm_layer.o scale_channels_layer.o sam_layer.o
ifeq ($(GPU), 1)
LDFLAGS+= -lstdc++
OBJ+=convolutional_kernels.o activation_kernels.o im2col_kernels.o col2im_kernels.o blas_kernels.o crop_layer_kernels.o dropout_layer_kernels.o maxpool_layer_kernels.o network_kernels.o avgpool_layer_kernels.o

@@ -226,6 +226,7 @@
<ClCompile Include="..\..\src\rnn_layer.c" />
<ClCompile Include="..\..\src\rnn_vid.c" />
<ClCompile Include="..\..\src\route_layer.c" />
<ClCompile Include="..\..\src\sam_layer.c" />
<ClCompile Include="..\..\src\scale_channels_layer.c" />
<ClCompile Include="..\..\src\shortcut_layer.c" />
<ClCompile Include="..\..\src\softmax_layer.c" />
@@ -285,6 +286,7 @@
<ClInclude Include="..\..\src\reorg_old_layer.h" />
<ClInclude Include="..\..\src\rnn_layer.h" />
<ClInclude Include="..\..\src\route_layer.h" />
<ClInclude Include="..\..\src\sam_layer.h" />
<ClInclude Include="..\..\src\scale_channels_layer.h" />
<ClInclude Include="..\..\src\shortcut_layer.h" />
<ClInclude Include="..\..\src\softmax_layer.h" />

@@ -230,6 +230,7 @@
<ClCompile Include="..\..\src\rnn_layer.c" />
<ClCompile Include="..\..\src\rnn_vid.c" />
<ClCompile Include="..\..\src\route_layer.c" />
<ClCompile Include="..\..\src\sam_layer.c" />
<ClCompile Include="..\..\src\scale_channels_layer.c" />
<ClCompile Include="..\..\src\shortcut_layer.c" />
<ClCompile Include="..\..\src\softmax_layer.c" />
@@ -289,6 +290,7 @@
<ClInclude Include="..\..\src\reorg_old_layer.h" />
<ClInclude Include="..\..\src\rnn_layer.h" />
<ClInclude Include="..\..\src\route_layer.h" />
<ClInclude Include="..\..\src\sam_layer.h" />
<ClInclude Include="..\..\src\scale_channels_layer.h" />
<ClInclude Include="..\..\src\shortcut_layer.h" />
<ClInclude Include="..\..\src\softmax_layer.h" />

@@ -228,6 +228,7 @@
<ClCompile Include="..\..\src\rnn_layer.c" />
<ClCompile Include="..\..\src\rnn_vid.c" />
<ClCompile Include="..\..\src\route_layer.c" />
<ClCompile Include="..\..\src\sam_layer.c" />
<ClCompile Include="..\..\src\scale_channels_layer.c" />
<ClCompile Include="..\..\src\shortcut_layer.c" />
<ClCompile Include="..\..\src\softmax_layer.c" />
@@ -289,6 +290,7 @@
<ClInclude Include="..\..\src\reorg_old_layer.h" />
<ClInclude Include="..\..\src\rnn_layer.h" />
<ClInclude Include="..\..\src\route_layer.h" />
<ClInclude Include="..\..\src\sam_layer.h" />
<ClInclude Include="..\..\src\scale_channels_layer.h" />
<ClInclude Include="..\..\src\shortcut_layer.h" />
<ClInclude Include="..\..\src\softmax_layer.h" />

@@ -214,6 +214,7 @@
<ClCompile Include="..\..\src\rnn_layer.c" />
<ClCompile Include="..\..\src\rnn_vid.c" />
<ClCompile Include="..\..\src\route_layer.c" />
<ClCompile Include="..\..\src\sam_layer.c" />
<ClCompile Include="..\..\src\scale_channels_layer.c" />
<ClCompile Include="..\..\src\shortcut_layer.c" />
<ClCompile Include="..\..\src\softmax_layer.c" />
@@ -275,6 +276,7 @@
<ClInclude Include="..\..\src\reorg_old_layer.h" />
<ClInclude Include="..\..\src\rnn_layer.h" />
<ClInclude Include="..\..\src\route_layer.h" />
<ClInclude Include="..\..\src\sam_layer.h" />
<ClInclude Include="..\..\src\scale_channels_layer.h" />
<ClInclude Include="..\..\src\shortcut_layer.h" />
<ClInclude Include="..\..\src\softmax_layer.h" />

@@ -137,6 +137,7 @@ typedef enum {
LOCAL,
SHORTCUT,
SCALE_CHANNELS,
SAM,
ACTIVE,
RNN,
GRU,

@@ -212,6 +212,10 @@ char *get_layer_string(LAYER_TYPE a)
return "route";
case SHORTCUT:
return "shortcut";
case SCALE_CHANNELS:
return "scale_channels";
case SAM:
return "sam";
case NORMALIZATION:
return "normalization";
case BATCHNORM:
@@ -524,8 +528,8 @@ int resize_network(network *net, int w, int h)
resize_route_layer(&l, net);
}else if (l.type == SHORTCUT) {
resize_shortcut_layer(&l, w, h);
-}else if (l.type == SCALE_CHANNELS) {
-resize_scale_channels_layer(&l, w, h);
+//}else if (l.type == SCALE_CHANNELS) {
+// resize_scale_channels_layer(&l, w, h);
}else if (l.type == UPSAMPLE) {
resize_upsample_layer(&l, w, h);
}else if(l.type == REORG){

@@ -32,6 +32,7 @@
#include "route_layer.h"
#include "shortcut_layer.h"
#include "scale_channels_layer.h"
#include "sam_layer.h"
#include "softmax_layer.h"
#include "utils.h"
#include "upsample_layer.h"
@@ -50,6 +51,7 @@ LAYER_TYPE string_to_layer_type(char * type)
if (strcmp(type, "[shortcut]")==0) return SHORTCUT;
if (strcmp(type, "[scale_channels]") == 0) return SCALE_CHANNELS;
if (strcmp(type, "[sam]") == 0) return SAM;
if (strcmp(type, "[crop]")==0) return CROP;
if (strcmp(type, "[cost]")==0) return COST;
if (strcmp(type, "[detection]")==0) return DETECTION;
@@ -622,6 +624,23 @@ layer parse_scale_channels(list *options, size_params params, network net)
return s;
}
layer parse_sam(list *options, size_params params, network net)
{
char *l = option_find(options, "from");
int index = atoi(l);
if (index < 0) index = params.index + index;
int batch = params.batch;
layer from = net.layers[index];
layer s = make_sam_layer(batch, index, params.w, params.h, params.c, from.out_w, from.out_h, from.out_c);
char *activation_s = option_find_str_quiet(options, "activation", "linear");
ACTIVATION activation = get_activation(activation_s);
s.activation = activation;
return s;
}
layer parse_activation(list *options, size_params params)
{
@@ -923,6 +942,11 @@ network parse_network_cfg_custom(char *filename, int batch, int time_steps)
l = parse_scale_channels(options, params, net);
net.layers[count - 1].use_bin_output = 0;
net.layers[l.index].use_bin_output = 0;
}
else if (lt == SAM) {
l = parse_sam(options, params, net);
net.layers[count - 1].use_bin_output = 0;
net.layers[l.index].use_bin_output = 0;
}else if(lt == DROPOUT){
l = parse_dropout(options, params);
l.output = net.layers[count-1].output;
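
For context, the parser changes above are driven by the network .cfg file. A minimal [sam] section might look like this (the from value here is illustrative, not from this commit; a negative index is resolved relative to the current layer, and activation defaults to linear via option_find_str_quiet):

[sam]
from=-3
activation=linear

Per the asserts in make_sam_layer below, the layer referenced by from must match the current input in width, height, and channel count.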

@@ -0,0 +1,118 @@
#include "sam_layer.h"
#include "dark_cuda.h"
#include "blas.h"
#include <stdio.h>
#include <assert.h>
layer make_sam_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2)
{
fprintf(stderr,"scale Layer: %d\n", index);
layer l = { (LAYER_TYPE)0 };
l.type = SAM;
l.batch = batch;
l.w = w;
l.h = h;
l.c = c;
l.out_w = w2;
l.out_h = h2;
l.out_c = c2;
assert(l.out_c == l.c);
assert(l.w == l.out_w && l.h == l.out_h);
l.outputs = l.out_w*l.out_h*l.out_c;
l.inputs = l.outputs;
l.index = index;
l.delta = (float*)calloc(l.outputs * batch, sizeof(float));
l.output = (float*)calloc(l.outputs * batch, sizeof(float));
l.forward = forward_sam_layer;
l.backward = backward_sam_layer;
#ifdef GPU
l.forward_gpu = forward_sam_layer_gpu;
l.backward_gpu = backward_sam_layer_gpu;
l.delta_gpu = cuda_make_array(l.delta, l.outputs*batch);
l.output_gpu = cuda_make_array(l.output, l.outputs*batch);
#endif
return l;
}
void resize_sam_layer(layer *l, int w, int h)
{
l->out_w = w;
l->out_h = h;
l->outputs = l->out_w*l->out_h*l->out_c;
l->inputs = l->outputs;
l->delta = (float*)realloc(l->delta, l->outputs * l->batch * sizeof(float));
l->output = (float*)realloc(l->output, l->outputs * l->batch * sizeof(float));
#ifdef GPU
cuda_free(l->output_gpu);
cuda_free(l->delta_gpu);
l->output_gpu = cuda_make_array(l->output, l->outputs*l->batch);
l->delta_gpu = cuda_make_array(l->delta, l->outputs*l->batch);
#endif
}
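// NOTE: resize_network() in src/network.c has no SAM branch yet, so this function
// is currently unreferenced; the similar SCALE_CHANNELS resize call there is
// commented out by this same commit.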
void forward_sam_layer(const layer l, network_state state)
{
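// SAM applies per-element (spatial) attention: every input value is scaled by
// the matching element of the 'from' layer's output, hence channel_size == 1
// (scale_channels, by contrast, applies one coefficient per channel).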
int size = l.batch * l.out_c * l.out_w * l.out_h;
int channel_size = 1;
float *from_output = state.net.layers[l.index].output;
int i;
#pragma omp parallel for
for (i = 0; i < size; ++i) {
l.output[i] = state.input[i] * from_output[i];
}
activate_array(l.output, l.outputs*l.batch, l.activation);
}
void backward_sam_layer(const layer l, network_state state)
{
gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta);
//axpy_cpu(l.outputs*l.batch, 1, l.delta, 1, state.delta, 1);
//scale_cpu(l.batch, l.out_w, l.out_h, l.out_c, l.delta, l.w, l.h, l.c, state.net.layers[l.index].delta);
int size = l.batch * l.out_c * l.out_w * l.out_h;
int channel_size = 1;
float *from_output = state.net.layers[l.index].output;
float *from_delta = state.net.layers[l.index].delta;
int i;
#pragma omp parallel for
for (i = 0; i < size; ++i) {
state.delta[i] += l.delta[i] * from_output[i]; // l.delta * from (should be divided by channel_size?)
from_delta[i] = state.input[i] * l.delta[i]; // input * l.delta
}
}
#ifdef GPU
void forward_sam_layer_gpu(const layer l, network_state state)
{
int size = l.batch * l.out_c * l.out_w * l.out_h;
int channel_size = 1;
scale_channels_gpu(state.net.layers[l.index].output_gpu, size, channel_size, state.input, l.output_gpu);
activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
}
void backward_sam_layer_gpu(const layer l, network_state state)
{
gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
int size = l.batch * l.out_c * l.out_w * l.out_h;
int channel_size = 1;
float *from_output = state.net.layers[l.index].output_gpu;
float *from_delta = state.net.layers[l.index].delta_gpu;
backward_scale_channels_gpu(l.delta_gpu, size, channel_size, state.input, from_delta, from_output, state.delta);
}
#endif
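
To make the arithmetic above easier to audit, here is a minimal standalone sketch of the same elementwise math outside darknet (the toy_sam_* names are ours; activation and the GPU path are omitted). Forward computes y = x * a; backward applies the product rule, accumulating into the incoming gradient (+=, like state.delta) and overwriting the attention-map gradient (=, like from_delta):

#include <stdio.h>

/* Toy stand-in for forward_sam_layer / backward_sam_layer on a flat buffer.
   x: layer input, a: output of the 'from' layer (the attention map),
   delta: dL/dy arriving from the layer above. */
static void toy_sam_forward(const float *x, const float *a, float *y, int n)
{
    int i;
    for (i = 0; i < n; ++i) y[i] = x[i] * a[i];   /* y = x * a */
}

static void toy_sam_backward(const float *x, const float *a, const float *delta,
                             float *dx, float *da, int n)
{
    int i;
    for (i = 0; i < n; ++i) {
        dx[i] += delta[i] * a[i]; /* matches: state.delta[i] += l.delta[i] * from_output[i] */
        da[i] = x[i] * delta[i];  /* matches: from_delta[i] = state.input[i] * l.delta[i] */
    }
}

int main(void)
{
    float x[4] = { 1, 2, 3, 4 };
    float a[4] = { 0.5f, 1.0f, 0.0f, 2.0f };
    float y[4], dx[4] = { 0 }, da[4];
    float delta[4] = { 1, 1, 1, 1 };
    int i;
    toy_sam_forward(x, a, y, 4);
    toy_sam_backward(x, a, delta, dx, da, 4);
    for (i = 0; i < 4; ++i) printf("y=%4.1f  dx=%4.1f  da=%4.1f\n", y[i], dx[i], da[i]);
    return 0;
}

Running it shows that the zero at a[2] zeroes both y[2] and dx[2], which is the expected behaviour of a multiplicative attention gate, while da[2] still receives x[2] * delta[2].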

@@ -0,0 +1,23 @@
#ifndef SAM_LAYER_H
#define SAM_LAYER_H
#include "layer.h"
#include "network.h"
#ifdef __cplusplus
extern "C" {
#endif
layer make_sam_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2);
void forward_sam_layer(const layer l, network_state state);
void backward_sam_layer(const layer l, network_state state);
void resize_sam_layer(layer *l, int w, int h);
#ifdef GPU
void forward_sam_layer_gpu(const layer l, network_state state);
void backward_sam_layer_gpu(const layer l, network_state state);
#endif
#ifdef __cplusplus
}
#endif
#endif // SAM_LAYER_H