diff --git a/src/avgpool_layer.c b/src/avgpool_layer.c
index 1306fd38..2b595aac 100644
--- a/src/avgpool_layer.c
+++ b/src/avgpool_layer.c
@@ -1,5 +1,6 @@
 #include "avgpool_layer.h"
 #include "dark_cuda.h"
+#include "utils.h"
 #include <stdio.h>
 
 avgpool_layer make_avgpool_layer(int batch, int w, int h, int c)
diff --git a/src/batchnorm_layer.c b/src/batchnorm_layer.c
index 018294df..253ce288 100644
--- a/src/batchnorm_layer.c
+++ b/src/batchnorm_layer.c
@@ -1,5 +1,6 @@
 #include "batchnorm_layer.h"
 #include "blas.h"
+#include "utils.h"
 #include <stdio.h>
 
 layer make_batchnorm_layer(int batch, int w, int h, int c)
diff --git a/src/blas.c b/src/blas.c
index 96fc067f..212fccdb 100644
--- a/src/blas.c
+++ b/src/blas.c
@@ -1,4 +1,5 @@
 #include "blas.h"
+#include "utils.h"
 
 #include <math.h>
 #include <assert.h>
@@ -358,4 +359,4 @@ void fix_nan_and_inf_cpu(float *input, size_t size)
         if (isnan(val) || isinf(val))
             input[i] = 1.0f / i; // pseudo random value
     }
-}
\ No newline at end of file
+}
diff --git a/src/gru_layer.c b/src/gru_layer.c
index b24bee94..787f46ea 100644
--- a/src/gru_layer.c
+++ b/src/gru_layer.c
@@ -36,36 +36,36 @@ layer make_gru_layer(int batch, int inputs, int outputs, int steps, int batch_no
     l.steps = steps;
     l.inputs = inputs;
 
-    l.input_z_layer = (layer*)malloc(sizeof(layer));
+    l.input_z_layer = (layer*)xcalloc(1,sizeof(layer));
     fprintf(stderr, "\t\t");
     *(l.input_z_layer) = make_connected_layer(batch, steps, inputs, outputs, LINEAR, batch_normalize);
     l.input_z_layer->batch = batch;
 
-    l.state_z_layer = (layer*)malloc(sizeof(layer));
+    l.state_z_layer = (layer*)xcalloc(1,sizeof(layer));
     fprintf(stderr, "\t\t");
     *(l.state_z_layer) = make_connected_layer(batch, steps, outputs, outputs, LINEAR, batch_normalize);
     l.state_z_layer->batch = batch;
 
-    l.input_r_layer = (layer*)malloc(sizeof(layer));
+    l.input_r_layer = (layer*)xcalloc(1,sizeof(layer));
     fprintf(stderr, "\t\t");
     *(l.input_r_layer) = make_connected_layer(batch, steps, inputs, outputs, LINEAR, batch_normalize);
     l.input_r_layer->batch = batch;
 
-    l.state_r_layer = (layer*)malloc(sizeof(layer));
+    l.state_r_layer = (layer*)xcalloc(1,sizeof(layer));
     fprintf(stderr, "\t\t");
     *(l.state_r_layer) = make_connected_layer(batch, steps, outputs, outputs, LINEAR, batch_normalize);
     l.state_r_layer->batch = batch;
 
-    l.input_h_layer = (layer*)malloc(sizeof(layer));
+    l.input_h_layer = (layer*)xcalloc(1,sizeof(layer));
     fprintf(stderr, "\t\t");
     *(l.input_h_layer) = make_connected_layer(batch, steps, inputs, outputs, LINEAR, batch_normalize);
     l.input_h_layer->batch = batch;
 
-    l.state_h_layer = (layer*)malloc(sizeof(layer));
+    l.state_h_layer = (layer*)xcalloc(1,sizeof(layer));
     fprintf(stderr, "\t\t");
     *(l.state_h_layer) = make_connected_layer(batch, steps, outputs, outputs, LINEAR, batch_normalize);
     l.state_h_layer->batch = batch;
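
Note: the pattern in the gru_layer.c hunk swaps unchecked malloc calls for the xcalloc wrapper declared in utils.h, so each sub-layer struct starts out zero-initialized and a failed allocation no longer flows through as a NULL pointer. The patch does not show the wrapper itself; as a minimal sketch only, assuming a signature like void *xcalloc(size_t nmemb, size_t size) and fail-fast error handling (the actual darknet implementation may differ), such a wrapper could look like:

    #include <stdio.h>
    #include <stdlib.h>

    /* Checked, zero-initializing allocation: never returns NULL to the caller. */
    void *xcalloc(size_t nmemb, size_t size)
    {
        void *ptr = calloc(nmemb, size);
        if (!ptr) {
            fprintf(stderr, "calloc of %zu x %zu bytes failed\n", nmemb, size);
            exit(EXIT_FAILURE);   /* abort rather than continue with a bad layer */
        }
        return ptr;
    }

With a wrapper along these lines, a call such as l.input_z_layer = (layer*)xcalloc(1, sizeof(layer)); either yields a zeroed layer struct or terminates the process, which is why the call sites above need no additional NULL checks.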