@@ -32,7 +32,7 @@ static void increment_layer(layer *l, int steps)
 }
-layer make_conv_lstm_layer(int batch, int h, int w, int c, int output_filters, int groups, int steps, int size, int stride, int dilation, int pad, ACTIVATION activation, int batch_normalize, int peephole, int xnor)
+layer make_conv_lstm_layer(int batch, int h, int w, int c, int output_filters, int groups, int steps, int size, int stride, int dilation, int pad, ACTIVATION activation, int batch_normalize, int peephole, int xnor, int train)
 {
     fprintf(stderr, "CONV_LSTM Layer: %d x %d x %d image, %d filters\n", h, w, c, output_filters);
     /*
@@ -48,6 +48,7 @@ layer make_conv_lstm_layer(int batch, int h, int w, int c, int output_filters, i
     */
     batch = batch / steps;
     layer l = { (LAYER_TYPE)0 };
+    l.train = train;
     l.batch = batch;
     l.type = CONV_LSTM;
     l.steps = steps;
@@ -66,44 +67,44 @@ layer make_conv_lstm_layer(int batch, int h, int w, int c, int output_filters, i
     // U
     l.uf = (layer*)calloc(1, sizeof(layer));
-    *(l.uf) = make_convolutional_layer(batch, steps, h, w, c, output_filters, groups, size, stride, stride, dilation, pad, activation, batch_normalize, 0, xnor, 0, 0, 0, 0, NULL, 0);
+    *(l.uf) = make_convolutional_layer(batch, steps, h, w, c, output_filters, groups, size, stride, stride, dilation, pad, activation, batch_normalize, 0, xnor, 0, 0, 0, 0, NULL, 0, train);
     l.uf->batch = batch;
     if (l.workspace_size < l.uf->workspace_size) l.workspace_size = l.uf->workspace_size;
     l.ui = (layer*)calloc(1, sizeof(layer));
-    *(l.ui) = make_convolutional_layer(batch, steps, h, w, c, output_filters, groups, size, stride, stride, dilation, pad, activation, batch_normalize, 0, xnor, 0, 0, 0, 0, NULL, 0);
+    *(l.ui) = make_convolutional_layer(batch, steps, h, w, c, output_filters, groups, size, stride, stride, dilation, pad, activation, batch_normalize, 0, xnor, 0, 0, 0, 0, NULL, 0, train);
     l.ui->batch = batch;
     if (l.workspace_size < l.ui->workspace_size) l.workspace_size = l.ui->workspace_size;
     l.ug = (layer*)calloc(1, sizeof(layer));
-    *(l.ug) = make_convolutional_layer(batch, steps, h, w, c, output_filters, groups, size, stride, stride, dilation, pad, activation, batch_normalize, 0, xnor, 0, 0, 0, 0, NULL, 0);
+    *(l.ug) = make_convolutional_layer(batch, steps, h, w, c, output_filters, groups, size, stride, stride, dilation, pad, activation, batch_normalize, 0, xnor, 0, 0, 0, 0, NULL, 0, train);
     l.ug->batch = batch;
     if (l.workspace_size < l.ug->workspace_size) l.workspace_size = l.ug->workspace_size;
     l.uo = (layer*)calloc(1, sizeof(layer));
-    *(l.uo) = make_convolutional_layer(batch, steps, h, w, c, output_filters, groups, size, stride, stride, dilation, pad, activation, batch_normalize, 0, xnor, 0, 0, 0, 0, NULL, 0);
+    *(l.uo) = make_convolutional_layer(batch, steps, h, w, c, output_filters, groups, size, stride, stride, dilation, pad, activation, batch_normalize, 0, xnor, 0, 0, 0, 0, NULL, 0, train);
     l.uo->batch = batch;
     if (l.workspace_size < l.uo->workspace_size) l.workspace_size = l.uo->workspace_size;
     // W
     l.wf = (layer*)calloc(1, sizeof(layer));
-    *(l.wf) = make_convolutional_layer(batch, steps, h, w, output_filters, output_filters, groups, size, stride, stride, dilation, pad, activation, batch_normalize, 0, xnor, 0, 0, 0, 0, NULL, 0);
+    *(l.wf) = make_convolutional_layer(batch, steps, h, w, output_filters, output_filters, groups, size, stride, stride, dilation, pad, activation, batch_normalize, 0, xnor, 0, 0, 0, 0, NULL, 0, train);
     l.wf->batch = batch;
     if (l.workspace_size < l.wf->workspace_size) l.workspace_size = l.wf->workspace_size;
     l.wi = (layer*)calloc(1, sizeof(layer));
-    *(l.wi) = make_convolutional_layer(batch, steps, h, w, output_filters, output_filters, groups, size, stride, stride, dilation, pad, activation, batch_normalize, 0, xnor, 0, 0, 0, 0, NULL, 0);
+    *(l.wi) = make_convolutional_layer(batch, steps, h, w, output_filters, output_filters, groups, size, stride, stride, dilation, pad, activation, batch_normalize, 0, xnor, 0, 0, 0, 0, NULL, 0, train);
     l.wi->batch = batch;
     if (l.workspace_size < l.wi->workspace_size) l.workspace_size = l.wi->workspace_size;
     l.wg = (layer*)calloc(1, sizeof(layer));
-    *(l.wg) = make_convolutional_layer(batch, steps, h, w, output_filters, output_filters, groups, size, stride, stride, dilation, pad, activation, batch_normalize, 0, xnor, 0, 0, 0, 0, NULL, 0);
+    *(l.wg) = make_convolutional_layer(batch, steps, h, w, output_filters, output_filters, groups, size, stride, stride, dilation, pad, activation, batch_normalize, 0, xnor, 0, 0, 0, 0, NULL, 0, train);
     l.wg->batch = batch;
     if (l.workspace_size < l.wg->workspace_size) l.workspace_size = l.wg->workspace_size;
     l.wo = (layer*)calloc(1, sizeof(layer));
-    *(l.wo) = make_convolutional_layer(batch, steps, h, w, output_filters, output_filters, groups, size, stride, stride, dilation, pad, activation, batch_normalize, 0, xnor, 0, 0, 0, 0, NULL, 0);
+    *(l.wo) = make_convolutional_layer(batch, steps, h, w, output_filters, output_filters, groups, size, stride, stride, dilation, pad, activation, batch_normalize, 0, xnor, 0, 0, 0, 0, NULL, 0, train);
     l.wo->batch = batch;
     if (l.workspace_size < l.wo->workspace_size) l.workspace_size = l.wo->workspace_size;
@@ -111,21 +112,21 @@ layer make_conv_lstm_layer(int batch, int h, int w, int c, int output_filters, i
     // V
     l.vf = (layer*)calloc(1, sizeof(layer));
     if (l.peephole) {
-        *(l.vf) = make_convolutional_layer(batch, steps, h, w, output_filters, output_filters, groups, size, stride, stride, dilation, pad, activation, batch_normalize, 0, xnor, 0, 0, 0, 0, NULL, 0);
+        *(l.vf) = make_convolutional_layer(batch, steps, h, w, output_filters, output_filters, groups, size, stride, stride, dilation, pad, activation, batch_normalize, 0, xnor, 0, 0, 0, 0, NULL, 0, train);
         l.vf->batch = batch;
         if (l.workspace_size < l.vf->workspace_size) l.workspace_size = l.vf->workspace_size;
     }
     l.vi = (layer*)calloc(1, sizeof(layer));
     if (l.peephole) {
-        *(l.vi) = make_convolutional_layer(batch, steps, h, w, output_filters, output_filters, groups, size, stride, stride, dilation, pad, activation, batch_normalize, 0, xnor, 0, 0, 0, 0, NULL, 0);
+        *(l.vi) = make_convolutional_layer(batch, steps, h, w, output_filters, output_filters, groups, size, stride, stride, dilation, pad, activation, batch_normalize, 0, xnor, 0, 0, 0, 0, NULL, 0, train);
         l.vi->batch = batch;
         if (l.workspace_size < l.vi->workspace_size) l.workspace_size = l.vi->workspace_size;
     }
     l.vo = (layer*)calloc(1, sizeof(layer));
     if (l.peephole) {
-        *(l.vo) = make_convolutional_layer(batch, steps, h, w, output_filters, output_filters, groups, size, stride, stride, dilation, pad, activation, batch_normalize, 0, xnor, 0, 0, 0, 0, NULL, 0);
+        *(l.vo) = make_convolutional_layer(batch, steps, h, w, output_filters, output_filters, groups, size, stride, stride, dilation, pad, activation, batch_normalize, 0, xnor, 0, 0, 0, 0, NULL, 0, train);
         l.vo->batch = batch;
         if (l.workspace_size < l.vo->workspace_size) l.workspace_size = l.vo->workspace_size;
     }
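
Note on the change: the patch only threads a new train flag from the CONV_LSTM constructor (stored in l.train) down into every inner convolutional gate layer (uf/ui/ug/uo, wf/wi/wg/wo and, when peephole is set, vf/vi/vo), presumably so those sub-layers can avoid training-only allocations when the network is built for inference. A minimal sketch of a call site under that assumption is shown below; the hyperparameter values, the train variable, and the header path are illustrative and not taken from this patch.

```c
#include "conv_lstm_layer.h"   /* assumed header declaring make_conv_lstm_layer() in this source tree */

int main(void)
{
    int train = 1;  /* assumed convention: 1 while training, 0 for inference-only loading */

    /* Illustrative hyperparameters; the train flag is forwarded as the new last argument
     * and is then passed on to every gate convolution inside the CONV_LSTM layer. */
    layer lstm = make_conv_lstm_layer(
        /*batch=*/4, /*h=*/13, /*w=*/13, /*c=*/256, /*output_filters=*/256,
        /*groups=*/1, /*steps=*/4, /*size=*/3, /*stride=*/1, /*dilation=*/1,
        /*pad=*/1, /*activation=*/LEAKY, /*batch_normalize=*/1,
        /*peephole=*/0, /*xnor=*/0, train);

    (void)lstm;
    return 0;
}
```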