Fixed many warnings

pull/2383/head^2
AlexeyAB 6 years ago
parent 6d6202f666
commit b3254ed523
  1. src/captcha.c (2 changed lines)
  2. src/cifar.c (4 changed lines)
  3. src/classifier.c (6 changed lines)
  4. src/compare.c (2 changed lines)
  5. src/convolutional_layer.c (25 changed lines)
  6. src/darknet.c (2 changed lines)
  7. src/darkunistd.h (2 changed lines)
  8. src/data.c (8 changed lines)
  9. src/detector.c (21 changed lines)
  10. src/dice.c (2 changed lines)
  11. src/gemm.c (28 changed lines)
  12. src/go.c (2 changed lines)
  13. src/http_stream.cpp (2 changed lines)
  14. src/matrix.c (3 changed lines)
  15. src/network.c (6 changed lines)
  16. src/rnn.c (2 changed lines)
  17. src/tag.c (2 changed lines)
  18. src/utils.c (15 changed lines)
  19. src/writing.c (2 changed lines)
  20. src/yolo_v2_class.cpp (10 changed lines)

src/captcha.c

@@ -85,7 +85,7 @@ void train_captcha(char *cfgfile, char *weightfile)
 float loss = train_network(net, train);
 if(avg_loss == -1) avg_loss = loss;
 avg_loss = avg_loss*.9 + loss*.1;
-printf("%d: %f, %f avg, %lf seconds, %d images\n", i, loss, avg_loss, sec(clock()-time), *net.seen);
+printf("%d: %f, %f avg, %lf seconds, %ld images\n", i, loss, avg_loss, sec(clock()-time), *net.seen);
 free_data(train);
 if(i%100==0){
 char buff[256];
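Note: this %d to %ld change recurs in cifar.c, classifier.c, compare.c, dice.c, go.c, tag.c and writing.c for *net.seen, and in rnn.c for a size_t text size: *net.seen is a wider-than-int image counter (the %ld in the fix suggests a long-sized type), so printing it with %d trips -Wformat. A hypothetical minimal reproduction, with a plain size_t counter standing in for *net.seen:

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        size_t seen = 64000;                 /* stand-in for *net.seen */
        /* printf("%d images\n", seen); */   /* -Wformat: "%d" expects int, argument is size_t */
        printf("%ld images\n", (long)seen);  /* mirrors the commit's fix; the cast keeps it well-defined */
        printf("%zu images\n", seen);        /* C99-portable alternative */
        return 0;
    }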

src/cifar.c

@@ -33,7 +33,7 @@ void train_cifar(char *cfgfile, char *weightfile)
 float loss = train_network_sgd(net, train, 1);
 if(avg_loss == -1) avg_loss = loss;
 avg_loss = avg_loss*.95 + loss*.05;
-printf("%d, %.3f: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), (float)(*net.seen)/N, loss, avg_loss, get_current_rate(net), sec(clock()-time), *net.seen);
+printf("%d, %.3f: %f, %f avg, %f rate, %lf seconds, %ld images\n", get_current_batch(net), (float)(*net.seen)/N, loss, avg_loss, get_current_rate(net), sec(clock()-time), *net.seen);
 if(*net.seen/N > epoch){
 epoch = *net.seen/N;
 char buff[256];
@@ -89,7 +89,7 @@ void train_cifar_distill(char *cfgfile, char *weightfile)
 float loss = train_network_sgd(net, train, 1);
 if(avg_loss == -1) avg_loss = loss;
 avg_loss = avg_loss*.95 + loss*.05;
-printf("%d, %.3f: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), (float)(*net.seen)/N, loss, avg_loss, get_current_rate(net), sec(clock()-time), *net.seen);
+printf("%d, %.3f: %f, %f avg, %f rate, %lf seconds, %ld images\n", get_current_batch(net), (float)(*net.seen)/N, loss, avg_loss, get_current_rate(net), sec(clock()-time), *net.seen);
 if(*net.seen/N > epoch){
 epoch = *net.seen/N;
 char buff[256];

src/classifier.c

@@ -177,7 +177,7 @@ void train_classifier(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
 draw_precision = 1;
 }
-printf("%d, %.3f: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), (float)(*net.seen)/ train_images_num, loss, avg_loss, get_current_rate(net), sec(clock()-time), *net.seen);
+printf("%d, %.3f: %f, %f avg, %f rate, %lf seconds, %ld images\n", get_current_batch(net), (float)(*net.seen)/ train_images_num, loss, avg_loss, get_current_rate(net), sec(clock()-time), *net.seen);
 #ifdef OPENCV
 draw_train_loss(img, img_size, avg_loss, max_img_loss, i, net.max_batches, topk, draw_precision, "top5", dont_show, mjpeg_port);
 #endif // OPENCV
@@ -198,7 +198,7 @@ void train_classifier(char *datacfg, char *cfgfile, char *weightfile, int *gpus,
 if (ngpus != 1) sync_nets(nets, ngpus, 0);
 #endif
 char buff[256];
-sprintf(buff, "%s/%s_last.weights", backup_directory, base, i);
+sprintf(buff, "%s/%s_last.weights", backup_directory, base);
 save_weights(net, buff);
 }
 free_data(train);
@@ -791,7 +791,7 @@ void predict_classifier(char *datacfg, char *cfgfile, char *weightfile, char *fi
 int* indexes = (int*)calloc(top, sizeof(int));
 char buff[256];
 char *input = buff;
-int size = net.w;
+//int size = net.w;
 while(1){
 if(filename){
 strncpy(input, filename, 256);
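Note: the %s_last.weights sprintf above (and the identical one in detector.c) passed one more argument than there are conversions, which GCC reports as -Wformat-extra-args; the stray i is dropped. A hypothetical minimal reproduction:

    #include <stdio.h>

    int main(void)
    {
        char buff[256];
        const char *backup_directory = "backup";
        const char *base = "yolo";
        int i = 1000;
        /* sprintf(buff, "%s/%s_last.weights", backup_directory, base, i); */ /* warning: too many arguments for format */
        sprintf(buff, "%s/%s_last.weights", backup_directory, base);          /* matches the fix */
        printf("%s (iteration %d)\n", buff, i);
        return 0;
    }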

src/compare.c

@@ -54,7 +54,7 @@ void train_compare(char *cfgfile, char *weightfile)
 float loss = train_network(net, train);
 if(avg_loss == -1) avg_loss = loss;
 avg_loss = avg_loss*.9 + loss*.1;
-printf("%.3f: %f, %f avg, %lf seconds, %d images\n", (float)*net.seen/N, loss, avg_loss, sec(clock()-time), *net.seen);
+printf("%.3f: %f, %f avg, %lf seconds, %ld images\n", (float)*net.seen/N, loss, avg_loss, sec(clock()-time), *net.seen);
 free_data(train);
 if(i%100 == 0){
 char buff[256];

src/convolutional_layer.c

@@ -679,7 +679,7 @@ void float_to_bit(float *src, unsigned char *dst, size_t size) {
 void bit_to_float(unsigned char *src, float *dst, size_t size, size_t filters, float *mean_arr) {
 memset(dst, 0, size *sizeof(float));
-size_t i, src_i, src_shift;
+size_t i;
 for (i = 0; i < size; ++i) {
 float mean_val = 1;
@@ -726,7 +726,7 @@ void binary_align_weights(convolutional_layer *l)
 const int items_per_channel = l->size*l->size;
 for (i = 0; i < items_per_channel; ++i)
 {
-uint32_t val = 0;
+//uint32_t val = 0;
 int c_pack;
 for (c_pack = 0; c_pack < 32; ++c_pack) {
 float src = l->binary_weights[fil*items_per_filter + (chan + c_pack)*items_per_channel + i];
@@ -749,8 +749,8 @@ void binary_align_weights(convolutional_layer *l)
 //if (l->n >= 32)
 if(gpu_index >= 0)
 {
-int M = l->n;
-int N = l->out_w*l->out_h;
+//int M = l->n;
+//int N = l->out_w*l->out_h;
 //printf("\n M = %d, N = %d, M %% 8 = %d, N %% 8 = %d - weights \n", M, N, M % 8, N % 8);
 //printf("\n l.w = %d, l.c = %d, l.n = %d \n", l->w, l->c, l->n);
 for (i = 0; i < align_weights_size / 8; ++i) l->align_bit_weights[i] = ~(l->align_bit_weights[i]);
@@ -806,7 +806,7 @@ size_t binary_transpose_align_input(int k, int n, float *b, char **t_bit_input,
 size_t t_bit_input_size = t_intput_size / 8;// +1;
 memset(*t_bit_input, 0, t_bit_input_size * sizeof(char));
-int src_size = k * bit_align;
+//int src_size = k * bit_align;
 // b - [bit_align, k] - [l.bit_align, l.size*l.size*l.c] = src_size
 // t_input - [bit_align, k] - [n', k]
@@ -862,8 +862,8 @@ void forward_convolutional_layer(convolutional_layer l, network_state state)
 int ldb_align = l.lda_align;
 size_t new_ldb = k + (ldb_align - k%ldb_align); // (k / 8 + 1) * 8;
-size_t t_intput_size = new_ldb * l.bit_align;// n;
-size_t t_bit_input_size = t_intput_size / 8;// +1;
+//size_t t_intput_size = new_ldb * l.bit_align;// n;
+//size_t t_bit_input_size = t_intput_size / 8;// +1;
 int re_packed_input_size = l.c * l.w * l.h;
 memset(state.workspace, 0, re_packed_input_size * sizeof(float));
@@ -928,17 +928,18 @@ void forward_convolutional_layer(convolutional_layer l, network_state state)
 //im2col_cpu_custom_align(state.input, l.c, l.h, l.w, l.size, l.stride, l.pad, b, l.bit_align);
 im2col_cpu_custom_bin(state.input, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace, l.bit_align);
-size_t output_size = l.outputs;
+//size_t output_size = l.outputs;
 //float *count_output = calloc(output_size, sizeof(float));
 //size_t bit_output_size = output_size / 8 + 1;
 //char *bit_output = calloc(bit_output_size, sizeof(char));
-size_t intput_size = n * k; // (out_h*out_w) X (l.size*l.size*l.c) : after im2col()
-size_t bit_input_size = intput_size / 8 + 1;
+//size_t intput_size = n * k; // (out_h*out_w) X (l.size*l.size*l.c) : after im2col()
+//size_t bit_input_size = intput_size / 8 + 1;
 //char *bit_input = calloc(bit_input_size, sizeof(char));
-size_t weights_size = k * m; //l.size*l.size*l.c*l.n;
-size_t bit_weights_size = weights_size / 8 + 1;
+//size_t weights_size = k * m; //l.size*l.size*l.c*l.n;
+//size_t bit_weights_size = weights_size / 8 + 1;
 //char *bit_weights = calloc(bit_weights_size, sizeof(char));
 //float *mean_arr = calloc(l.n, sizeof(float));
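Note: most changes in this file (and in gemm.c below) follow one pattern: locals that are declared but never read trip -Wunused-variable (or -Wunused-but-set-variable), and the commit comments them out rather than deleting them, keeping the old experimental code visible. A hypothetical minimal sketch of the pattern:

    #include <stddef.h>

    size_t output_bytes(size_t outputs)
    {
        //size_t output_size = outputs;            /* never read: would trigger -Wunused-variable */
        size_t bit_output_size = outputs / 8 + 1;  /* still used, so it stays live */
        return bit_output_size;
    }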

src/darknet.c

@@ -479,7 +479,7 @@ int main(int argc, char **argv)
 float thresh = find_float_arg(argc, argv, "-thresh", .24);
 int ext_output = find_arg(argc, argv, "-ext_output");
 char *filename = (argc > 4) ? argv[4]: 0;
-test_detector("cfg/coco.data", argv[2], argv[3], filename, thresh, 0.5, 0, 1, 0, NULL);
+test_detector("cfg/coco.data", argv[2], argv[3], filename, thresh, 0.5, 0, ext_output, 0, NULL);
 } else if (0 == strcmp(argv[1], "cifar")){
 run_cifar(argc, argv);
 } else if (0 == strcmp(argv[1], "go")){

src/darkunistd.h

@@ -21,7 +21,7 @@
 These may be OR'd together. */
 #define R_OK 4 /* Test for read permission. */
 #define W_OK 2 /* Test for write permission. */
-#define X_OK R_OK /* execute permission - unsupported in Windows, \
+#define X_OK R_OK /* execute permission - unsupported in Windows, */
 #define F_OK 0 /* Test for existence. */
 #define access _access
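Note: the removed X_OK line ends with a backslash inside an unclosed /* comment. Line splicing happens before comments are stripped, so the next line's #define F_OK is pulled onto the X_OK line and swallowed by the still-open comment, and GCC flags the '/*' that then appears inside a comment (-Wcomment). Closing the comment on the same line restores F_OK. A hypothetical compilable illustration of the fixed form:

    #include <stdio.h>

    #define R_OK 4    /* Test for read permission. */
    #define X_OK R_OK /* execute permission - unsupported in Windows, */
    #define F_OK 0    /* Test for existence. */
    /* With the old trailing backslash, the "#define F_OK 0" line above would have been
       spliced onto the X_OK line and hidden inside its unterminated comment, so any
       code using F_OK would not find the macro. */

    int main(void)
    {
        printf("X_OK=%d F_OK=%d\n", X_OK, F_OK);  /* both macros are visible again */
        return 0;
    }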

src/data.c

@@ -790,8 +790,8 @@ data load_data_detection(int n, char **paths, int m, int w, int h, int c, int bo
 d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
 d.X.cols = h*w*c;
-float r1, r2, r3, r4;
-float dhue, dsat, dexp, flip;
+float r1 = 0, r2 = 0, r3 = 0, r4 = 0;
+float dhue = 0, dsat = 0, dexp = 0, flip = 0;
 int augmentation_calculated = 0;
 d.y = make_matrix(n, 5*boxes);
@@ -890,8 +890,8 @@ data load_data_detection(int n, char **paths, int m, int w, int h, int c, int bo
 d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
 d.X.cols = h*w*c;
-float r1, r2, r3, r4;
-float dhue, dsat, dexp, flip;
+float r1 = 0, r2 = 0, r3 = 0, r4 = 0;
+float dhue = 0, dsat = 0, dexp = 0, flip = 0;
 int augmentation_calculated = 0;
 d.y = make_matrix(n, 5 * boxes);
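Note: in load_data_detection the augmentation parameters are only assigned when augmentation_calculated gets set, so the compiler can report them as possibly used uninitialized; zero-initializing them at the declaration silences that warning. A hypothetical self-contained sketch of the same shape:

    #include <stdlib.h>
    #include <stdio.h>

    static float augment_once(int use_cached)
    {
        float r1 = 0, r2 = 0, dhue = 0, flip = 0;   /* was: float r1, r2, dhue, flip; (uninitialized) */
        if (!use_cached) {                          /* assigned on only one path ... */
            r1 = rand() / (float)RAND_MAX;
            r2 = rand() / (float)RAND_MAX;
            dhue = r1 - r2;
            flip = (r1 > 0.5f);
        }
        return r1 + r2 + dhue + flip;               /* ... but read on every path */
    }

    int main(void)
    {
        printf("%f\n", augment_once(0));
        return 0;
    }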

src/detector.c

@@ -326,7 +326,7 @@ void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, i
 if (ngpus != 1) sync_nets(nets, ngpus, 0);
 #endif
 char buff[256];
-sprintf(buff, "%s/%s_last.weights", backup_directory, base, i);
+sprintf(buff, "%s/%s_last.weights", backup_directory, base);
 save_weights(net, buff);
 }
 free_data(train);
@@ -594,7 +594,7 @@ void validate_detector_recall(char *datacfg, char *cfgfile, char *weightfile)
 list *plist = get_paths(valid_images);
 char **paths = (char **)list_to_array(plist);
-layer l = net.layers[net.n - 1];
+//layer l = net.layers[net.n - 1];
 int j, k;
@@ -681,16 +681,16 @@ float validate_detector_map(char *datacfg, char *cfgfile, char *weightfile, floa
 char *difficult_valid_images = option_find_str(options, "difficult", NULL);
 char *name_list = option_find_str(options, "names", "data/names.list");
 char **names = get_labels(name_list);
-char *mapf = option_find_str(options, "map", 0);
-int *map = 0;
-if (mapf) map = read_map(mapf);
+//char *mapf = option_find_str(options, "map", 0);
+//int *map = 0;
+//if (mapf) map = read_map(mapf);
 FILE* reinforcement_fd = NULL;
 network net;
-int initial_batch;
+//int initial_batch;
 if (existing_net) {
 char *train_images = option_find_str(options, "train", "data/train.txt");
-char *valid_images = option_find_str(options, "valid", train_images);
+valid_images = option_find_str(options, "valid", train_images);
 net = *existing_net;
 }
 else {
@@ -1131,8 +1131,8 @@ void calc_anchors(char *datacfg, int num_of_clusters, int width, int height, int
 printf("\n");
 for (i = 0; i < number_of_boxes; ++i) {
-float w = boxes_data.vals[i][0] = rel_width_height_array[i * 2];
-float h = boxes_data.vals[i][1] = rel_width_height_array[i * 2 + 1];
+boxes_data.vals[i][0] = rel_width_height_array[i * 2];
+boxes_data.vals[i][1] = rel_width_height_array[i * 2 + 1];
 //if (w > 410 || h > 410) printf("i:%d, w = %f, h = %f \n", i, w, h);
 }
@@ -1170,7 +1170,7 @@ void calc_anchors(char *datacfg, int num_of_clusters, int width, int height, int
 float anchor_w = anchors_data.centers.vals[cluster_idx][0]; //centers->data.fl[cluster_idx * 2];
 float anchor_h = anchors_data.centers.vals[cluster_idx][1]; //centers->data.fl[cluster_idx * 2 + 1];
 if (best_iou > 1 || best_iou < 0) { // || box_w > width || box_h > height) {
-printf(" Wrong label: i = %d, box_w = %d, box_h = %d, anchor_w = %d, anchor_h = %d, iou = %f \n",
+printf(" Wrong label: i = %d, box_w = %f, box_h = %f, anchor_w = %f, anchor_h = %f, iou = %f \n",
 i, box_w, box_h, anchor_w, anchor_h, best_iou);
 }
 else avg_iou += best_iou;
@@ -1287,7 +1287,6 @@ void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filenam
 if (net.layers[net.n - 1].classes > names_size) getchar();
 }
 srand(2222222);
-double time;
 char buff[256];
 char *input = buff;
 char *json_buf = NULL;
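Note: in the calc_anchors hunk above, box_w, box_h, anchor_w and anchor_h are floats, so printing them with %d was undefined behavior and drew a -Wformat warning; %f matches the arguments after their default promotion to double. A hypothetical minimal reproduction:

    #include <stdio.h>

    int main(void)
    {
        float box_w = 0.23f, box_h = 0.57f;
        /* printf(" Wrong label: box_w = %d, box_h = %d \n", box_w, box_h); */ /* -Wformat: %d expects int */
        printf(" Wrong label: box_w = %f, box_h = %f \n", box_w, box_h);       /* matches the fix */
        return 0;
    }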

src/dice.c

@@ -33,7 +33,7 @@ void train_dice(char *cfgfile, char *weightfile)
 float loss = train_network(net, train);
 if(avg_loss == -1) avg_loss = loss;
 avg_loss = avg_loss*.9 + loss*.1;
-printf("%d: %f, %f avg, %lf seconds, %d images\n", i, loss, avg_loss, sec(clock()-time), *net.seen);
+printf("%d: %f, %f avg, %lf seconds, %ld images\n", i, loss, avg_loss, sec(clock()-time), *net.seen);
 free_data(train);
 if((i % 100) == 0) net.learning_rate *= .1;
 if(i%100==0){

src/gemm.c

@@ -321,7 +321,7 @@ void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
 // is not used
 void transpose_32x32_bits_my(uint32_t *A, uint32_t *B, int lda, int ldb)
 {
-unsigned x, y, t;
+unsigned int x, y;
 for (y = 0; y < 32; ++y) {
 for (x = 0; x < 32; ++x) {
 if (A[y * lda] & (1 << x)) B[x * ldb] |= (uint32_t)1 << y;
@@ -400,7 +400,7 @@ void transpose_32x32_bits_reversed_diagonale(uint32_t *A, uint32_t *B, int m, in
 void transpose_8x8_bits_my(unsigned char *A, unsigned char *B, int lda, int ldb)
 {
-unsigned x, y, t;
+unsigned x, y;
 for (y = 0; y < 8; ++y) {
 for (x = 0; x < 8; ++x) {
 if (A[y * lda] & (1 << x)) B[x * ldb] |= 1 << y;
@@ -755,7 +755,7 @@ void gemm_nn_fast(int M, int N, int K, float ALPHA,
 for (i = 0; i < (M / TILE_M)*TILE_M; i += TILE_M)
 {
 int j, k;
-int i_d, j_d, k_d;
+int i_d, k_d;
 for (k = 0; k < (K / TILE_K)*TILE_K; k += TILE_K)
 {
@@ -768,8 +768,8 @@ void gemm_nn_fast(int M, int N, int K, float ALPHA,
 __m256 result256;
 __m256 a256_0, b256_0; // AVX
 __m256 a256_1, b256_1; // AVX
-__m256 a256_2, b256_2; // AVX
-__m256 a256_3, b256_3; // AVX
+__m256 a256_2;// , b256_2; // AVX
+__m256 a256_3;// , b256_3; // AVX
 __m256 c256_0, c256_1, c256_2, c256_3;
 __m256 c256_4, c256_5, c256_6, c256_7;
@@ -943,8 +943,8 @@ void gemm_nn_bin_32bit_packed(int M, int N, int K, float ALPHA,
 void convolution_2d_old(int w, int h, int ksize, int n, int c, int pad, int stride,
 float *weights, float *input, float *output)
 {
-const int out_h = (h + 2 * pad - ksize) / stride + 1; // output_height=input_height for stride=1 and pad=1
-const int out_w = (w + 2 * pad - ksize) / stride + 1; // output_width=input_width for stride=1 and pad=1
+//const int out_h = (h + 2 * pad - ksize) / stride + 1; // output_height=input_height for stride=1 and pad=1
+//const int out_w = (w + 2 * pad - ksize) / stride + 1; // output_width=input_width for stride=1 and pad=1
 int fil;
 // filter index
@@ -991,8 +991,8 @@ void convolution_2d_old(int w, int h, int ksize, int n, int c, int pad, int stri
 void convolution_2d(int w, int h, int ksize, int n, int c, int pad, int stride,
 float *weights, float *input, float *output, float *mean)
 {
-const int out_h = (h + 2 * pad - ksize) / stride + 1; // output_height=input_height for stride=1 and pad=1
-const int out_w = (w + 2 * pad - ksize) / stride + 1; // output_width=input_width for stride=1 and pad=1
+//const int out_h = (h + 2 * pad - ksize) / stride + 1; // output_height=input_height for stride=1 and pad=1
+//const int out_w = (w + 2 * pad - ksize) / stride + 1; // output_width=input_width for stride=1 and pad=1
 int i;
 #if defined(_OPENMP)
@@ -1203,7 +1203,7 @@ void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
 float mean_val_0 = mean_arr[i + 0];
 float mean_val_1 = mean_arr[i + 1];
 int j, k;
-__m256i all_1 = _mm256_set1_epi8(255);
+//__m256i all_1 = _mm256_set1_epi8(255);
 //for (j = 0; j < N; ++j)
 for (j = 0; j < (N/2)*2; j += 2)
@@ -1770,7 +1770,7 @@ void float_to_bit(float *src, unsigned char *dst, size_t size)
 memset(dst, 0, dst_size);
 size_t i;
-__m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
+//__m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
 __m256 float_zero256 = _mm256_set1_ps(0.0);
 for (i = 0; i < size; i+=8)
@@ -1881,8 +1881,8 @@ void forward_maxpool_layer_avx(float *src, float *dst, int *indexes, int size, i
 else if (size == 2 && stride == 2 && is_avx() == 1) {
 for (j = 0; j < out_w - 4; j += 4) {
 int out_index = j + out_w*(i + out_h*(k + c*b));
-float max = -FLT_MAX;
-int max_i = -1;
+//float max = -FLT_MAX;
+//int max_i = -1;
 __m128 max128 = _mm_set1_ps(-FLT_MAX);
 for (n = 0; n < size; ++n) {
@@ -2513,7 +2513,7 @@ void convolution_repacked(uint32_t *packed_input, uint32_t *packed_weights, floa
 #pragma omp parallel for
 for (fil = 0; fil < n; ++fil) {
 float mean_val = mean_arr[fil];
-int chan, c_pack, y, x, f_y, f_x;
+int chan, y, x, f_y, f_x; // c_pack
 // channel index
 for (chan = 0; chan < c / 32; ++chan)
 //for (chan = 0; chan < l.c; chan += 32)

src/go.c

@@ -144,7 +144,7 @@ void train_go(char *cfgfile, char *weightfile)
 float loss = train_network_datum(net, board, move) / net.batch;
 if(avg_loss == -1) avg_loss = loss;
 avg_loss = avg_loss*.95 + loss*.05;
-printf("%d, %.3f: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), (float)(*net.seen)/N, loss, avg_loss, get_current_rate(net), sec(clock()-time), *net.seen);
+printf("%d, %.3f: %f, %f avg, %f rate, %lf seconds, %ld images\n", get_current_batch(net), (float)(*net.seen)/N, loss, avg_loss, get_current_rate(net), sec(clock()-time), *net.seen);
 if(*net.seen/N > epoch){
 epoch = *net.seen/N;
 char buff[256];

src/http_stream.cpp

@@ -181,7 +181,7 @@ public:
 if (::select(maxfd + 1, &rread, NULL, NULL, &select_timeout) <= 0)
 return true; // nothing broken, there's just noone listening
-size_t outlen = strlen(outputbuf);
+int outlen = static_cast<int>(strlen(outputbuf));
 #ifdef _WIN32
 for (unsigned i = 0; i<rread.fd_count; i++)
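Note: strlen() returns size_t; the old code kept outlen as size_t, which then has to be narrowed wherever an int is expected further down (presumably the socket send() length parameter, which is an int on Windows), drawing conversion or sign-compare warnings. Converting once, explicitly, at the declaration keeps the later uses clean. A hypothetical C sketch of the idea:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char outputbuf[] = "HTTP/1.0 200 OK\r\n";
        /* int outlen = strlen(outputbuf); */  /* warning: conversion from size_t to int may lose data */
        int outlen = (int)strlen(outputbuf);   /* explicit, intentional narrowing */
        printf("sending %d bytes\n", outlen);
        return 0;
    }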

src/matrix.c

@@ -257,7 +257,7 @@ void kmeans_maximization(matrix data, int *assignments, matrix centers)
 void random_centers(matrix data, matrix centers) {
-int i, j;
+int i;
 int *s = sample(data.rows);
 for (i = 0; i < centers.rows; ++i) {
 copy(data.vals[s[i]], centers.vals[i], data.cols);
@@ -281,7 +281,6 @@ int *sample(int n)
 float dist(float *x, float *y, int n)
 {
-int i;
 //printf(" x0 = %f, x1 = %f, y0 = %f, y1 = %f \n", x[0], x[1], y[0], y[1]);
 float mw = (x[0] < y[0]) ? x[0] : y[0];
 float mh = (x[1] < y[1]) ? x[1] : y[1];

src/network.c

@@ -488,8 +488,8 @@ int resize_network(network *net, int w, int h)
 h = l.out_h;
 if(l.type == AVGPOOL) break;
 }
-const int size = get_network_input_size(*net) * net->batch;
 #ifdef GPU
+const int size = get_network_input_size(*net) * net->batch;
 if(gpu_index >= 0){
 printf(" try to allocate additional workspace_size = %1.2f MB \n", (float)workspace_size / 1000000);
 net->workspace = cuda_make_array(0, workspace_size/sizeof(float) + 1);
@@ -728,10 +728,10 @@ char *detection_to_json(detection *dets, int nboxes, int classes, char **names,
 char *send_buf = (char *)calloc(1024, sizeof(char));
 if (filename) {
-sprintf(send_buf, "{\n \"frame_id\":%d, \n \"filename\":\"%s\", \n \"objects\": [ \n", frame_id, filename);
+sprintf(send_buf, "{\n \"frame_id\":%lld, \n \"filename\":\"%s\", \n \"objects\": [ \n", frame_id, filename);
 }
 else {
-sprintf(send_buf, "{\n \"frame_id\":%d, \n \"objects\": [ \n", frame_id);
+sprintf(send_buf, "{\n \"frame_id\":%lld, \n \"objects\": [ \n", frame_id);
 }
 int i, j;
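Note: the resize_network hunk above, like the three old_gpu_index hunks in yolo_v2_class.cpp below, moves a declaration inside the #ifdef GPU block that contains its only uses, so CPU-only builds no longer report an unused variable. A hypothetical sketch of the pattern (the CUDA runtime calls stand in for the real GPU-path code):

    #ifdef GPU
    #include <cuda_runtime.h>
    #endif

    void select_gpu_sketch(int wanted_gpu)
    {
    #ifdef GPU
        int old_gpu_index;               /* declared only where it is actually used */
        cudaGetDevice(&old_gpu_index);
        if (wanted_gpu != old_gpu_index)
            cudaSetDevice(wanted_gpu);
    #else
        (void)wanted_gpu;                /* nothing to do in a CPU-only build */
    #endif
    }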

src/rnn.c

@@ -163,7 +163,7 @@ void train_char_rnn(char *cfgfile, char *weightfile, char *filename, int clear,
 int i = (*net.seen)/net.batch;
 int streams = batch/steps;
-printf("\n batch = %d, steps = %d, streams = %d, subdivisions = %d, text_size = %d \n", batch, steps, streams, net.subdivisions, size);
+printf("\n batch = %d, steps = %d, streams = %d, subdivisions = %d, text_size = %ld \n", batch, steps, streams, net.subdivisions, size);
 printf(" global_batch = %d \n", batch*net.subdivisions);
 size_t* offsets = (size_t*)calloc(streams, sizeof(size_t));
 int j;

src/tag.c

@@ -64,7 +64,7 @@ void train_tag(char *cfgfile, char *weightfile, int clear)
 float loss = train_network(net, train);
 if(avg_loss == -1) avg_loss = loss;
 avg_loss = avg_loss*.9 + loss*.1;
-printf("%d, %.3f: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), (float)(*net.seen)/N, loss, avg_loss, get_current_rate(net), sec(clock()-time), *net.seen);
+printf("%d, %.3f: %f, %f avg, %f rate, %lf seconds, %ld images\n", get_current_batch(net), (float)(*net.seen)/N, loss, avg_loss, get_current_rate(net), sec(clock()-time), *net.seen);
 free_data(train);
 if(*net.seen/N > epoch){
 epoch = *net.seen/N;

src/utils.c

@@ -768,13 +768,12 @@ float rand_uniform(float min, float max)
 max = swap;
 }
-if (RAND_MAX < 65536) {
+#if (RAND_MAX < 65536)
 int rnd = rand()*(RAND_MAX + 1) + rand();
 return ((float)rnd / (RAND_MAX*RAND_MAX) * (max - min)) + min;
-}
-else {
+#else
 return ((float)rand() / RAND_MAX * (max - min)) + min;
-}
+#endif
 //return (random_float() * (max - min)) + min;
 }
@@ -802,12 +801,12 @@ unsigned int random_gen()
 unsigned int rnd = 0;
 #ifdef WIN32
 rand_s(&rnd);
-#else
+#else // WIN32
 rnd = rand();
-if (RAND_MAX < 65536) {
+#if (RAND_MAX < 65536)
 rnd = rand()*(RAND_MAX + 1) + rnd;
-}
-#endif
+#endif //(RAND_MAX < 65536)
+#endif // WIN32
 return rnd;
 }
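Note: with glibc RAND_MAX is 2147483647, so RAND_MAX + 1 overflows int and "if (RAND_MAX < 65536)" is a compile-time-constant condition; compilers can warn about both (integer overflow in a constant expression, constant conditional) even though that branch can never execute there. Moving the test to the preprocessor means the widening branch is only compiled where RAND_MAX really is small (for example MSVC's 0x7FFF). A compilable sketch mirroring the fixed rand_uniform:

    #include <stdio.h>
    #include <stdlib.h>

    float rand_uniform_sketch(float min, float max)
    {
    #if (RAND_MAX < 65536)
        /* only compiled where rand() is 15-bit, so the arithmetic cannot overflow int */
        int rnd = rand()*(RAND_MAX + 1) + rand();
        return ((float)rnd / (RAND_MAX*RAND_MAX) * (max - min)) + min;
    #else
        return ((float)rand() / RAND_MAX * (max - min)) + min;
    #endif
    }

    int main(void)
    {
        printf("%f\n", rand_uniform_sketch(0.f, 1.f));
        return 0;
    }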

src/writing.c

@@ -69,7 +69,7 @@ void train_writing(char *cfgfile, char *weightfile)
 if(avg_loss == -1) avg_loss = loss;
 avg_loss = avg_loss*.9 + loss*.1;
-printf("%d, %.3f: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), (float)(*net.seen)/N, loss, avg_loss, get_current_rate(net), sec(clock()-time), *net.seen);
+printf("%d, %.3f: %f, %f avg, %f rate, %lf seconds, %ld images\n", get_current_batch(net), (float)(*net.seen)/N, loss, avg_loss, get_current_rate(net), sec(clock()-time), *net.seen);
 free_data(train);
 if(get_current_batch(net)%100 == 0){
 char buff[256];

src/yolo_v2_class.cpp

@@ -104,8 +104,8 @@ struct detector_gpu_t {
 LIB_API Detector::Detector(std::string cfg_filename, std::string weight_filename, int gpu_id) : cur_gpu_id(gpu_id)
 {
 wait_stream = 0;
-int old_gpu_index;
 #ifdef GPU
+int old_gpu_index;
 check_cuda( cudaGetDevice(&old_gpu_index) );
 #endif
@@ -151,7 +151,7 @@ LIB_API Detector::Detector(std::string cfg_filename, std::string weight_filename
 LIB_API Detector::~Detector()
 {
 detector_gpu_t &detector_gpu = *static_cast<detector_gpu_t *>(detector_gpu_ptr.get());
-layer l = detector_gpu.net.layers[detector_gpu.net.n - 1];
+//layer l = detector_gpu.net.layers[detector_gpu.net.n - 1];
 free(detector_gpu.track_id);
@@ -159,8 +159,8 @@ LIB_API Detector::~Detector()
 for (int j = 0; j < NFRAMES; ++j) free(detector_gpu.predictions[j]);
 for (int j = 0; j < NFRAMES; ++j) if (detector_gpu.images[j].data) free(detector_gpu.images[j].data);
-int old_gpu_index;
 #ifdef GPU
+int old_gpu_index;
 cudaGetDevice(&old_gpu_index);
 cuda_set_device(detector_gpu.net.gpu_index);
 #endif
@@ -241,8 +241,8 @@ LIB_API std::vector<bbox_t> Detector::detect(image_t img, float thresh, bool use
 {
 detector_gpu_t &detector_gpu = *static_cast<detector_gpu_t *>(detector_gpu_ptr.get());
 network &net = detector_gpu.net;
-int old_gpu_index;
 #ifdef GPU
+int old_gpu_index;
 cudaGetDevice(&old_gpu_index);
 if(cur_gpu_id != old_gpu_index)
 cudaSetDevice(net.gpu_index);
@@ -289,7 +289,7 @@ LIB_API std::vector<bbox_t> Detector::detect(image_t img, float thresh, bool use
 std::vector<bbox_t> bbox_vec;
-for (size_t i = 0; i < nboxes; ++i) {
+for (int i = 0; i < nboxes; ++i) {
 box b = dets[i].bbox;
 int const obj_id = max_index(dets[i].prob, l.classes);
 float const prob = dets[i].prob[obj_id];
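Note: nboxes is an int, so comparing a size_t loop counter against it triggers a signed/unsigned comparison warning (-Wsign-compare); switching the counter to int keeps the comparison between like types. A hypothetical minimal reproduction:

    #include <stdio.h>

    int main(void)
    {
        int nboxes = 5;
        /* for (size_t i = 0; i < nboxes; ++i) */  /* -Wsign-compare: size_t vs int */
        for (int i = 0; i < nboxes; ++i)           /* matches the fix */
            printf("box %d\n", i);
        return 0;
    }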
