
Deep Learning (5): YOLOv2 Code Analysis

A detailed walkthrough of the Darknet code:

With the environment configured as in the previous post, start from darknet.c: main() dispatches on argv[1], and the "detector" subcommand calls run_detector(argc, argv):

if (0 == strcmp(argv[1], "average")){
        average(argc, argv);
    } else if (0 == strcmp(argv[1], "yolo")){
        run_yolo(argc, argv);
    } else if (0 == strcmp(argv[1], "voxel")){
        run_voxel(argc, argv);
    } else if (0 == strcmp(argv[1], "super")){
        run_super(argc, argv);
    } else if (0 == strcmp(argv[1], "detector")){
        run_detector(argc, argv);  //detector.c
    } else if (0 == strcmp(argv[1], "detect")){
        float thresh = find_float_arg(argc, argv, "-thresh", .24);
        char *filename = (argc > 4) ? argv[4]: 0;
        test_detector("cfg/coco.data", argv[2], argv[3], filename, thresh, .5);
    } else if (0 == strcmp(argv[1], "cifar")){
        run_cifar(argc, argv);
    } else if (0 == strcmp(argv[1], "go")){
        run_go(argc, argv);
    } else if (0 == strcmp(argv[1], "rnn")){
        run_char_rnn(argc, argv);
    } else if (0 == strcmp(argv[1], "vid")){
        run_vid_rnn(argc, argv);
    } else if (0 == strcmp(argv[1], "coco")){
        run_coco(argc, argv);
    } else if (0 == strcmp(argv[1], "classify")){
        predict_classifier("cfg/imagenet1k.data", argv[2], argv[3], argv[4], 5);
           

Below, the train, test, and visualize paths are analyzed in detail. In detector.c, run_detector dispatches on argv[2]:

if(0==strcmp(argv[2], "test")) test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh);
    else if(0==strcmp(argv[2], "train")) train_detector(datacfg, cfg, weights, gpus, ngpus, clear);  //
    else if(0==strcmp(argv[2], "valid")) validate_detector(datacfg, cfg, weights, outfile);
           

Analysis of the training part:

Step into train_detector(datacfg, cfg, weights, ...):

void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
{
    //read the training list and backup directory from the data config file
    list *options = read_data_cfg(datacfg);
    char *train_images = option_find_str(options, "train", "data/train.list");
    char *backup_directory = option_find_str(options, "backup", "/backup/");
    //seed the random number generator (srand/rand)
    srand(time(0)); 
    char *base = basecfg(cfgfile); //extract the base name of the cfg file (used to name saved weights)
    printf("%s\n", base);
    float avg_loss = -1;
    network *nets = calloc(ngpus, sizeof(network));
    srand(time(0));
    int seed = rand();
    int i;
    //parse the network structure once for each GPU
    for(i = 0; i < ngpus; ++i){
        srand(seed);
#ifdef GPU
        cuda_set_device(gpus[i]);
#endif
        nets[i] = parse_network_cfg(cfgfile); //parse the network structure; this function is analyzed below
        //if a weights file was given on the command line, load it
        if(weightfile){
            load_weights(&nets[i], weightfile); 
        }
        if(clear) *nets[i].seen = 0; //reset the count of images seen so far
        nets[i].learning_rate *= ngpus;
    }
    srand(time(0));
    network net = nets[0];
    //number of images loaded into GPU memory per training iteration (across all GPUs)
    int imgs = net.batch * net.subdivisions * ngpus;
    printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net.learning_rate, net.momentum, net.decay);
    data train, buffer;
    layer l = net.layers[net.n - 1];
    int classes = l.classes;
    //jitter generates additional augmented data
    float jitter = l.jitter;
    //get the list of training image paths
    list *plist = get_paths(train_images);
    //int N = plist->size;
    char **paths = (char **)list_to_array(plist);
    load_args args = {0};
    args.w = net.w;
    args.h = net.h;
    args.paths = paths;
    args.n = imgs;
    args.m = plist->size;
    args.classes = classes;
    args.jitter = jitter;
    args.num_boxes = l.max_boxes;
    args.d = &buffer;
    args.type = DETECTION_DATA;
    args.threads = 8;
    //data augmentation: angle, exposure, saturation, hue
    args.angle = net.angle; 
    args.exposure = net.exposure;
    args.saturation = net.saturation;
    args.hue = net.hue;
    pthread_t load_thread = load_data(args);
    clock_t time;
    int count = 0;
    //while(i*imgs < N*120){
    while(get_current_batch(net) < net.max_batches){  //current number of iterations < the max number of iterations
        if(l.random && count++%10 == 0){   //if the layer's random (multi-scale) flag is set, pick a new input size every 10 iterations
            printf("Resizing\n");
            //adjust the 10, 5, 20 constants below to suit your training images; here the resized dimension ranges from 100 to 280
            int dim = (rand() % 10 + 5) * 20;
            //for the last 200 iterations, fix the image size at 280x280
            if (get_current_batch(net)+200 > net.max_batches) dim = 280;
            //int dim = (rand() % 4 + 16) * 32;
            printf("%d\n", dim);
            args.w = dim;
            args.h = dim;
            //wait for the data-loading thread to finish
            pthread_join(load_thread, 0);
            train = buffer;
            free_data(train);
            load_thread = load_data(args);
            //resize each GPU's network to the new dimension
            for(i = 0; i < ngpus; ++i){
                resize_network(nets + i, dim, dim);
            }
            //net refers to the first GPU's network; every GPU uses the same (resized) structure
            net = nets[0];
        }
        //visualize the network weights and feature maps (feature maps are only viewable on CPU)
        visualize_network(net);
        time=clock();
        pthread_join(load_thread, 0);
        train = buffer;
        load_thread = load_data(args);
        //visualize one training sample after data augmentation
        int k;
        image im = float_to_image(args.w, args.h, 3, train.X.vals[10]);
        for(k = 0; k < l.max_boxes; ++k){
            box b = float_to_box(train.y.vals[10] + k*5);      //train.y.vals[10] +1 + k*5
//            printf("%f %f %f %f %f\n", *(train.y.vals[10] + 1+ k*5), *(train.y.vals[10] + 2+ k*5), *(train.y.vals[10] + 3+ k*5),
//                    *(train.y.vals[10] + 4+ k*5),*(train.y.vals[10] + 5+ k*5));
            if(!b.x) break;
            printf("loaded: %f %f %f %f\n", b.x, b.y, b.w, b.h);
            draw_bbox(im, b, 8, 1,0,0);
        }
        save_image(im, "sample");
        printf("Loaded: %lf seconds\n", sec(clock()-time));
        time=clock();
        float loss = 0;
//train the network
#ifdef GPU
        if(ngpus == 1){
            loss = train_network(net, train);
        } else {
            loss = train_networks(nets, ngpus, train, 4);
        }
#else
        loss = train_network(net, train);
#endif
        if (avg_loss < 0) avg_loss = loss;
        avg_loss = avg_loss*.9 + loss*.1;
        i = get_current_batch(net);
        printf("%d: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), sec(clock()-time), i*imgs);
        //periodically save the weights
        if(i%1000==0 || (i < 1000 && i%100 == 0)){
#ifdef GPU
            if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
            char buff[256];
            sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
            save_weights(net, buff);
        }
        free_data(train);
    }
//max iterations reached: save the final weights
#ifdef GPU
    if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
    char buff[256];
    sprintf(buff, "%s/%s_final.weights", backup_directory, base);
    save_weights(net, buff);
}
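As a quick check on the multi-scale resizing above, (rand() % 10 + 5) * 20 always lands on one of the ten sizes 100, 120, ..., 280. A standalone sketch (not part of Darknet) that enumerates them:

#include <stdio.h>

/* Enumerate the image sizes produced by dim = (rand() % 10 + 5) * 20
 * in train_detector(); r stands in for the value of rand() % 10. */
int main(void)
{
    int r;
    for(r = 0; r < 10; ++r){
        printf("%d ", (r + 5) * 20);   /* prints 100 120 140 ... 280 */
    }
    printf("\n");
    return 0;
}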

           

////////////////////////////////////////

//Analysis of parse_network_cfg(cfgfile)

free_section(s);
    fprintf(stderr, "layer     filters    size              input output\n");
//parse every section of the cfg into a layer
    while(n){
        params.index = count;
        fprintf(stderr, "%5d ", count);
        s = (section *)n->val;
        options = s->options;
        layer l = {0};
        LAYER_TYPE lt = string_to_layer_type(s->type);
        if(lt == CONVOLUTIONAL){
            l = parse_convolutional(options, params);
        }else if(lt == LOCAL){
            l = parse_local(options, params);
        }else if(lt == ACTIVE){
            l = parse_activation(options, params);
        }

////////////////////////////////////////
//parse_convolutional(options, params)
////////////////////////////////////////

 if(!(h && w && c)) error("Layer before convolutional layer must output image.");
    int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
    int binary = option_find_int_quiet(options, "binary", 0);
    int xnor = option_find_int_quiet(options, "xnor", 0);
    convolutional_layer layer = make_convolutional_layer(batch,h,w,c,n,size,stride,padding,activation, batch_normalize, binary, xnor, params.net.adam);
    layer.flipped = option_find_int_quiet(options, "flipped", 0);
    layer.dot = option_find_float_quiet(options, "dot", 0);
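For context, these options come from a [convolutional] section of the .cfg file. An illustrative section (the values are only an example, not taken from any particular cfg shipped with Darknet) showing the keys parse_convolutional and make_convolutional_layer consume:

[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky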

////////////////////////////////////////
//make_convolutional_layer(batch,h,w,c,n,size,stride,padding,activation, batch_normalize, binary, xnor, params.net.adam);

//the weights are randomly initialized here; loading a weights file afterwards overwrites this initialization
for(i = 0; i < c*n*size*size; ++i) l.weights[i] = scale*rand_uniform(-1, 1);
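In the Darknet source, scale is computed a few lines earlier from the filter's fan-in; if memory serves it is sqrt(2./(size*size*c)), but treat that exact expression as an assumption. A self-contained sketch of the idea, with rand_uniform_sketch standing in for Darknet's rand_uniform:

#include <math.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for Darknet's rand_uniform(): uniform float in [min, max]. */
static float rand_uniform_sketch(float min, float max)
{
    return min + (max - min) * ((float)rand() / (float)RAND_MAX);
}

int main(void)
{
    int size = 3, c = 3;                          /* 3x3 kernel, 3 input channels  */
    float scale = sqrtf(2.f / (size * size * c)); /* fan-in based (He-style) scale */
    printf("one initial weight: %f\n", scale * rand_uniform_sketch(-1.f, 1.f));
    return 0;
}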

////////////////////////////////////////
//Training the network: train_network(network net, data d)
////////////////////////////////////////

        get_next_batch(d, batch, i*batch, X, y);
        float err = train_network_datum(net, X, y); //train on one mini-batch
        sum += err;
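These lines sit inside train_network's loop over the mini-batches of d. A rough sketch of the surrounding function (simplified from network.c, not a verbatim copy; network, data, get_next_batch and train_network_datum come from the Darknet headers):

/* Rough shape of train_network(); simplified, not verbatim. */
float train_network_sketch(network net, data d)
{
    int batch = net.batch;
    int n = d.X.rows / batch;                     /* mini-batches in this chunk of data */
    float *X = calloc(batch*d.X.cols, sizeof(float));
    float *y = calloc(batch*d.y.cols, sizeof(float));
    float sum = 0;
    int i;
    for(i = 0; i < n; ++i){
        get_next_batch(d, batch, i*batch, X, y);  /* copy mini-batch i into X (images) and y (labels) */
        sum += train_network_datum(net, X, y);    /* forward + backward (+ update), returns the loss  */
    }
    free(X);
    free(y);
    return (float)sum/(n*batch);                  /* average loss per image */
}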

////////////////////////////////////////
//train_network_datum(net, X, y)
////////////////////////////////////////
#ifdef GPU //when a GPU is available, use the GPU training path
    if(gpu_index >= 0) return train_network_datum_gpu(net, x, y);
#endif
    network_state state;

////////////////////////////////////////
//Next, train_network_datum_gpu is found in network_kernels.cu
////////////////////////////////////////
    *net.seen += net.batch;
    forward_backward_network_gpu(net, x, y); //forward and backward pass
    float error = get_network_cost(net); //network cost
    if (((*net.seen) / net.batch) % net.subdivisions == 0) update_network_gpu(net); //update the weights once per full batch
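For example, with batch=64 and subdivisions=8 in the cfg, Darknet sets net.batch to 64/8 = 8, so each forward/backward pass processes 8 images and update_network_gpu runs once every 8 such passes, i.e. after the full 64-image batch has been accumulated.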

////////////////////////////////////////
// forward_backward_network_gpu(net, x, y)
////////////////////////////////////////
    forward_network_gpu(net, state); //forward pass
    backward_network_gpu(net, state); //backward pass


//forward_network_gpu(net, state); //forward pass

        l.forward_gpu(l, state); //for conv layers this calls forward_convolutional_layer_gpu in convolutional_kernels.cu
        state.input = l.output_gpu;

///Next, go to convolutional_kernels.cu
///and find the function that l.forward_gpu(l, state) corresponds to:
///void forward_convolutional_layer_gpu(convolutional_layer l, network_state state)
    for(i = 0; i < l.batch; ++i){
        im2col_ongpu(state.input + i*l.c*l.h*l.w, l.c,  l.h,  l.w,  l.size,  l.stride, l.pad, state.workspace); //unroll the input image / feature map into columns (im2col)
        float * a = l.weights_gpu;
        float * b = state.workspace;
        float * c = l.output_gpu;
        gemm_ongpu(0,0,m,n,k,1.,a,k,b,n,1.,c+i*m*n,n); //the convolution becomes a matrix multiplication; the function lives in gemm.c
    }
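In this loop a holds the filter weights, b the unrolled input and c the layer output. Earlier in the same function the GEMM shapes are set up from the layer parameters; as a sketch (the exact expressions in your Darknet version may differ, e.g. the output size may come from helper functions):

/* GEMM shapes for one image of the batch (sketch):
 *   a: m x k  -- one row of unrolled weights per filter
 *   b: k x n  -- the im2col output
 *   c: m x n  -- the output feature maps                */
int m = l.n;                  /* number of filters            */
int k = l.size*l.size*l.c;    /* elements per unrolled filter */
int n = l.out_w*l.out_h;      /* number of output positions   */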

//Go to im2col_kernels.cu.
//The input image (or the previous feature map) is unrolled column by column, left to right and top to bottom. With a 3x3 kernel, stride 1 and pad 1 on a 3-channel 480x480 input there are 480*480 = 230400 output positions, so the unrolled matrix has 3*3*3 = 27 rows and 230400 columns.
void im2col_ongpu(float *im,
         int channels, int height, int width,
         int ksize, int stride, int pad, float *data_col){
    // We are going to launch channels * height_col * width_col kernels, each
    // kernel responsible for copying a single-channel grid.
    int height_col = (height + 2 * pad - ksize) / stride + 1;
    int width_col = (width + 2 * pad - ksize) / stride + 1;
    int num_kernels = channels * height_col * width_col;
    //launch (num_kernels+BLOCK-1)/BLOCK blocks of BLOCK threads each to run the kernel
    im2col_gpu_kernel<<<(num_kernels+BLOCK-1)/BLOCK,
        BLOCK>>>(
                num_kernels, im, height, width, ksize, pad,
                stride, height_col,
                width_col, data_col); //CUDA grid/block sizes; launches im2col_gpu_kernel
}
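A quick standalone check of the shape arithmetic for the 480x480 example mentioned above:

#include <stdio.h>

/* Recompute the im2col output shape for a 3-channel 480x480 input,
 * 3x3 kernel, stride 1, pad 1 (the example used in the text above). */
int main(void)
{
    int channels = 3, height = 480, width = 480;
    int ksize = 3, stride = 1, pad = 1;
    int height_col = (height + 2*pad - ksize) / stride + 1;   /* 480 */
    int width_col  = (width  + 2*pad - ksize) / stride + 1;   /* 480 */
    printf("data_col: %d x %d (rows x cols)\n",
           channels * ksize * ksize,                          /* 27     */
           height_col * width_col);                           /* 230400 */
    return 0;
}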
           
Analysis of the testing part:
Step into test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh):

//read the image file name: if one was passed as an argument, use it directly; otherwise prompt for a path
if(filename){
            strncpy(input, filename, 256);
        } else {
            printf("Enter Image Path: ");
            fflush(stdout);
            input = fgets(input, 256, stdin);
            if(!input) return;
            strtok(input, "\n");
        }
        image im = load_image_color(input,0,0);
        image sized = resize_image(im, net.w, net.h); //resize image to net.w and net.h
        /************/
        //the last (region) layer determines how many boxes and class probabilities to allocate
        layer l = net.layers[net.n-1];
        box *boxes = calloc(l.w*l.h*l.n, sizeof(box));
        float **probs = calloc(l.w*l.h*l.n, sizeof(float *));
        for(j = 0; j < l.w*l.h*l.n; ++j) probs[j] = calloc(l.classes + 1, sizeof(float *));
        /************/
        float *X = sized.data;
        time=clock();
        network_predict(net, X);
        visualize_network(net);
//        get_network_image(net);
        printf("%s: Predicted in %f seconds.\n", input, sec(clock()-time));
//extract the predicted boxes
        get_region_boxes(l, 1, 1, thresh, probs, boxes, 0, 0, hier_thresh);
//non-maximum suppression
        if (l.softmax_tree && nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
        else if (nms) do_nms_sort(boxes, probs, l.w*l.h*l.n, l.classes, nms);
//draw the detection boxes
        draw_detections(im, l.w*l.h*l.n, thresh, boxes, probs, names, alphabet, l.classes);
        save_image(im, "predictions");
//        show_image(im, "predictions");
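For the usual 416x416 YOLOv2 configuration the region layer is a 13x13 grid with 5 anchor boxes, so l.w*l.h*l.n = 13*13*5 = 845 candidate boxes are allocated above before thresholding and NMS.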
           
Analysis of the visualization part:
} else if (0 == strcmp(argv[1], "oneoff")){
        oneoff(argv[2], argv[3], argv[4]);
    } else if (0 == strcmp(argv[1], "partial")){
        partial(argv[2], argv[3], argv[4], atoi(argv[5]));
    } else if (0 == strcmp(argv[1], "average")){
        average(argc, argv);
    } else if (0 == strcmp(argv[1], "visualize")){ //visualize the weights of conv filter only, the values of feature map equals to 0
        visualize(argv[2], (argc > 3) ? argv[3] : 0);
    } else if (0 == strcmp(argv[1], "imtest")){
        test_resize(argv[2]);

Analysis of visualize(argv[2], (argc > 3) ? argv[3] : 0):
///
    network net = parse_network_cfg(cfgfile); // parse net
    if(weightfile){
        load_weights(&net, weightfile); // load weights
    }
    visualize_network(net); //visualize

Analysis of visualize_network(net):

    image *prev = 0;
    int i;
    char buff[256];
    //layer 0-n visualization
    for(i = 0; i < net.n; ++i){
        sprintf(buff, "Layer %d", i);
        layer l = net.layers[i]; //the i-th layer
        if(l.type == CONVOLUTIONAL){  //if layer is conv, visualize conv layer
            prev = visualize_convolutional_layer(l, buff, prev);
        }
    } 

//
Analysis of visualize_convolutional_layer(l, buff, prev):
    image *single_weights = get_weights(l);
    show_images(single_weights, l.n, window); //show the l.n filter weight images of this conv layer
    //visualize the layer's output feature map
    image delta = get_convolutional_image(l);
    image dc = collapse_image_layers(delta, 1);
    char buff[256];
    sprintf(buff, "%s: Output", window);
    save_image(dc, buff);
//    show_image(dc, buff);
    free_image(dc);
    return single_weights;
           
