您的位置:首页 > 编程语言

深度学习(五)yolov2代码分析

2017-04-21 19:54 483 查看
Darknet代码详解:
上文配置好环境之后,进入 darknet.c 的 main 函数:main 根据第一个命令行参数把控制分发到各个子命令,其中 "detector" 对应 run_detector(argc, argv)(位于 detector.c):
if (0 == strcmp(argv[1], "average")){
average(argc, argv);
} else if (0 == strcmp(argv[1], "yolo")){
run_yolo(argc, argv);
} else if (0 == strcmp(argv[1], "voxel")){
run_voxel(argc, argv);
} else if (0 == strcmp(argv[1], "super")){
run_super(argc, argv);
} else if (0 == strcmp(argv[1], "detector")){
run_detector(argc, argv);  //detector.c
} else if (0 == strcmp(argv[1], "detect")){
float thresh = find_float_arg(argc, argv, "-thresh", .24);
char *filename = (argc > 4) ? argv[4]: 0;
test_detector("cfg/coco.data", argv[2], argv[3], filename, thresh, .5);
} else if (0 == strcmp(argv[1], "cifar")){
run_cifar(argc, argv);
} else if (0 == strcmp(argv[1], "go")){
run_go(argc, argv);
} else if (0 == strcmp(argv[1], "rnn")){
run_char_rnn(argc, argv);
} else if (0 == strcmp(argv[1], "vid")){
run_vid_rnn(argc, argv);
} else if (0 == strcmp(argv[1], "coco")){
run_coco(argc, argv);
} else if (0 == strcmp(argv[1], "classify")){
predict_classifier("cfg/imagenet1k.data", argv[2], argv[3], argv[4], 5);

下面分别对 train、test、visualize 三部分做详细分析。run_detector 内部根据第二个参数继续分发:
if(0==strcmp(argv[2], "test")) test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh);
else if(0==strcmp(argv[2], "train")) train_detector(datacfg, cfg, weights, gpus, ngpus, clear);  //
else if(0==strcmp(argv[2], "valid")) validate_detector(datacfg, cfg, weights, outfile);

对训练部分做分析:
进入 train_detector(datacfg, cfg, weights, ...):
/*
 * Train a YOLOv2 detection network.
 *
 * datacfg    - path to the .data file (train list, backup dir, class count, ...)
 * cfgfile    - path to the network .cfg file
 * weightfile - optional pretrained weights to load (may be NULL)
 * gpus/ngpus - GPU ids to train on and how many there are
 * clear      - nonzero resets the "images seen" counter (restarts iteration count)
 *
 * Loads data in a background pthread while the previous batch trains,
 * supports multi-scale training (random input resize every 10 iterations),
 * and periodically saves weights into the backup directory.
 */
void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
{
// Parse the .data config: training image list and backup directory, with defaults.
list *options = read_data_cfg(datacfg);
char *train_images = option_find_str(options, "train", "data/train.list");
char *backup_directory = option_find_str(options, "backup", "/backup/");
// Seed the PRNG with the current time so rand() below varies per run.
srand(time(0));
char *base = basecfg(cfgfile); // base name of the cfg path — used below when naming saved weight files (NOTE(review): original comment said "reads the config"; it only derives the name)
printf("%s\n", base);
float avg_loss = -1;
// One network copy per GPU; all copies are parsed from the same cfg.
network *nets = calloc(ngpus, sizeof(network));
srand(time(0));
int seed = rand();
int i;
// Build an identical network on each GPU (same seed -> same random init).
for(i = 0; i < ngpus; ++i){
srand(seed);
#ifdef GPU
cuda_set_device(gpus[i]);
#endif
nets[i] = parse_network_cfg(cfgfile); // parse the network structure (analyzed later in this article)
// If a pretrained weights file was supplied, load it into this copy.
if(weightfile){
load_weights(&nets[i], weightfile);
}
if(clear) *nets[i].seen = 0; // reset the recorded number of images seen (iteration counter)
nets[i].learning_rate *= ngpus; // scale LR by GPU count (effective batch is ngpus x larger)
}
srand(time(0));
network net = nets[0];
// Number of images consumed per training iteration across all GPUs.
int imgs = net.batch * net.subdivisions * ngpus;
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net.learning_rate, net.momentum, net.decay);
data train, buffer;
// The last layer (region layer) carries the detection hyper-parameters.
layer l = net.layers[net.n - 1];
int classes = l.classes;
// Jitter fraction for random crop/translate augmentation.
float jitter = l.jitter;
// Read the list of training image paths into an array.
list *plist = get_paths(train_images);
//int N = plist->size;
char **paths = (char **)list_to_array(plist);
// Arguments handed to the background data-loading thread.
load_args args = {0};
args.w = net.w;
args.h = net.h;
args.paths = paths;
args.n = imgs;
args.m = plist->size;
args.classes = classes;
args.jitter = jitter;
args.num_boxes = l.max_boxes;
args.d = &buffer;
args.type = DETECTION_DATA;
args.threads = 8;
// Augmentation ranges: rotation angle, exposure, saturation, hue.
args.angle = net.angle;
args.exposure = net.exposure;
args.saturation = net.saturation;
args.hue = net.hue;
// Kick off asynchronous loading of the first batch.
pthread_t load_thread = load_data(args);
clock_t time;
int count = 0;
//while(i*imgs < N*120){
while(get_current_batch(net) < net.max_batches){  //current number of iterations < the max number of iterations
if(l.random && count++%10 == 0){   // multi-scale training: only when the cfg sets random=1, and once every 10 iterations
printf("Resizing\n");
// (rand()%10+5)*20 picks a new square input size in 100..280, step 20.
// Tune the constants 10/5/20 to match your training image sizes.
int dim = (rand() % 10 + 5) * 20;
// For the final ~200 iterations, fix the input size at 280x280.
if (get_current_batch(net)+200 > net.max_batches) dim = 280;
//int dim = (rand() % 4 + 16) * 32;
printf("%d\n", dim);
args.w = dim;
args.h = dim;
// Wait for the in-flight loader, discard its old-size batch, and
// restart loading at the new size.
pthread_join(load_thread, 0);
train = buffer;
free_data(train);
load_thread = load_data(args);
// Resize every per-GPU network copy to the new input dimensions.
for(i = 0; i < ngpus; ++i){
resize_network(nets + i, dim, dim);
}
// All copies share the same structure; keep copy 0 as the reference.
net = nets[0];
}
// Visualize weights and feature maps (feature maps visible on CPU only).
// NOTE(review): author-added call inside the training loop — runs every
// iteration and will slow training considerably.
visualize_network(net);
time=clock();
// Collect the batch loaded in the background, then immediately start
// loading the next one while this batch trains.
pthread_join(load_thread, 0);
train = buffer;
load_thread = load_data(args);
// Debug visualization of one augmented training sample with its boxes.
// NOTE(review): hard-codes sample index 10 — assumes each loaded batch
// holds at least 11 images (imgs >= 11); verify for small batch settings.
int k;
image im = float_to_image(args.w, args.h, 3, train.X.vals[10]);
for(k = 0; k < l.max_boxes; ++k){
box b = float_to_box(train.y.vals[10] + k*5);      //train.y.vals[10] +1 + k*5
//           printf("%f %f %f %f %f\n", *(train.y.vals[10] + 1+ k*5), *(train.y.vals[10] + 2+ k*5), *(train.y.vals[10] + 3+ k*5),
//                   *(train.y.vals[10] + 4+ k*5),*(train.y.vals[10] + 5+ k*5));
if(!b.x) break; // truth boxes are zero-padded; stop at the first empty slot
printf("loaded: %f %f %f %f\n", b.x, b.y, b.w, b.h);
draw_bbox(im, b, 8, 1,0,0);
}
save_image(im, "sample");
printf("Loaded: %lf seconds\n", sec(clock()-time));
time=clock();
float loss = 0;
// One training step: single-GPU path, or synchronized multi-GPU training
// (train_networks syncs the copies every 4 iterations).
#ifdef GPU
if(ngpus == 1){
loss = train_network(net, train);
} else {
loss = train_networks(nets, ngpus, train, 4);
}
#else
loss = train_network(net, train);
#endif
// Exponential moving average of the loss for smoother progress output.
if (avg_loss < 0) avg_loss = loss;
avg_loss = avg_loss*.9 + loss*.1;
i = get_current_batch(net);
printf("%d: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), sec(clock()-time), i*imgs);
// Checkpoint: every 100 iterations for the first 1000, then every 1000.
if(i%1000==0 || (i < 1000 && i%100 == 0)){
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
save_weights(net, buff);
}
free_data(train);
}
// Reached max_batches: sync the GPU copies and save the final weights.
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s_final.weights", backup_directory, base);
save_weights(net, buff);
}

/////////////////////////
//对parse_network_cfg(cfgfile)分析
////////////////////////
free_section(s);
fprintf(stderr, "layer     filters    size              input output\n");
//解析所有网络
while(n){
params.index = count;
fprintf(stderr, "%5d ", count);
s = (section *)n->val;
options = s->options;
layer l = {0};
LAYER_TYPE lt = string_to_layer_type(s->type);
if(lt == CONVOLUTIONAL){
l = parse_convolutional(options, params);
}else if(lt == LOCAL){
l = parse_local(options, params);
}else if(lt == ACTIVE){
l = parse_activation(options, params);
}

/////////////
//parse_convolutional(options, params);
/////////////

if(!(h && w && c)) error("Layer before convolutional layer must output image.");
int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
int binary = option_find_int_quiet(options, "binary", 0);
int xnor = option_find_int_quiet(options, "xnor", 0);
convolutional_layer layer = make_convolutional_layer(batch,h,w,c,n,size,stride,padding,activation, batch_normalize, binary, xnor, params.net.adam);
layer.flipped = option_find_int_quiet(options, "flipped", 0);
layer.dot = option_find_float_quiet(options, "dot", 0);

/////////////
//make_convolutional_layer(batch,h,w,c,n,size,stride,padding,activation, batch_normalize, binary, xnor, params.net.adam);
////////////
//载入权重文件时,已经对卷积网络做了初始化处理
for(i = 0; i < c*n*size*size; ++i) l.weights[i] = scale*rand_uniform(-1, 1);

///////////
//训练网络train_network(network net, data d)
//////////

get_next_batch(d, batch, i*batch, X, y);
float err = train_network_datum(net, X, y);//训练网络
sum += err;

/////////
//train_network_datum(net, X, y);//训练网络
/////////
#ifdef GPU //使用GPU时训练网络
if(gpu_index >= 0) return train_network_datum_gpu(net, x, y);
#endif
network_state state;

/////////
//接着在network_kernels.cu找到train_network_datum_gpu
/////////////
*net.seen += net.batch;
forward_backward_network_gpu(net, x, y); //前向后向传播
float error = get_network_cost(net); //网络代价
if (((*net.seen) / net.batch) % net.subdivisions == 0) update_network_gpu(net); //更新网络

/////////
// forward_backward_network_gpu(net, x, y)
/////////
forward_network_gpu(net, state); //前向传播
backward_network_gpu(net, state); //后向传播

////////
//forward_network_gpu(net, state); //前向传播
////////
l.forward_gpu(l, state); //use function forward_convolutional_layer_gpu in convolutional_kernels.cu
state.input = l.output_gpu;

///接着进入convolutional_kernels.cu
///找到与 l.forward_gpu(l, state);对应的函数
///void forward_convolutional_layer_gpu(convolutional_layer l, network_state state)
for(i = 0; i < l.batch; ++i){
im2col_ongpu(state.input + i*l.c*l.h*l.w, l.c,  l.h,  l.w,  l.size,  l.stride, l.pad, state.workspace); //直接将feature map和输入图像矩阵拉成列向量
float * a = l.weights_gpu;
float * b = state.workspace;
float * c = l.output_gpu;
gemm_ongpu(0,0,m,n,k,1.,a,k,b,n,1.,c+i*m*n,n); //进行卷积运算,函数在gemm.c里面
}

//进入im2col_kernels.cu
//将输入图片或前面层的 feature map 按卷积窗口展开成矩阵:每个输出位置对应一列,列长为 ksize*ksize*channels(例如 3 通道、3*3 卷积核时列长 3*3*3=27);若输出特征图为 480*480,则共 480*480=230400 列
/*
 * GPU im2col: unroll convolution windows of `im` into columns of
 * `data_col`, so the convolution can then be computed as a single GEMM.
 *
 * im       - input image / feature map on the GPU, CHW layout
 * data_col - output buffer; one column of ksize*ksize*channels elements
 *            per output spatial position
 */
void im2col_ongpu(float *im,
int channels, int height, int width,
int ksize, int stride, int pad, float *data_col){
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height_col * width_col;
// Launch (num_kernels+BLOCK-1)/BLOCK blocks of BLOCK threads each, so one
// CUDA thread handles one (channel, output-position) copy.
im2col_gpu_kernel<<<(num_kernels+BLOCK-1)/BLOCK,
BLOCK>>>(
num_kernels, im, height, width, ksize, pad,
stride, height_col,
width_col, data_col); // grid/block configuration above; the actual copy is done in im2col_gpu_kernel
}

对测试部分做分析:

进入test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh);

//对文件名字读取,输入参数有文件直接进行下一步,没有提示输入
if(filename){
strncpy(input, filename, 256);
} else {
printf("Enter Image Path: ");
fflush(stdout);
input = fgets(input, 256, stdin);
if(!input) return;
strtok(input, "\n");
}
image im = load_image_color(input,0,0);
image sized = resize_image(im, net.w, net.h); //resize image to net.w and net.h
/************/
//the last layer uses to produce box and probs
layer l = net.layers[net.n-1];
box *boxes = calloc(l.w*l.h*l.n, sizeof(box));
float **probs = calloc(l.w*l.h*l.n, sizeof(float *));
for(j = 0; j < l.w*l.h*l.n; ++j) probs[j] = calloc(l.classes + 1, sizeof(float *));
/************/
float *X = sized.data;
time=clock();
network_predict(net, X);
visualize_network(net);
//        get_network_image(net);
printf("%s: Predicted in %f seconds.\n", input, sec(clock()-time));
//预测盒子
get_region_boxes(l, 1, 1, thresh, probs, boxes, 0, 0, hier_thresh);
//进行非极大抑制
if (l.softmax_tree && nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
else if (nms) do_nms_sort(boxes, probs, l.w*l.h*l.n, l.classes, nms);
//画检测框
draw_detections(im, l.w*l.h*l.n, thresh, boxes, probs, names, alphabet, l.classes);
save_image(im, "predictions");
//        show_image(im, "predictions");

对可视化做分析:

} else if (0 == strcmp(argv[1], "oneoff")){
oneoff(argv[2], argv[3], argv[4]);
} else if (0 == strcmp(argv[1], "partial")){
partial(argv[2], argv[3], argv[4], atoi(argv[5]));
} else if (0 == strcmp(argv[1], "average")){
average(argc, argv);
} else if (0 == strcmp(argv[1], "visualize")){ //visualize the weights of conv filter only, the values of feature map equals to 0
visualize(argv[2], (argc > 3) ? argv[3] : 0);
} else if (0 == strcmp(argv[1], "imtest")){
test_resize(argv[2]);

分析visualize(argv[2], (argc > 3) ? argv[3] : 0);
///
network net = parse_network_cfg(cfgfile); // parse net
if(weightfile){
load_weights(&net, weightfile); // load weights
}
visualize_network(net); //visualize
////
分析 visualize_network(net);

image *prev = 0;
int i;
char buff[256];
//layer 0-n visualization
for(i = 0; i < net.n; ++i){
sprintf(buff, "Layer %d", i);
layer l = net.layers[i]; //layer ith
if(l.type == CONVOLUTIONAL){  //if layer is conv, visualize conv layer
prev = visualize_convolutional_layer(l, buff, prev);
}
}

//////
分析:visualize_convolutional_layer(l, buff, prev);
image *single_weights = get_weights(l);
show_images(single_weights, l.n, window); // visualize conv layer l.n
//visualize feature map according to layer l.n
image delta = get_convolutional_image(l);
image dc = collapse_image_layers(delta, 1);
char buff[256];
sprintf(buff, "%s: Output", window);
save_image(dc, buff);
//    show_image(dc, buff);
free_image(dc);
return single_weights;
内容来自用户分享和网络整理,不保证内容的准确性,如有侵权内容,可联系管理员处理 点击这里给我发消息
标签: