
Index errors encountered when training Fast R-CNN on my own dataset

2017-11-08 18:25
I am not sure whether it is a version issue, but when training Fast R-CNN on my own dataset I ran into data-type errors (an index that is not an integer) several times.

First, in minibatch.py, in the _sample_rois(roidb, fg_rois_per_image, rois_per_image, num_classes) function, the line fg_inds = npr.choice(fg_inds, size=fg_rois_per_this_image, replace=False) under if fg_inds.size > 0: kept raising an error, because a non-integer cannot be used as an index. By elimination, the offending value turned out to be size=fg_rois_per_this_image, and tracing its definition showed that it is derived from a value passed in from get_minibatch. Accordingly, in get_minibatch(roidb, num_classes), the call labels, overlaps, im_rois, bbox_targets, bbox_loss = _sample_rois(roidb[im_i], fg_rois_per_image, rois_per_image, num_classes) also kept failing; printing the variables showed that fg_rois_per_image is not an integer, which matches the error message. Its definition is fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image), and the result is not an integer: numpy.round only rounds to the nearest value, it does not change the type to int, so using the result as an index (or as a sampling size) keeps raising the error.
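A minimal standalone sketch of the type problem (the numbers are made up for illustration, assuming Fast R-CNN's default FG_FRACTION of 0.25 and 64 RoIs per image):

import numpy as np
import numpy.random as npr

rois_per_image = 64
fg_rois_per_image = np.round(0.25 * rois_per_image)   # 16.0 -- a float, not an int

a = np.arange(100)
# Both of the following fail on recent NumPy versions because the value is not an integer:
# a[:fg_rois_per_image]
# npr.choice(a, size=fg_rois_per_image, replace=False)

fg_rois_per_image = int(np.round(0.25 * rois_per_image))   # the cast used in the fix below
print(npr.choice(a, size=fg_rois_per_image, replace=False).shape)   # (16,)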

def get_minibatch(roidb, num_classes):
    """Given a roidb, construct a minibatch sampled from it."""
    num_images = len(roidb)
    # Sample random scales to use for each image in this batch
    random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
                                    size=num_images)
    assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
        'num_images ({}) must divide BATCH_SIZE ({})'. \
        format(num_images, cfg.TRAIN.BATCH_SIZE)
    rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images
    fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)
    # change the line above to:
    # fg_rois_per_image = int(np.round(cfg.TRAIN.FG_FRACTION * rois_per_image))

    # Get the input image blob, formatted for caffe
    im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)

    # Now, build the region of interest and label blobs
    rois_blob = np.zeros((0, 5), dtype=np.float32)
    labels_blob = np.zeros((0), dtype=np.float32)
    bbox_targets_blob = np.zeros((0, 4 * num_classes), dtype=np.float32)
    bbox_loss_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32)
    # all_overlaps = []
    for im_i in xrange(num_images):
        labels, overlaps, im_rois, bbox_targets, bbox_loss \
            = _sample_rois(roidb[im_i], fg_rois_per_image, rois_per_image,
                           num_classes)

        # Add to RoIs blob
        rois = _project_im_rois(im_rois, im_scales[im_i])
        batch_ind = im_i * np.ones((rois.shape[0], 1))
        rois_blob_this_image = np.hstack((batch_ind, rois))
        rois_blob = np.vstack((rois_blob, rois_blob_this_image))

        # Add to labels, bbox targets, and bbox loss blobs
        labels_blob = np.hstack((labels_blob, labels))
        bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets))
        bbox_loss_blob = np.vstack((bbox_loss_blob, bbox_loss))
        # all_overlaps = np.hstack((all_overlaps, overlaps))

    # For debug visualizations
    # _vis_minibatch(im_blob, rois_blob, labels_blob, all_overlaps)

    blobs = {'data': im_blob,
             'rois': rois_blob,
             'labels': labels_blob}

    if cfg.TRAIN.BBOX_REG:
        blobs['bbox_targets'] = bbox_targets_blob
        blobs['bbox_loss_weights'] = bbox_loss_blob

    return blobs
def _sample_rois(roidb, fg_rois_per_image, rois_per_image, num_classes):
    """Generate a random sample of RoIs comprising foreground and background
    examples.
    """
    # label = class RoI has max overlap with
    labels = roidb['max_classes']
    overlaps = roidb['max_overlaps']
    rois = roidb['boxes']

    # Select foreground RoIs as those with >= FG_THRESH overlap
    fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
    # Guard against the case when an image has fewer than fg_rois_per_image
    # foreground RoIs
    fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_inds.size)
    # Sample foreground regions without replacement
    if fg_inds.size > 0:
        fg_inds = npr.choice(fg_inds, size=fg_rois_per_this_image,
                             replace=False)

    # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
    bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
                       (overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
    # Compute number of background RoIs to take from this image (guarding
    # against there being fewer than desired)
    bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
    bg_rois_per_this_image = np.minimum(bg_rois_per_this_image,
                                        bg_inds.size)
    # Sample background regions without replacement
    if bg_inds.size > 0:
        bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image,
                             replace=False)

    # The indices that we're selecting (both fg and bg)
    keep_inds = np.append(fg_inds, bg_inds)
    # Select sampled values from various arrays:
    labels = labels[keep_inds]
    # Clamp labels for the background RoIs to 0
    labels[fg_rois_per_this_image:] = 0
    overlaps = overlaps[keep_inds]
    rois = rois[keep_inds]

    bbox_targets, bbox_loss_weights = \
        _get_bbox_regression_labels(roidb['bbox_targets'][keep_inds, :],
                                    num_classes)

    return labels, overlaps, rois, bbox_targets, bbox_loss_weights
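As a side note, this also shows why casting fg_rois_per_image once in get_minibatch is enough: np.minimum preserves the floating-point type, so a non-integer fg_rois_per_image propagates through fg_rois_per_this_image into the size argument of npr.choice. A quick check:

import numpy as np

print(np.minimum(16.0, 42).dtype)   # float64 -- the float survives the minimum
print(np.minimum(16, 42).dtype)     # an integer dtype (int64 on most platforms)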



Second, in minibatch.py, in the _get_bbox_regression_labels(bbox_target_data, num_classes) function, the line bbox_targets[ind, start:end] = bbox_target_data[ind, 1:] kept raising an error, again because the index is not an integer. Printing the variables showed that start is not an integer. Its definition is start = 4 * cls, and the result is not an integer because cls = clss[ind] is itself not an integer, so cast to int where start is defined.
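The same problem can be reproduced in isolation (hypothetical values; the point is that the class column of bbox_target_data is a float32 array, so indexing it yields a float):

import numpy as np

clss = np.array([0., 3., 7.], dtype=np.float32)   # class labels stored in a float array
cls = clss[1]                                     # numpy.float32
start = 4 * cls                                   # 12.0 -- still a float
# some_array[1, start:start + 4] = ...            # fails: slice bounds must be integers
start = int(4 * cls)                              # 12 -- safe to use in the slice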

def _get_bbox_regression_labels(bbox_target_data, num_classes):
    """Bounding-box regression targets are stored in a compact form in the
    roidb.

    This function expands those targets into the 4-of-4*K representation used
    by the network (i.e. only one class has non-zero targets). The loss weights
    are similarly expanded.

    Returns:
        bbox_target_data (ndarray): N x 4K blob of regression targets
        bbox_loss_weights (ndarray): N x 4K blob of loss weights
    """
    clss = bbox_target_data[:, 0]
    bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
    bbox_loss_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
    inds = np.where(clss > 0)[0]
    for ind in inds:
        cls = clss[ind]
        start = 4 * cls  # change to: start = int(4 * cls)
        end = start + 4
        bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
        bbox_loss_weights[ind, start:end] = [1., 1., 1., 1.]
    return bbox_targets, bbox_loss_weights
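An equivalent alternative, not from the original post and shown here only as a sketch, is to cast the whole class column once instead of casting inside the loop:

import numpy as np

# Hypothetical compact targets: column 0 is the class, columns 1-4 are the regression targets
bbox_target_data = np.array([[0., 0.0, 0.0, 0.0, 0.0],
                             [3., 0.1, 0.2, 0.3, 0.4]], dtype=np.float32)
num_classes = 5

clss = bbox_target_data[:, 0].astype(int)   # cast once, outside the loop
bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
for ind in np.where(clss > 0)[0]:
    start = 4 * clss[ind]                   # already an integer
    bbox_targets[ind, start:start + 4] = bbox_target_data[ind, 1:]
print(bbox_targets[1, 12:16])               # [0.1 0.2 0.3 0.4]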