Data Augmentation
Data augmentation is a technique that helps our model generalize better by creating new training images from different variants of the existing ones. We have only 800 images in our current training set, so data augmentation is very important to make sure our model doesn't overfit.
For this problem, I used flips, rotations, center crops and random crops.
The only thing to remember here is to make sure the bounding box is also transformed the same way as the image. To do this, the code below converts the bounding box into a binary mask of the same size as the image, applies the transform to the image and the mask alike, and then reads the box back off the transformed mask.
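The transformsXY function below leans on two helpers, create_mask and mask_to_bb, which are defined earlier in the article. As a rough sketch of what they do (the exact implementation may differ), assuming boxes are stored as [y_min, x_min, y_max, x_max]:
def create_mask(bb, x):
    """Creates a binary mask of the same height/width as image x, with ones inside the box."""
    rows, cols, *_ = x.shape
    Y = np.zeros((rows, cols))
    bb = bb.astype(int)
    Y[bb[0]:bb[2], bb[1]:bb[3]] = 1.
    return Y

def mask_to_bb(Y):
    """Reads the bounding box back off a (possibly transformed) mask."""
    rows, cols = np.nonzero(Y)
    if len(rows) == 0:
        return np.zeros(4, dtype=np.float32)
    return np.array([rows.min(), cols.min(), rows.max(), cols.max()], dtype=np.float32)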
# modified from fast.ai
def crop(im, r, c, target_r, target_c):
    return im[r:r+target_r, c:c+target_c]

# random crop to the original size
def random_crop(x, r_pix=8):
    """ Returns a random crop"""
    r, c, *_ = x.shape
    c_pix = round(r_pix*c/r)
    rand_r = random.uniform(0, 1)
    rand_c = random.uniform(0, 1)
    start_r = np.floor(2*rand_r*r_pix).astype(int)
    start_c = np.floor(2*rand_c*c_pix).astype(int)
    return crop(x, start_r, start_c, r-2*r_pix, c-2*c_pix)

def center_crop(x, r_pix=8):
    r, c, *_ = x.shape
    c_pix = round(r_pix*c/r)
    return crop(x, r_pix, c_pix, r-2*r_pix, c-2*c_pix)

def rotate_cv(im, deg, y=False, mode=cv2.BORDER_REFLECT, interpolation=cv2.INTER_AREA):
    """ Rotates an image by deg degrees"""
    r, c, *_ = im.shape
    M = cv2.getRotationMatrix2D((c/2, r/2), deg, 1)
    if y:
        return cv2.warpAffine(im, M, (c, r), borderMode=cv2.BORDER_CONSTANT)
    return cv2.warpAffine(im, M, (c, r), borderMode=mode, flags=cv2.WARP_FILL_OUTLIERS+interpolation)

def random_cropXY(x, Y, r_pix=8):
    """ Returns the same random crop of both the image and its mask"""
    r, c, *_ = x.shape
    c_pix = round(r_pix*c/r)
    rand_r = random.uniform(0, 1)
    rand_c = random.uniform(0, 1)
    start_r = np.floor(2*rand_r*r_pix).astype(int)
    start_c = np.floor(2*rand_c*c_pix).astype(int)
    xx = crop(x, start_r, start_c, r-2*r_pix, c-2*c_pix)
    YY = crop(Y, start_r, start_c, r-2*r_pix, c-2*c_pix)
    return xx, YY

def transformsXY(path, bb, transforms):
    x = cv2.imread(str(path)).astype(np.float32)
    x = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)/255
    Y = create_mask(bb, x)
    if transforms:
        rdeg = (np.random.random() - .50)*20
        x = rotate_cv(x, rdeg)
        Y = rotate_cv(Y, rdeg, y=True)
        if np.random.random() > 0.5:
            x = np.fliplr(x).copy()
            Y = np.fliplr(Y).copy()
        x, Y = random_cropXY(x, Y)
    else:
        x, Y = center_crop(x), center_crop(Y)
    return x, mask_to_bb(Y)
def create_corner_rect(bb, color='red'):
    bb = np.array(bb, dtype=np.float32)
    return plt.Rectangle((bb[1], bb[0]), bb[3]-bb[1], bb[2]-bb[0], color=color,
                         fill=False, lw=3)
def show_corner_bb(im, bb):
    plt.imshow(im)
    plt.gca().add_patch(create_corner_rect(bb))
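To sanity-check the augmentation, it helps to plot a transformed image together with its transformed box; a minimal sketch (the box values here are placeholders):
# Visualize one augmented sample to confirm the box follows the image.
im_aug, bb_aug = transformsXY('./road_signs/images_resized/road789.png',
                              np.array([50, 60, 200, 220]), transforms=True)
show_corner_bb(im_aug, bb_aug)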
PyTorch Dataset
Now that we have data augmentation in place, we can do the train-validation split and create our PyTorch datasets. We normalize the images using ImageNet stats because we're using a pre-trained ResNet model, and we apply the data augmentations on our dataset only while training.
X_train, X_val, y_train, y_val = train_test_split(X, Y, test_size=0.2, random_state=42)
def normalize(im):
    """Normalizes images with Imagenet stats."""
    imagenet_stats = np.array([[0.485, 0.456, 0.406], [0.229, 0.224, 0.225]])
    return (im - imagenet_stats[0])/imagenet_stats[1]
class RoadDataset(Dataset):
    def __init__(self, paths, bb, y, transforms=False):
        self.transforms = transforms
        self.paths = paths.values
        self.bb = bb.values
        self.y = y.values

    def __len__(self):
        return len(self.paths)

    def __getitem__(self, idx):
        path = self.paths[idx]
        y_class = self.y[idx]
        x, y_bb = transformsXY(path, self.bb[idx], self.transforms)
        x = normalize(x)
        x = np.rollaxis(x, 2)
        return x, y_class, y_bb
train_ds = RoadDataset(X_train['new_path'], X_train['new_bb'], y_train, transforms=True)
valid_ds = RoadDataset(X_val['new_path'], X_val['new_bb'], y_val)
batch_size = 64
train_dl = DataLoader(train_ds, batch_size=batch_size, shuffle=True)
valid_dl = DataLoader(valid_ds, batch_size=batch_size)
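As a quick end-to-end check of the pipeline, we can pull a single batch and inspect the shapes:
# One batch: images are (batch, 3, H, W) after normalize and rollaxis,
# class labels are (batch,), and boxes are (batch, 4).
x, y_class, y_bb = next(iter(train_dl))
print(x.shape, y_class.shape, y_bb.shape)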
PyTorch Model
For this model, I used a very simple pre-trained ResNet-34. Since we have two tasks to accomplish here, there are two final layers: a bounding box regressor and an image classifier.
class BB_model(nn.Module):
    def __init__(self):
        super(BB_model, self).__init__()
        resnet = models.resnet34(pretrained=True)
        layers = list(resnet.children())[:8]
        self.features1 = nn.Sequential(*layers[:6])
        self.features2 = nn.Sequential(*layers[6:])
        self.classifier = nn.Sequential(nn.BatchNorm1d(512), nn.Linear(512, 4))
        self.bb = nn.Sequential(nn.BatchNorm1d(512), nn.Linear(512, 4))

    def forward(self, x):
        x = self.features1(x)
        x = self.features2(x)
        x = F.relu(x)
        x = nn.AdaptiveAvgPool2d((1, 1))(x)
        x = x.view(x.shape[0], -1)
        return self.classifier(x), self.bb(x)
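A quick forward pass on a dummy batch verifies that both heads behave as expected; a minimal sketch (the input size here is just an example):
# Both heads return a (batch, 4) tensor: four class scores from the
# classifier and four box coordinates from the regressor.
m = BB_model()
dummy = torch.randn(2, 3, 284, 424)  # example H, W; any reasonable size works
out_class, out_bb = m(dummy)
print(out_class.shape, out_bb.shape)  # torch.Size([2, 4]) torch.Size([2, 4])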
Training
For the loss, we need to take both the classification loss and the bounding box regression loss into account, so we use a combination of cross-entropy and L1 loss (the sum of all absolute differences between the true and predicted coordinates). I scaled the L1 loss down by a factor of 1000, since with this scaling the classification and regression losses end up in a similar range. Other than that, it's a standard PyTorch training loop (using the GPU):
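As a small illustration of how the two terms combine on dummy tensors (C = 1000 is the same scale factor used in the loop below):
# Cross-entropy for the class head plus summed L1 for the box head,
# with the L1 term scaled down by C so both are on a similar scale.
import torch
import torch.nn.functional as F

C = 1000
out_class = torch.randn(8, 4)          # dummy class scores
y_class = torch.randint(0, 4, (8,))    # dummy class labels
out_bb = torch.rand(8, 4) * 300        # dummy predicted coordinates (pixels)
y_bb = torch.rand(8, 4) * 300          # dummy true coordinates (pixels)
loss_class = F.cross_entropy(out_class, y_class, reduction="sum")
loss_bb = F.l1_loss(out_bb, y_bb, reduction="sum")
loss = loss_class + loss_bb / C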
def update_optimizer(optimizer, lr):
    for i, param_group in enumerate(optimizer.param_groups):
        param_group["lr"] = lr

def train_epocs(model, optimizer, train_dl, val_dl, epochs=10, C=1000):
    idx = 0
    for i in range(epochs):
        model.train()
        total = 0
        sum_loss = 0
        for x, y_class, y_bb in train_dl:
            batch = y_class.shape[0]
            x = x.cuda().float()
            y_class = y_class.cuda()
            y_bb = y_bb.cuda().float()
            out_class, out_bb = model(x)
            loss_class = F.cross_entropy(out_class, y_class, reduction="sum")
            loss_bb = F.l1_loss(out_bb, y_bb, reduction="none").sum(1)
            loss_bb = loss_bb.sum()
            loss = loss_class + loss_bb/C
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            idx += 1
            total += batch
            sum_loss += loss.item()
        train_loss = sum_loss/total
        val_loss, val_acc = val_metrics(model, val_dl, C)
        print("train_loss %.3f val_loss %.3f val_acc %.3f" % (train_loss, val_loss, val_acc))
    return sum_loss/total
def val_metrics(model, valid_dl, C=1000):
    model.eval()
    total = 0
    sum_loss = 0
    correct = 0
    for x, y_class, y_bb in valid_dl:
        batch = y_class.shape[0]
        x = x.cuda().float()
        y_class = y_class.cuda()
        y_bb = y_bb.cuda().float()
        out_class, out_bb = model(x)
        loss_class = F.cross_entropy(out_class, y_class, reduction="sum")
        loss_bb = F.l1_loss(out_bb, y_bb, reduction="none").sum(1)
        loss_bb = loss_bb.sum()
        loss = loss_class + loss_bb/C
        _, pred = torch.max(out_class, 1)
        correct += pred.eq(y_class).sum().item()
        sum_loss += loss.item()
        total += batch
    return sum_loss/total, correct/total
model = BB_model().cuda()
parameters = filter(lambda p: p.requires_grad, model.parameters())
optimizer = torch.optim.Adam(parameters, lr=0.006)
train_epocs(model, optimizer, train_dl, valid_dl, epochs=15)
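The update_optimizer helper above makes it easy to continue training at a lower learning rate. A plausible second phase (the exact schedule here is an assumption, not the author's recorded run):
# Illustrative fine-tuning phase at a reduced learning rate.
update_optimizer(optimizer, 0.001)
train_epocs(model, optimizer, train_dl, valid_dl, epochs=10)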
Testing
Now that we're done with training, we can pick a random image and test our model on it. Even though we had a fairly small number of training images, we end up with a pretty decent prediction on the test image.
It would be a fun exercise to take a real photo with your phone and test the model on it. Another interesting experiment would be to train the model without any data augmentation and compare the two models.
# resizing test image
im = read_image('./road_signs/images_resized/road789.png')
im = cv2.resize(im, (int(1.49*300), 300))
cv2.imwrite('./road_signs/road_signs_test/road789.jpg', cv2.cvtColor(im, cv2.COLOR_RGB2BGR))
# test Dataset
test_ds = RoadDataset(pd.DataFrame([{'path': './road_signs/road_signs_test/road789.jpg'}])['path'],
                      pd.DataFrame([{'bb': np.array([0, 0, 0, 0])}])['bb'],
                      pd.DataFrame([{'y': 0}])['y'])  # dummy box and label; only the image matters here
x, y_class, y_bb = test_ds[0]
xx = torch.FloatTensor(x[None,])
xx.shape
# prediction
out_class, out_bb = model(xx.cuda())
out_class, out_bb
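To interpret the raw outputs, we can take the argmax over the class scores and draw the predicted box back onto the test image; a minimal sketch:
# predicted class: index of the highest score
_, pred_class = torch.max(out_class, 1)
# predicted bounding box, drawn back onto the resized test image
bb_hat = out_bb.detach().cpu().numpy().astype(int)
show_corner_bb(im, bb_hat[0])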
Conclusion
Now that we've covered the fundamentals of object detection and implemented it from scratch, you can extend these ideas to the multi-object case and try out more complex models like RCNN and YOLO!