Kaggle: ImageNet Dog Breed Classification (Pytorch)

本文为自学碰壁的完成任务的一个记录总结,无任何参考价值

写在前面:本文章是跟着《动手深度学习》(李沐)学习过程中的动手实操,前因是对一个树叶数据集分类,但是由于自己太小白(太菜了)折腾了两三周才弄出来,但是觉得还是值得记录一下,对整个过程中自己碰壁过程进行一个总结。由于对于树叶分类的那个问题自己有点云里雾里的,觉得没达到效果,所以才有了在 Kaggle 上 ImageNet Dog Breed Classification 的分类,本文同样也记录了树叶分类

总结:

  1. 多去论坛或者说竞赛地址看看别人写的代码,多看多练
  2. 出现问题第一时间应该去查找,可以去IDE debug ,实在不行就Print 大法,我这次有个很关键的问题就是,在树叶分类任务过程中,我没有将树叶类别转化为数字而是直接传入Tensor,当然结果注定是失败的,还有就是我在提取类别的时候用了一个for 循环去一个元素一个元素的遍历,花费了大量时间,而在人家的baseline 当中用一个集合思想不能有重复元素就可以解决,效率大大提升,所以多看多练
  3. 在这个过程中有个自定义dataset类,但是自己不知道是哪里出问题了,导致传入网络的数据是错误的,验证精度从一开始就为零,而训练精度是从0慢慢提升上去,到后面才发现可能是dataset将类别和图片对应传入出问题,所以出问题要有针对性的解决,不要盲目性的瞎捣鼓

ImageNet Dogs Breed Classification

Kaggle 狗的品种分类竞赛网址

自定义Dataset

from torch.utils.data import Dataset, DataLoader
import numpy as np
from PIL import Image
import pandas as pd
from torchvision import transforms
# Load the competition labels and build a breed -> integer index mapping.
dog_dataframe = pd.read_csv('../input/dog-breed-identification/labels.csv')
# Deduplicate breed names via a set, then sort for a deterministic ordering.
dog_labels = sorted(set(dog_dataframe['breed']))
# Number of distinct dog breeds.
n_class = len(dog_labels)
# String labels must be mapped to integers before they can become tensors.
class_to_num = {breed: idx for idx, breed in enumerate(dog_labels)}


class DogsData(Dataset):
    """Dataset for the Kaggle dog-breed images.

    Reads image file names and breed labels from the competition CSV and
    serves ``(image_tensor, int_label)`` pairs, split into a training and
    a validation portion according to ``valid_ratio``.
    """

    def __init__(self, csv_path, file_path, mode='train', valid_ratio=0.1,
                 resize_height=224, resize_width=224):
        """
        Args:
            csv_path (string): path to the labels CSV file
            file_path (string): directory containing the .jpg images
            mode (string): 'train' or 'valid'
            valid_ratio (float): fraction of samples reserved for validation
            resize_height (int): target image height (kept for compatibility)
            resize_width (int): target image width (kept for compatibility)
        """
        self.resize_height = resize_height
        self.resize_width = resize_width
        # Directory holding the image files.
        self.file_path = file_path
        self.valid_ratio = valid_ratio
        # Whether this instance serves the training or the validation split.
        self.mode = mode
        # header=None keeps the CSV header as row 0, so data rows occupy
        # indices 1..data_len.
        self.data_info = pd.read_csv(csv_path, header=None)
        # Number of actual data rows (total rows minus the header row).
        self.data_len = len(self.data_info.index) - 1
        self.train_len = int(self.data_len * (1 - valid_ratio))
        # Split into train / valid. The training slice is rows 1..train_len
        # inclusive, i.e. iloc[1:train_len + 1] — the original code used
        # iloc[1:train_len], which silently dropped one training sample.
        if mode == 'train':
            self.train_image = np.asarray(self.data_info.iloc[1:self.train_len + 1, 0])
            self.train_label = np.asarray(self.data_info.iloc[1:self.train_len + 1, 1])
            self.image_arr = self.train_image
            self.label_arr = self.train_label
        elif mode == 'valid':
            self.valid_image = np.asarray(self.data_info.iloc[self.train_len + 1:, 0])
            self.valid_label = np.asarray(self.data_info.iloc[self.train_len + 1:, 1])
            self.image_arr = self.valid_image
            self.label_arr = self.valid_label
        else:
            # Previously an unknown mode crashed later with AttributeError;
            # fail fast with a clear message instead.
            raise ValueError("mode must be 'train' or 'valid', got {!r}".format(mode))

        self.real_len = len(self.image_arr)
        # Fixed copy-paste error: this is the dogs dataset, not the leaves one.
        print('Finished reading the {} set of Dogs Dataset ({} samples found)'
              .format(mode, self.real_len))

    def __getitem__(self, index):
        """Return the transformed image tensor and its integer class label."""
        single_image_name = self.image_arr[index]
        # File names in the CSV omit the '.jpg' extension.
        image = Image.open(self.file_path + single_image_name + '.jpg')
        # Training uses augmentation (random resized crop + horizontal flip);
        # validation uses a deterministic resize + center crop. Both normalize
        # with the standard ImageNet channel statistics.
        if self.mode == 'train':
            transform = transforms.Compose([
                transforms.RandomResizedCrop((224), scale=(0.64, 1), ratio=(0.8, 1.0)),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ])
        else:
            transform = transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ])

        image = transform(image)

        # Map the string breed name to its integer class id.
        label = self.label_arr[index]
        number_label = class_to_num[label]

        return image, number_label

    def __len__(self):
        """Number of samples in this split."""
        return self.real_len
    

        

训练部分

采用了学习率cos下降的方法,采用了AdamW优化函数,采用了Resnext50_32x4d网络


import torchvision
import sys
import torch
from torch.optim.lr_scheduler import CosineAnnealingLR
import pandas as pd
# This is for the progress bar.
from tqdm import tqdm
import torch.nn as nn
def train(lr, weight_decay, num_epochs):
    """Fine-tune a pretrained ResNeXt-50 on the Kaggle dog-breed dataset.

    Args:
        lr (float): learning rate for AdamW
        weight_decay (float): weight decay for AdamW
        num_epochs (int): number of training epochs

    Saves the weights with the best validation accuracy to './dog_resnext'.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("using {} device.\n".format(device))

    train_path = '../input/dog-breed-identification/labels.csv'
    file_path = '../input/dog-breed-identification/train/'

    train_dataset = DogsData(train_path, file_path, mode='train')
    valid_dataset = DogsData(train_path, file_path, mode='valid')

    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=8,
        shuffle=True,
        num_workers=2
    )
    valid_loader = torch.utils.data.DataLoader(
        dataset=valid_dataset,
        batch_size=8,
        shuffle=False,
        num_workers=2
    )

    # Transfer learning: swap the final FC layer for one sized to our classes.
    net = torchvision.models.resnext50_32x4d(pretrained=True)
    in_channel = net.fc.in_features
    net.fc = nn.Sequential(nn.Linear(in_channel, n_class))
    net.to(device)

    loss_function = nn.CrossEntropyLoss()
    optimizer = torch.optim.AdamW(net.parameters(), lr=lr, weight_decay=weight_decay)
    # Cosine-annealed learning-rate schedule.
    scheduler = CosineAnnealingLR(optimizer, T_max=10)
    model_path = './dog_resnext'
    best_acc = 0.0

    for epoch in range(num_epochs):
        # ---------- Training ----------
        net.train()
        train_loss = []
        train_accs = []
        for imgs, labels in tqdm(train_loader):
            imgs = imgs.to(device)
            labels = labels.to(device)
            logits = net(imgs)
            # CrossEntropyLoss applies log-softmax internally.
            loss = loss_function(logits, labels)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            acc = (logits.argmax(dim=-1) == labels).float().mean()
            # .item() converts to Python floats; the original appended live
            # GPU tensors, keeping them (and their memory) alive all epoch.
            train_loss.append(loss.item())
            train_accs.append(acc.item())

        train_loss = sum(train_loss) / len(train_loss)
        train_acc = sum(train_accs) / len(train_accs)
        print(f"[ Train | {epoch + 1:03d}/{num_epochs:03d} ] loss = {train_loss:.5f}, acc = {train_acc:.5f}")

        scheduler.step()

        # ---------- Validation ----------
        # eval() disables dropout/batch-norm updates; no_grad() skips autograd
        # bookkeeping for the whole validation pass (the original only wrapped
        # the forward call, not the loss computation).
        net.eval()
        valid_loss = []
        valid_accs = []
        with torch.no_grad():
            for imgs, labels in tqdm(valid_loader):
                imgs = imgs.to(device)
                labels = labels.to(device)
                logits = net(imgs)
                loss = loss_function(logits, labels)
                acc = (logits.argmax(dim=-1) == labels).float().mean()
                valid_loss.append(loss.item())
                valid_accs.append(acc.item())

        valid_loss = sum(valid_loss) / len(valid_loss)
        valid_acc = sum(valid_accs) / len(valid_accs)
        print(f"[ Valid | {epoch + 1:03d}/{num_epochs:03d} ] loss = {valid_loss:.5f}, acc = {valid_acc:.5f}")

        # Checkpoint only on improvement. The original printed "saving model"
        # on every epoch, even when nothing was saved.
        if valid_acc > best_acc:
            best_acc = valid_acc
            torch.save(net.state_dict(), model_path)
            print('saving model with acc {:.3f}\n\n'.format(best_acc))

经过训练后,最好的准确率达到了87%。
在这里插入图片描述

动手深度学习实战:树叶分类

李沐大神出的教程-动手深度学习 Kaggle 树叶分类
感兴趣上B站搜索李沐就好
如果真的感兴趣可以去这篇baseline 去看看,大佬思路清晰,看完简直如醍醐灌顶
同样有其他大佬甚至将精度提高到了99%以上,感兴趣可以去看看别人怎么写的

dataset

import torch
import torch.nn as nn
import pandas as pd
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image
import sys
import os
import matplotlib.pyplot as plt
import torchvision.models as models
# This is for the progress bar.
from tqdm import tqdm

# Read the training labels and derive the label <-> index mapping.
labels_dataframe = pd.read_csv('../input/classify-leaves/train.csv')
# Unique label names, sorted so the class numbering is deterministic.
leaves_labels = sorted(set(labels_dataframe['label']))
num_classes = len(leaves_labels)
# Map each string label to an integer class id.
class_to_num = {label: i for i, label in enumerate(leaves_labels)}


class LeavesData(Dataset):
    """Dataset for the Kaggle leaf-classification images.

    Serves ``(image_tensor, int_label)`` pairs for the 'train' and 'valid'
    splits, and bare image tensors for the unlabeled 'test' split.
    """

    def __init__(self, csv_path, file_path, mode='train', valid_ratio=0.2,
                 resize_height=256, resize_width=256):
        """
        Args:
            csv_path (string): path to the CSV listing image files (and labels)
            file_path (string): root directory the image paths are relative to
            mode (string): 'train', 'valid' or 'test'
            valid_ratio (float): fraction of samples reserved for validation
            resize_height (int): target image height (kept for compatibility)
            resize_width (int): target image width (kept for compatibility)
        """
        # Source images vary in size; these record the intended resize target.
        self.resize_height = resize_height
        self.resize_width = resize_width

        self.file_path = file_path
        self.mode = mode

        # header=None keeps the CSV header as row 0, so data rows occupy
        # indices 1..data_len.
        self.data_info = pd.read_csv(csv_path, header=None)
        # Number of actual data rows (total rows minus the header row).
        self.data_len = len(self.data_info.index) - 1
        self.train_len = int(self.data_len * (1 - valid_ratio))

        if mode == 'train':
            # Column 0 holds image file names; the training slice is rows
            # 1..train_len inclusive, i.e. iloc[1:train_len + 1] — the original
            # iloc[1:train_len] silently dropped one training sample.
            self.train_image = np.asarray(self.data_info.iloc[1:self.train_len + 1, 0])
            # Column 1 holds the string labels.
            self.train_label = np.asarray(self.data_info.iloc[1:self.train_len + 1, 1])
            self.image_arr = self.train_image
            self.label_arr = self.train_label
        elif mode == 'valid':
            self.valid_image = np.asarray(self.data_info.iloc[self.train_len + 1:, 0])
            self.valid_label = np.asarray(self.data_info.iloc[self.train_len + 1:, 1])
            self.image_arr = self.valid_image
            self.label_arr = self.valid_label
        elif mode == 'test':
            # Test split has no labels; use every data row.
            self.test_image = np.asarray(self.data_info.iloc[1:, 0])
            self.image_arr = self.test_image
        else:
            # Previously an unknown mode crashed later with AttributeError;
            # fail fast with a clear message instead.
            raise ValueError("mode must be 'train', 'valid' or 'test', got {!r}".format(mode))

        self.real_len = len(self.image_arr)

        print('Finished reading the {} set of Leaves Dataset ({} samples found)'
              .format(mode, self.real_len))

    def __getitem__(self, index):
        """Return a transformed image tensor, plus its integer label unless in test mode."""
        single_image_name = self.image_arr[index]

        # Image paths in the CSV are relative to file_path.
        img_as_img = Image.open(self.file_path + single_image_name)

        # Training uses augmentation (random resized crop + horizontal flip);
        # valid/test use a deterministic resize. Both normalize with the
        # standard ImageNet channel statistics.
        if self.mode == 'train':
            transform = transforms.Compose([
                transforms.Resize((224, 224)),
                transforms.RandomResizedCrop(224, scale=(0.08, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
                transforms.RandomHorizontalFlip(p=0.5),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ])
        else:
            transform = transforms.Compose([
                transforms.Resize((224, 224)),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ])

        img_as_img = transform(img_as_img)

        if self.mode == 'test':
            return img_as_img
        else:
            # Map the string label to its integer class id.
            label = self.label_arr[index]
            number_label = class_to_num[label]

            return img_as_img, number_label

    def __len__(self):
        """Number of samples in this split."""
        return self.real_len

训练部分

from torch.optim.lr_scheduler import CosineAnnealingLR
def train(lr, weight_decay, num_epochs):
    """Fine-tune a pretrained ResNeXt-50 on the leaf-classification dataset.

    Args:
        lr (float): learning rate for AdamW
        weight_decay (float): weight decay for AdamW
        num_epochs (int): number of training epochs

    Saves the weights with the best validation accuracy to
    './my_leaves_resnext50_32x4d.pth'.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("using {} device.\n".format(device))
    train_path = '../input/classify-leaves/train.csv'
    img_path = '../input/classify-leaves/'
    train_dataset = LeavesData(train_path, img_path, mode='train')
    val_dataset = LeavesData(train_path, img_path, mode='valid')
    # shuffle=True: the original used shuffle=False here, so every epoch saw
    # the samples in the same (label-correlated) order, which hurts training.
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=16,
        shuffle=True,
        num_workers=2
    )
    val_loader = torch.utils.data.DataLoader(
        dataset=val_dataset,
        batch_size=16,
        shuffle=False,
        num_workers=2
    )
    # Transfer learning: swap the final FC layer for one sized to our classes.
    net = models.resnext50_32x4d(pretrained=True)
    in_channel = net.fc.in_features
    net.fc = nn.Sequential(nn.Linear(in_channel, num_classes))
    net.to(device)
    loss_function = nn.CrossEntropyLoss()

    optimizer = torch.optim.AdamW(net.parameters(), lr=lr, weight_decay=weight_decay)
    # Cosine-annealed learning-rate schedule.
    scheduler = CosineAnnealingLR(optimizer, T_max=10)
    best_acc = 0.0
    save_path = './my_leaves_resnext50_32x4d.pth'
    train_steps = len(train_loader)
    val_steps = len(val_loader)
    for epoch in range(num_epochs):
        # ---------- Training ----------
        net.train()
        running_loss = 0.0
        train_bar = tqdm(train_loader, file=sys.stdout)
        train_acc, train_total = 0.0, 0
        for step, (images, label) in enumerate(train_bar):
            optimizer.zero_grad()
            images = images.to(device)
            label = label.to(device)
            y_hat = net(images)
            loss = loss_function(y_hat, label)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            train_predict_y = torch.max(y_hat, dim=1)[1]
            train_total += len(label)
            train_acc += torch.eq(train_predict_y, label).sum().item()

            train_bar.desc = "train epoch[{}/{}] loss:{:.3f}".format(epoch + 1, num_epochs, loss)
            # NOTE: the original called del + torch.cuda.empty_cache() here on
            # every batch; empty_cache() is expensive and unnecessary — the
            # caching allocator reuses freed memory on its own.
        train_accurate = train_acc / train_total
        scheduler.step()

        # ---------- Validation ----------
        net.eval()
        val_acc = 0.0
        val_loss = 0.0
        val_total = 0
        with torch.no_grad():
            val_bar = tqdm(val_loader, file=sys.stdout)
            for val_images, val_labels in val_bar:
                val_images = val_images.to(device)
                val_labels = val_labels.to(device)
                outputs = net(val_images)
                loss = loss_function(outputs, val_labels)
                val_predict_y = torch.max(outputs, dim=1)[1]
                val_total += len(val_labels)
                val_loss += loss.item()
                val_acc += torch.eq(val_predict_y, val_labels).sum().item()
                val_bar.desc = "valid epoch[{}/{}]".format(epoch + 1, num_epochs)
        val_accurate = val_acc / val_total
        print('[epoch %d] train_loss: %.3f  train_accuracy: %.3f  val_loss: %.3f val_accuracy: %.3f \n' %
              (epoch + 1, running_loss / train_steps, train_accurate, val_loss / val_steps, val_accurate))

        # Checkpoint only when validation accuracy improves.
        if val_accurate > best_acc:
            best_acc = val_accurate
            torch.save(net.state_dict(), save_path)
        print('best accuracy: {:.3f}\n'.format(best_acc))

    print('Finished Training')

最后的验证最好精度是93%,其训练精度比验证精度稍小(因为加入了很强的图像增广),其实最好的精度达到了95%,但是值得注意的是它的训练精度达到了99%,我认为只是一个过拟合的信号,所以选择放弃这个,而是选择了前面那个
在这里插入图片描述