I modified the original fine-tuning tutorial in PyTorch so that I can do LOOCV (leave-one-out cross-validation). There are a couple of likely problems: the data loader I'm currently using applies the training transforms even to the held-out test sample (which it shouldn't), and during training it somehow only ever gets a single sample. How can I fix the code below?

For simplicity, I'm running it with 10 images, 2 classes, and 2 epochs.

from __future__ import print_function, division

import torch
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy



import torch.utils.data as data_utils
from torch.utils import data


data_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(20),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
}


data_dir = "test_images"

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")



def train_model(model, criterion, optimizer, scheduler, train_input, train_label, num_epochs=25):
    since = time.time()


    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)


        scheduler.step()
        model.train()  # Set model to training mode

        running_loss = 0.0
        running_corrects = 0

        # Iterate over data.
        train_input = train_input.to(device)
        train_label = train_label.to(device)

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward
        # track history if only in train
        with torch.set_grad_enabled(True):
            output = model(train_input)
            _, pred = torch.max(output, 1)
            loss = criterion(output, train_label)

            # backward + optimize only if in training phase

            loss.backward()
            optimizer.step()

        # statistics
        running_loss += loss.item() * train_input.size(0)
        running_corrects += torch.sum(pred == train_label.data)

        epoch_loss = running_loss / dataset_size['train']
        epoch_acc = running_corrects.double() / dataset_size['train']

        print('train Loss: {:.4f} Acc: {:.4f}'.format(epoch_loss, epoch_acc))

        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))

    return model



######################################################################
# Finetuning the convnet
# ----------------------
#
# Load a pretrained model and reset final fully connected layer.
#

model_ft = models.resnet50(pretrained=True)

num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, 2)

model_ft = model_ft.to(device)

criterion = nn.CrossEntropyLoss()

# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)

# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)



#model_ft = model_ft.cuda()
nb_samples = 10
nb_classes = 2



image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                          data_transforms[x])
                  for x in ['train']}

dataset_size = {x: len(image_datasets[x]) for x in ['train']}
class_names = image_datasets['train'].classes

# LOOCV
loocv_preds = []
loocv_targets = []
for idx in range(nb_samples):

    print('Using sample {} as test data'.format(idx))

    # Get all indices and remove test sample
    train_indices = list(range(len(image_datasets['train'])))
    del train_indices[idx]

    # Create new sampler
    sampler = data.SubsetRandomSampler(train_indices)

    dataloader = data.DataLoader(
        image_datasets['train'],
        num_workers=2,
        batch_size=1,
        sampler=sampler
    )

    # Train model
    for batch_idx, (sample, target) in enumerate(dataloader):
        print('Batch {}'.format(batch_idx))
        model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, sample, target, num_epochs=2) # do I add this line here?

    # Test on LOO sample
    model_ft.eval()
    test_data, test_target = image_datasets['train'][idx]
    test_data = test_data.to(device)
    test_target = torch.tensor(test_target).to(device)
    test_data.unsqueeze_(0)
    test_target.unsqueeze_(0)
    output = model_ft(test_data)
    pred = torch.argmax(output, 1)
    loocv_preds.append(pred.item())
    loocv_targets.append(test_target.item())


print("loocv preds: ", loocv_preds)
print("loocv targets: ", loocv_targets)
print(accuracy_score(loocv_targets, loocv_preds))
print(confusion_matrix(loocv_targets, loocv_preds))

Basically, in the code above, how should I modify the following piece of code so that the transforms are not applied to the one sample that is left out for testing?

dataloader = data.DataLoader(
    image_datasets['train'],
    num_workers=2,
    batch_size=1,
    sampler=sampler
)
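One idea I had to keep the augmentation away from the held-out sample (a sketch, not tested): build a second ImageFolder over the same directory with deterministic transforms, and read the test sample from that instead of from image_datasets['train']. The eval_transforms pipeline and the eval_dataset name below are mine, not from the tutorial:

# Deterministic pipeline for the held-out sample (my assumption of a
# sensible eval counterpart to the training transforms)
eval_transforms = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

# Second view of the same images, without augmentation
eval_dataset = datasets.ImageFolder(os.path.join(data_dir, 'train'),
                                    eval_transforms)

# ...and in the LOOCV loop, read the test sample from it:
# test_data, test_target = eval_dataset[idx]

Would that be the right way to do it?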

I'm also quite doubtful about this snippet:

for batch_idx, (sample, target) in enumerate(dataloader):
    print('Batch {}'.format(batch_idx))
    model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, sample, target, num_epochs=2) # do I add this line here?

Does it even make sense to pass just one sample at a time for training? How can I fix that? The full output can be found here: https://pastebin.com/SKQNRQNa
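What I suspect the loop should look like instead (a sketch under my own assumptions): give the DataLoader a batch size larger than 1 and run the epoch loop over all nine remaining samples, stepping the scheduler once per epoch rather than once per call:

# Sketch: train on the whole remaining split each epoch instead of calling
# train_model() once per single-sample batch
dataloader = data.DataLoader(
    image_datasets['train'],
    num_workers=2,
    batch_size=4,      # anything > 1; there are 9 training samples per fold
    sampler=sampler
)

for epoch in range(2):
    model_ft.train()
    for sample, target in dataloader:   # several batches per epoch
        sample, target = sample.to(device), target.to(device)
        optimizer_ft.zero_grad()
        loss = criterion(model_ft(sample), target)
        loss.backward()
        optimizer_ft.step()
    exp_lr_scheduler.step()             # step the LR schedule once per epoch

Is that the correct structure?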

Specifically, I don't know how to address the second bullet point mentioned in this answer: https://discuss.pytorch.org/t/training-phase-of-leave-one-out-cross-validation/30138/2?u=mona_jalal
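My best guess at what that point means (I'm honestly not sure) is that the model keeps its weights across folds, so by the time sample idx is held out, the model has already trained on it in earlier folds. If so, I suppose everything would have to be re-created at the top of each LOOCV iteration, something like:

# Sketch, assuming the issue is state leaking across LOOCV folds:
# build a fresh model, optimizer and scheduler for every fold
for idx in range(nb_samples):
    model_ft = models.resnet50(pretrained=True)
    model_ft.fc = nn.Linear(model_ft.fc.in_features, 2)
    model_ft = model_ft.to(device)
    optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
    # ... train on the nine remaining samples, then test on sample idx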

Also, if you suggest using skorch, please show me how I would apply LOOCV in the skorch transfer learning tutorial: https://colab.research.google.com/github/dnouri/skorch/blob/master/notebooks/Transfer_Learning.ipynb#scrollTo=IY4BAQUJLUiT
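In case skorch is the way to go, here is how I imagine LOOCV would look there (untested sketch: I'm assuming the ten images can be pre-loaded into one tensor using the eval_dataset idea from above, and that NeuralNetClassifier plugs into sklearn's LeaveOneOut like any other sklearn estimator):

# Untested sketch of LOOCV via skorch + sklearn; X, y, PretrainedResNet and
# eval_dataset are my own names, not from the skorch tutorial
import numpy as np
from sklearn.model_selection import LeaveOneOut, cross_val_predict
from skorch import NeuralNetClassifier

# Pre-transformed images as one float32 array, integer labels alongside
X = torch.stack([eval_dataset[i][0] for i in range(nb_samples)]).numpy()
y = np.array([eval_dataset[i][1] for i in range(nb_samples)])

class PretrainedResNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.net = models.resnet50(pretrained=True)
        self.net.fc = nn.Linear(self.net.fc.in_features, 2)

    def forward(self, x):
        return self.net(x)

net = NeuralNetClassifier(
    PretrainedResNet,               # skorch builds a fresh model per fold
    criterion=nn.CrossEntropyLoss,  # ResNet outputs raw logits
    max_epochs=2,
    lr=0.001,
    optimizer=optim.SGD,
    optimizer__momentum=0.9,
    train_split=None,               # skip the inner validation split; only 9 samples
    device='cuda' if torch.cuda.is_available() else 'cpu',
)

loocv_preds = cross_val_predict(net, X, y, cv=LeaveOneOut())
print(accuracy_score(y, loocv_preds))
print(confusion_matrix(y, loocv_preds))

Would that be equivalent to the manual loop above?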