PyTorch自定义数据集dataloader返回字符串(键)而不是张量

时间:2018-06-15 15:29:25

标签: image dataset pytorch enumerate tensor

我正在尝试加载自己的数据集,并使用自定义Dataset和Dataloader读取图像和标签,并将它们转换为PyTorch张量。但是,在迭代Dataloader时,它返回的是字符串键"image"和"labels",而不是实际的张量值:

 
print(self.train_loader)  # shows a Tensor object
tic = time.time()
# NOTE(review): the DataLoader yields dict batches ({'image': ..., 'labels': ...}),
# so tuple-unpacking each batch as (x, y) binds the two dict KEYS — the strings
# "image" and "labels" — not the tensors. This is where the failure occurs.
with tqdm(total=self.num_train) as pbar:
    for i, (x, y) in enumerate(self.train_loader):  # x and y are returned as string (where it fails)

        if self.use_gpu:
            x, y = x.cuda(), y.cuda()
        x, y = Variable(x), Variable(y)

这就是dataloader.py的样子:

 
from __future__ import print_function, division #ds
import numpy as np
from utils import plot_images

import os #ds
import pandas as pd #ds
from skimage import io, transform #ds
import torch
from torchvision import datasets
from torch.utils.data import Dataset, DataLoader #ds
from torchvision import transforms
from torchvision import utils #ds
from torch.utils.data.sampler import SubsetRandomSampler


class CDataset(Dataset):
    """Image dataset driven by a CSV annotation file.

    Each CSV row holds an image base name (column 0) and its label
    (column 1); images are stored under ``root_dir`` as ``<name>.jpg``.
    Samples are returned as ``{'image': ndarray, 'labels': ndarray}``
    dicts, optionally passed through ``transform``.
    """

    def __init__(self, csv_file, root_dir, transform=None):
        """
        Args:
            csv_file (string): Path to the csv file with annotations.
            root_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.frame = pd.read_csv(csv_file)
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        # One sample per annotation row.
        return len(self.frame)

    def __getitem__(self, idx):
        # Column 0 is the image base name; the .jpg extension is implied.
        img_path = os.path.join(self.root_dir,
                                self.frame.iloc[idx, 0] + '.jpg')
        sample = {
            'image': io.imread(img_path),
            'labels': np.array(self.frame.iloc[idx, 1]),
        }

        if self.transform:
            sample = self.transform(sample)

        return sample

class ToTensor(object):
    """Convert the ndarrays of a sample dict to torch Tensors.

    The image is rearranged from numpy's H x W x C layout to torch's
    C x H x W layout before conversion; labels are converted as-is.
    """

    def __call__(self, sample):
        # numpy image: H x W x C  ->  torch image: C x H x W
        chw_image = sample['image'].transpose((2, 0, 1))
        return {
            'image': torch.from_numpy(chw_image),
            'labels': torch.from_numpy(sample['labels']),
        }


def get_train_valid_loader(data_dir,
                           batch_size,
                           random_seed,
                           #valid_size=0.1, #ds
                           #shuffle=True,
                           show_sample=False,
                           num_workers=4,
                           pin_memory=False):
    """
    Utility function for loading and returning train and valid
    multi-process iterators over the custom CSV-annotated image dataset.
    A sample 3x3 grid of the images can be optionally displayed.

    If using CUDA, num_workers should be set to 1 and pin_memory to True.

    Args
    ----
    - data_dir: path directory to the dataset (currently unused: CSV and
      image paths below are hard-coded).
    - batch_size: how many samples per batch to load.
    - random_seed: fix seed for reproducibility (currently unused).
    - show_sample: plot a 3x3 sample grid of the dataset.
    - num_workers: number of subprocesses to use when loading the dataset.
    - pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
      True if using GPU.

    Returns
    -------
    - train_loader: training set iterator.
    - valid_loader: validation set iterator.
    """
    # Only convert samples to tensors; normalization is left disabled.
    trans = transforms.Compose([
        ToTensor(), #normalize,
    ])

    # Train/validation splits come from separate, hard-coded CSV files.
    train_dataset = CDataset(csv_file='/home/Desktop/6June17/util/train.csv',
                             root_dir='/home/caffe/data/images/',
                             transform=trans)
    valid_dataset = CDataset(csv_file='/home/Desktop/6June17/util/eval.csv',
                             root_dir='/home/caffe/data/images/',
                             transform=trans)

    # Draw every index of each dataset in random order. Keep
    # SubsetRandomSampler (rather than shuffle=True) because callers read
    # loader.sampler.indices to learn the dataset size.
    train_sampler = SubsetRandomSampler(list(range(len(train_dataset))))
    valid_sampler = SubsetRandomSampler(list(range(len(valid_dataset))))

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=batch_size, sampler=train_sampler,
        num_workers=num_workers, pin_memory=pin_memory,
    )

    print(train_loader)

    valid_loader = torch.utils.data.DataLoader(
        valid_dataset, batch_size=batch_size, sampler=valid_sampler,
        num_workers=num_workers, pin_memory=pin_memory,
    )

    # Optionally visualize a few training images.
    if show_sample:
        sample_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=9,  # BUG FIX: was `dataset` (undefined)
            num_workers=num_workers, pin_memory=pin_memory
        )
        # Batches are dicts (see CDataset.__getitem__), so fetch values by
        # key instead of tuple-unpacking; `next(iter(...))` replaces the
        # Python-2-only `.next()` method.
        batch = next(iter(sample_loader))
        images, labels = batch['image'], batch['labels']
        X = np.transpose(images.numpy(), [0, 2, 3, 1])  # NCHW -> NHWC for plotting
        plot_images(X, labels)

    return (train_loader, valid_loader)


def get_test_loader(data_dir,
                    batch_size,
                    num_workers=4,
                    pin_memory=False):
    """
    Utility function for loading and returning a multi-process
    test iterator over the custom CSV-annotated image dataset.

    If using CUDA, num_workers should be set to 1 and pin_memory to True.

    Args
    ----
    - data_dir: path directory to the dataset.
    - batch_size: how many samples per batch to load.
    - num_workers: number of subprocesses to use when loading the dataset.
    - pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
      True if using GPU.

    Returns
    -------
    - data_loader: test set iterator.
    """
    # Only convert samples to tensors; normalization is not applied.
    pipeline = transforms.Compose([ToTensor()])

    test_dataset = CDataset(
        csv_file='/home/Desktop/6June17/util/test.csv',
        root_dir='/home/caffe/data/images/',
        transform=pipeline,
    )

    # No shuffling for evaluation: keep sample order deterministic.
    return torch.utils.data.DataLoader(
        test_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        pin_memory=pin_memory,
    )


#for i_batch, sample_batched in enumerate(dataloader):
#    print(i_batch, sample_batched['image'].size(),
#          sample_batched['landmarks'].size())

#    # observe 4th batch and stop.
#    if i_batch == 3:
#        plt.figure()
#        show_landmarks_batch(sample_batched)
#        plt.axis('off')
#        plt.ioff()
#        plt.show()
#        break

最小的工作样本很难在这里发布,但基本上我正在尝试修改这个与MNIST顺利运作的项目http://torch.ch/blog/2015/09/21/rmva.html。我只是尝试使用我自己的数据集运行它,并使用上面使用的自定义dataloader.py

它实例化Dataloader,如下所示:

在trainer.py中:

 
if config.is_train:
    self.train_loader = data_loader[0]
    self.valid_loader = data_loader[1]
    self.num_train = len(self.train_loader.sampler.indices)
    self.num_valid = len(self.valid_loader.sampler.indices)

-> 从main.py开始:

 
if config.is_train:
    data_loader = get_train_valid_loader(
        config.data_dir, config.batch_size,
        config.random_seed, #config.valid_size,
        #config.shuffle, 
        config.show_sample, **kwargs
    )

1 个答案:

答案 0 :(得分:2)

你没有正确使用python的enumerate()。(x, y)目前被赋值为批量字典的两个键,即字符串"image"和"labels"。这样修改应该可以解决您的问题:

for i, batch in enumerate(self.train_loader):
    x, y = batch["image"], batch["labels"]
    # ...
相关问题