Need help with training Faster RCNN ResNet50 FPN in PyTorch

Posted: 2019-07-12 10:30:16

Tags: python-3.x computer-vision pytorch faster-rcnn torchvision

I am new to PyTorch. I am trying to use the pretrained Faster RCNN torchvision.models.detection.fasterrcnn_resnet50_fpn() for an object detection project. I have created a CustomDataset(Dataset) class to handle my custom dataset.
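
For reference, a minimal sketch of how that pretrained detection model is typically loaded (assuming a 2019-era torchvision where pretrained=True is the relevant flag; newer releases use a weights argument instead):

import torchvision

# Load Faster R-CNN with a ResNet-50 FPN backbone, pretrained on COCO
# (the `pretrained=True` flag matches torchvision releases from this era).
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)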

Here is the implementation of the custom classes:


import json
import os

import torch
from skimage import io
from torch.utils.data import Dataset


class ToTensor(object):
    """Convert ndarrays in sample to Tensors."""

    def __call__(self, sample):
        image, landmarks = sample['image'], sample['meta_data']

        # swap color axis because
        # numpy image: H x W x C
        # torch image: C X H X W
        image = image.transpose((2, 0, 1))
        return {'image': torch.from_numpy(image),
                'meta_data': landmarks}

class CustomDataset(Dataset):
    """Custom Landmarks dataset."""
    def __init__(self, data_dir, root_dir, transform=None):
        """
        Args:
            data_dir (string): Directory with all the labels(json).
            root_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.data_dir = data_dir
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        return len(os.listdir(self.data_dir))

    def __getitem__(self, idx):
        img_name = sorted(os.listdir(self.root_dir))[idx]
        image = io.imread(self.root_dir+'/'+img_name, plugin='matplotlib')
        json_file = sorted(os.listdir(self.data_dir))[idx]
        with open(self.data_dir+'/'+json_file) as f:
          meta_data = json.load(f)

        meta_data = meta_data['annotation']['object']
        sample = {'image': image, 'meta_data': meta_data}

        to_tensor = ToTensor()
        transformed_sample = to_tensor(sample)

        if self.transform:
            sample = self.transform(sample)

        return transformed_sample
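
For context, a minimal sketch of how this dataset might be wired into a DataLoader; the directory names below are hypothetical, and because each sample's meta_data holds a variable number of objects, a list-returning collate_fn is used instead of the default stacking:

from torch.utils.data import DataLoader

# Hypothetical directory layout; adjust to your project.
dataset = CustomDataset(data_dir='annotations', root_dir='images')

# Annotations vary in length per image, so keep each batch as a plain list
# of sample dicts rather than letting DataLoader try to stack them.
loader = DataLoader(dataset, batch_size=2, shuffle=True,
                    collate_fn=lambda batch: batch)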

Here is the train_model function:

import copy
import time

def train_model(model, criterion, optimizer, lr_scheduler, num_epochs=25):
    since = time.time()

    best_model = model
    best_acc = 0.0

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch has a training and validation phase
        for phase in ['train', 'test']:
            if phase == 'train':
                optimizer = lr_scheduler(optimizer, epoch)
                model.train()  # Set model to training mode
            else:
                model.eval()  # Set model to evaluate mode

            running_loss = 0.0
            running_corrects = 0

            for data in dset_loaders[phase]:
                # get the inputs

                inputs, labels = data['image'], data['meta_data']

                inputs = inputs.to(device)

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward
                outputs = model(inputs, labels)
                _, preds = torch.max(outputs.data, 1)
                loss = criterion(outputs, labels)

                # backward + optimize only if in training phase
                if phase == 'train':
                    loss.backward()
                    optimizer.step()

                # statistics
                running_loss += loss.item()
                running_corrects += torch.sum(preds == labels).item()

            epoch_loss = running_loss / dset_sizes[phase]
            epoch_acc = running_corrects / dset_sizes[phase]

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))

            # deep copy the model
            if phase == 'test' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model = copy.deepcopy(model)

        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    return best_model

When I execute model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, num_epochs=25), I get "RuntimeError: _thnn_upsample_bilinear2d_forward is not supported on CUDAType for Byte".

1 Answer:

Answer 0 (score: 0):

Your data points appear to be byte tensors, i.e. of type uint8. Try casting the data to float32:

# Replace this
inputs = inputs.to(device)
# With this
inputs = inputs.float().to(device)
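
Alternatively, the cast can live inside the dataset's ToTensor transform so the model always receives float inputs; a sketch of the relevant lines, assuming the images are uint8 arrays in the 0-255 range:

        # Inside ToTensor.__call__, after the axis swap:
        image = image.transpose((2, 0, 1))
        # uint8 [0, 255] -> float32 [0, 1]
        image = torch.from_numpy(image).float() / 255.0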

Note that the torchvision models expect the data to be normalized in a specific way. Check the steps here; basically you need to use

normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])

to normalize your data.
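
Note that transforms.Normalize expects float tensors (typically scaled to [0, 1]), so it is usually composed after the float conversion. A sketch of such a pipeline for the image alone (the custom meta_data dict would still need to be handled separately):

from torchvision import transforms

preprocess = transforms.Compose([
    transforms.ToTensor(),                       # HWC uint8 -> CHW float32 in [0, 1]
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])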