CycleGAN 数据集报错:IndexError: list index out of range(列表索引超出范围)

时间:2021-06-06 14:13:45

标签: python pytorch generative-adversarial-network

我多次收到此错误。我尝试了不同的方法来解决这个问题,但最终都失败了。如果你们中的任何人能帮我解决这个问题,那将是非常有帮助的。我不断收到的错误是 "list index out of range"(列表索引超出范围)。我知道原因是代码不断访问超出列表范围的索引,但我找不到解决方法。

import  os
import torch
import torchvision.transforms as T
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
from torch.utils.data import Dataset
from PIL import Image

# Root folder containing the 'horse' and 'zebra' training sub-directories.
data_dir='./cyclegan/data/train/'

class CycleganDataset(Dataset):
    """Unpaired zebra/horse image dataset for CycleGAN.

    The two folders usually hold different numbers of images, so the
    dataset length is the size of the LARGER folder.  Each index must
    therefore be wrapped modulo the respective folder size — this is
    the fix for the "list index out of range" error: without the
    modulo, indices valid for the larger list overrun the smaller one.
    """

    def __init__(self, zebra_dir, horse_dir, transform=None):
        self.zebra_dir = zebra_dir
        self.horse_dir = horse_dir
        self.transform = transform

        self.zebra_images = os.listdir(zebra_dir)
        self.horse_images = os.listdir(horse_dir)
        # Length is the larger of the two folders (e.g. 1000 vs 1500).
        self.length_dataset = max(len(self.zebra_images), len(self.horse_images))
        self.zebra_len = len(self.zebra_images)
        self.horse_len = len(self.horse_images)

    def __len__(self):
        return self.length_dataset

    def __getitem__(self, index):
        # FIX: wrap the index so the shorter list is reused cyclically
        # instead of overrunning its bounds.
        zebra_img = self.zebra_images[index % self.zebra_len]
        horse_img = self.horse_images[index % self.horse_len]

        zebra_path = os.path.join(self.zebra_dir, zebra_img)
        horse_path = os.path.join(self.horse_dir, horse_img)

        zebra_img = np.array(Image.open(zebra_path).convert("RGB"))
        horse_img = np.array(Image.open(horse_path).convert("RGB"))

        if self.transform:
            # albumentations-style call: named targets in, dict out.
            augmentations = self.transform(image=zebra_img, image0=horse_img)
            zebra_img = augmentations["image"]
            horse_img = augmentations["image0"]

        return zebra_img, horse_img


import albumentations as A
from albumentations.pytorch import ToTensorV2

horse_dir = data_dir + 'horse'
zebra_dir = data_dir + 'zebra'

# Paired augmentation pipeline: "image0" is declared as an additional
# target so the horse image receives exactly the same transform
# (same flip decision, same resize) as the zebra image.
transforms = A.Compose(
    [
        A.Resize(width=256, height=256),
        A.HorizontalFlip(p=0.5),
        A.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], max_pixel_value=255),
        ToTensorV2(),
    ],
    additional_targets={"image0": "image"},
)

# BUG FIX: the original call passed (horse_dir, zebra_dir) positionally,
# but CycleganDataset.__init__ is (zebra_dir, horse_dir, transform) —
# the two domains were silently swapped.  Keywords make it explicit.
ds = CycleganDataset(zebra_dir=zebra_dir, horse_dir=horse_dir, transform=transforms)

import torch.nn as nn
import torch.functional as F
from torch.utils.data import DataLoader

# batch_size=1 is the standard CycleGAN setting; num_workers=0 keeps
# loading in the main process (safer on Windows / in notebooks).
train_dl=DataLoader(ds,batch_size=1,shuffle=True,num_workers = 0, pin_memory = True)

def get_default_device():
    """Pick the best available torch device: CUDA if present, else CPU."""
    return torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def to_device(data, device):
    """Recursively move a tensor — or a list/tuple of tensors — to *device*.

    Nested lists/tuples are handled; any nesting comes back as a list.
    """
    if not isinstance(data, (list, tuple)):
        return data.to(device, non_blocking=True)
    return [to_device(item, device) for item in data]

class DeviceDataLoader():
    """Wrap a DataLoader so every batch it yields lands on *device*."""

    def __init__(self, dl, device):
        self.dl = dl
        self.device = device

    def __iter__(self):
        # Lazily move each batch as it is requested.
        return (to_device(batch, self.device) for batch in self.dl)

    def __len__(self):
        # Number of batches in the underlying loader.
        return len(self.dl)

# Run on GPU when available; batches are moved to the device lazily,
# one at a time, as the wrapped loader is iterated.
device=get_default_device()
train_dl=DeviceDataLoader(train_dl,device)

class Block(nn.Module):
    """Discriminator stage: 4x4 Conv -> InstanceNorm -> LeakyReLU(0.2)."""

    def __init__(self, in_channels, out_channels, stride):
        super().__init__()
        conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=4,
            stride=stride,
            padding=1,
            bias=True,
            padding_mode='reflect',
        )
        self.conv = nn.Sequential(
            conv,
            nn.InstanceNorm2d(out_channels),
            nn.LeakyReLU(0.2),
        )

    def forward(self, x):
        return self.conv(x)

class Discriminator(nn.Module):
    """PatchGAN discriminator producing a map of real/fake scores.

    Bug fixed: in the original, the final 1-channel Conv2d append and
    the ``self.model = nn.Sequential(*layers)`` assignment were
    indented INSIDE the feature loop, so a 1-channel output layer was
    inserted between intermediate blocks whose channel counts don't
    match — the forward pass could never run.  Both statements now
    execute once, after the loop.
    """

    def __init__(self, in_channels=3, features=[64, 128, 256, 512]):
        super().__init__()
        # First stage: conv + LeakyReLU only (no InstanceNorm).
        self.initial = nn.Sequential(
            nn.Conv2d(
                in_channels,
                features[0],
                kernel_size=4,
                stride=2,
                padding=1,
                padding_mode='reflect',
            ),
            nn.LeakyReLU(0.2),
        )

        layers = []
        in_channels = features[0]
        for feature in features[1:]:
            # The deepest stage uses stride 1; earlier ones downsample by 2.
            layers.append(Block(in_channels, feature, stride=1 if feature == features[-1] else 2))
            in_channels = feature
        # Moved OUT of the loop: single real/fake score-map head.
        layers.append(nn.Conv2d(in_channels, 1, kernel_size=4, stride=1, padding=1,
                                padding_mode='reflect'))
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        x = self.initial(x)
        # Sigmoid squashes patch scores into (0, 1) for the MSE GAN loss.
        return torch.sigmoid(self.model(x))

class ConvBlock(nn.Module):
    """Generator building block: (transposed) conv + InstanceNorm + optional ReLU.

    ``down=True`` selects a reflect-padded Conv2d (downsampling path);
    ``down=False`` selects a ConvTranspose2d (upsampling path).
    ``use_act=False`` replaces the ReLU with Identity.
    Extra ``**kwargs`` (kernel_size, stride, padding, ...) are passed
    straight to the convolution.
    """

    def __init__(self, in_channels, out_channels, down=True, use_act=True, **kwargs):
        super().__init__()
        if down:
            conv = nn.Conv2d(in_channels, out_channels, padding_mode='reflect', **kwargs)
        else:
            conv = nn.ConvTranspose2d(in_channels, out_channels, **kwargs)
        activation = nn.ReLU(inplace=True) if use_act else nn.Identity()
        self.conv = nn.Sequential(conv, nn.InstanceNorm2d(out_channels), activation)

    def forward(self, x):
        return self.conv(x)

class RedidualBlock(nn.Module):
    """Residual block: returns x + F(x), where F is two 3x3 ConvBlocks
    (the second without activation).

    NOTE(review): the class name carries a typo ("Redidual"); it is
    kept because Generator references it by this exact name.
    """

    def __init__(self, channels):
        super().__init__()
        first = ConvBlock(channels, channels, kernel_size=3, padding=1, stride=1)
        second = ConvBlock(channels, channels, use_act=False, kernel_size=3, padding=1, stride=1)
        self.block = nn.Sequential(first, second)

    def forward(self, x):
        # Identity skip connection around the two convolutions.
        return x + self.block(x)

class Generator(nn.Module):
    """ResNet-style CycleGAN generator.

    Layout: 7x7 stem -> two stride-2 downsamples (nf -> 2nf -> 4nf) ->
    ``num_residuals`` residual blocks at 4nf -> two stride-2 transposed
    convs back to nf -> 7x7 head to ``img_channels``, tanh output.
    """

    def __init__(self, img_channels, num_features=64, num_residuals=9):
        super().__init__()
        nf = num_features

        # 7x7 stem keeps spatial size (stride 1, padding 3).
        self.initial = nn.Sequential(
            nn.Conv2d(img_channels, nf, kernel_size=7, stride=1, padding=3,
                      padding_mode='reflect'),
            nn.ReLU(inplace=True),
        )

        self.down_blocks = nn.ModuleList([
            ConvBlock(nf, nf * 2, kernel_size=3, stride=2, padding=1),
            ConvBlock(nf * 2, nf * 4, kernel_size=3, stride=2, padding=1),
        ])

        self.residual_blocks = nn.Sequential(
            *(RedidualBlock(nf * 4) for _ in range(num_residuals))
        )

        # Mirror of the downsampling path, using transposed convolutions.
        self.up_blocks = nn.ModuleList([
            ConvBlock(nf * 4, nf * 2, down=False, kernel_size=3, stride=2,
                      padding=1, output_padding=1),
            ConvBlock(nf * 2, nf, down=False, kernel_size=3, stride=2,
                      padding=1, output_padding=1),
        ])

        self.last = nn.Conv2d(nf, img_channels, kernel_size=7, stride=1,
                              padding=3, padding_mode='reflect')

    def forward(self, x):
        out = self.initial(x)
        for down in self.down_blocks:
            out = down(out)
        out = self.residual_blocks(out)
        for up in self.up_blocks:
            out = up(out)
        # tanh maps the output into [-1, 1], matching the normalized input.
        return torch.tanh(self.last(out))

import torch.optim as optim
from torchvision.utils import save_image

def train(epochs, lr):
    """Run the CycleGAN training loop over the module-level ``train_dl``.

    Args:
        epochs: number of passes over the dataloader.
        lr: learning rate shared by all four Adam optimizers.

    Fixes relative to the original:
      * ``H_reals`` / ``H_fakes`` are now initialized per epoch — they
        were used with ``+=`` before ever being assigned (NameError).
      * The discriminators score DETACHED fakes; the original did not
        detach, so the discriminator backward pass freed the generator
        graph that the generator step re-used right afterwards.
      * Consistent 4-space indentation (the original mixed 11- and
        12-space indents, which is a SyntaxError).
      * ``saved_images/`` is created up front so save_image can't fail
        on a missing directory.
      * The tqdm ``loop`` is built per epoch and actually iterated, so
        the postfix stats display as intended.
    """
    disc_H = Discriminator(in_channels=3).to(device)
    disc_Z = Discriminator(in_channels=3).to(device)
    gen_H = Generator(img_channels=3, num_residuals=9).to(device)
    gen_Z = Generator(img_channels=3, num_residuals=9).to(device)

    # One optimizer per network pair, with the CycleGAN-standard betas.
    opt_disc = optim.Adam(
        list(disc_H.parameters()) + list(disc_Z.parameters()),
        lr=lr,
        betas=(0.5, 0.999),
    )
    opt_gen = optim.Adam(
        list(gen_H.parameters()) + list(gen_Z.parameters()),
        lr=lr,
        betas=(0.5, 0.999),
    )

    L1 = nn.L1Loss()    # cycle-consistency loss
    mse = nn.MSELoss()  # least-squares adversarial loss

    g_scaler = torch.cuda.amp.GradScaler()
    d_scaler = torch.cuda.amp.GradScaler()

    os.makedirs("saved_images", exist_ok=True)

    for epoch in range(epochs):
        # Running sums of discriminator scores, reset each epoch.
        H_reals = 0.0
        H_fakes = 0.0
        loop = tqdm(train_dl, leave=True)

        for idx, (zebra, horse) in enumerate(loop):
            zebra = zebra.to(device)
            horse = horse.to(device)

            # ---------------- Discriminators ----------------
            with torch.cuda.amp.autocast():
                fake_horse = gen_H(zebra)
                D_H_real = disc_H(horse)
                # detach(): don't backprop the disc loss into gen_H, and
                # keep fake_horse's graph intact for the generator step.
                D_H_fake = disc_H(fake_horse.detach())
                H_reals += D_H_real.mean().item()
                H_fakes += D_H_fake.mean().item()
                D_H_real_loss = mse(D_H_real, torch.ones_like(D_H_real))
                D_H_fake_loss = mse(D_H_fake, torch.zeros_like(D_H_fake))
                D_H_loss = D_H_real_loss + D_H_fake_loss

                fake_zebra = gen_Z(horse)
                D_Z_real = disc_Z(zebra)
                D_Z_fake = disc_Z(fake_zebra.detach())
                D_Z_real_loss = mse(D_Z_real, torch.ones_like(D_Z_real))
                D_Z_fake_loss = mse(D_Z_fake, torch.zeros_like(D_Z_fake))
                D_Z_loss = D_Z_real_loss + D_Z_fake_loss

                D_loss = (D_H_loss + D_Z_loss) / 2

            opt_disc.zero_grad()
            d_scaler.scale(D_loss).backward()
            d_scaler.step(opt_disc)
            d_scaler.update()

            # ---------------- Generators ----------------
            with torch.cuda.amp.autocast():
                # Adversarial terms: fool the (freshly updated) discriminators.
                D_H_fake = disc_H(fake_horse)
                D_Z_fake = disc_Z(fake_zebra)
                loss_G_H = mse(D_H_fake, torch.ones_like(D_H_fake))
                loss_G_Z = mse(D_Z_fake, torch.ones_like(D_Z_fake))

                # Cycle consistency: translate back and compare to the input.
                cycle_zebra = gen_Z(fake_horse)
                cycle_horse = gen_H(fake_zebra)
                cycle_zebra_loss = L1(zebra, cycle_zebra)
                cycle_horse_loss = L1(horse, cycle_horse)

                # NOTE(review): the CycleGAN paper weights the cycle losses
                # by lambda=10; kept unweighted to match the original code.
                G_loss = (
                    loss_G_Z
                    + loss_G_H
                    + cycle_zebra_loss
                    + cycle_horse_loss
                )

            opt_gen.zero_grad()
            g_scaler.scale(G_loss).backward()
            g_scaler.step(opt_gen)
            g_scaler.update()

            if idx % 200 == 0:
                # Un-normalize from [-1, 1] back to [0, 1] before saving.
                save_image(fake_horse * 0.5 + 0.5, f"saved_images/horse_{idx}.png")
                save_image(fake_zebra * 0.5 + 0.5, f"saved_images/zebra_{idx}.png")

            loop.set_postfix(H_real=H_reals / (idx + 1), H_fake=H_fakes / (idx + 1))
# Entry point: 10 epochs.
# NOTE(review): lr=1e-5 is unusually low for CycleGAN (2e-4 is the
# value used in the paper) — confirm this is intentional.
train(10,1e-5)

error message part1

error message part2

0 个答案:

没有答案
相关问题