Writing a training model for a CNN

Posted: 2019-02-14 19:24:30

Tags: python conv-neural-network training-data chainer

I am writing training code for TwoStream-IQA, a two-stream convolutional neural network. The model predicts the quality score of a patch evaluated through the two streams of the network. For the training below, I used the test dataset provided in the GitHub link above.

The training code is as follows:

import os
import time
import numpy as np
import argparse

import chainer

chainer.global_config.train=True

from chainer import cuda
from chainer import serializers
from chainer import optimizers
from chainer import iterators
from chainer import training 
from chainer.training import extensions
from PIL import Image
from sklearn.feature_extraction.image import extract_patches

from model import Model

parser = argparse.ArgumentParser(description='train.py')
parser.add_argument('--model', '-m', default='', 
                    help='path to the trained model')
parser.add_argument('--gpu', '-g', default=0, type=int, help='GPU ID')

args = parser.parse_args()


model = Model()

cuda.cudnn_enabled = True
cuda.check_cuda_available()
xp = cuda.cupy
model.to_gpu()

## prepare training data 
test_label_path = 'data_list/test.txt'
test_img_path = 'data/live/'
test_Graimg_path = 'data/live_grad/'
save_model_path = '/models/nr_sana_2stream.model'

patches_per_img = 256
patchSize = 32

print('-------------Load data-------------')
final_train_set = []
with open(test_label_path, 'rt') as f:
    for l in f:
        line, la = l.strip().split()  # for debug

        tic = time.time()
        full_path = os.path.join(test_img_path, line)
        Grafull_path = os.path.join(test_Graimg_path, line)

        inputImage = Image.open(full_path)
        Graf = Image.open(Grafull_path)
        img = np.asarray(inputImage, dtype=np.float32)
        Gra = np.asarray(Graf, dtype=np.float32)
        img = img.transpose(2, 0, 1)
        Gra = Gra.transpose(2, 0, 1)

        img1 = np.zeros((1, 3, Gra.shape[1], Gra.shape[2]))
        img1[0, :, :, :] = img
        Gra1 = np.zeros((1, 3, Gra.shape[1], Gra.shape[2]))
        Gra1[0, :, :, :] = Gra

        patches = extract_patches(img, (3, patchSize, patchSize), patchSize)
        Grapatches = extract_patches(Gra, (3, patchSize, patchSize), patchSize)

        X = patches.reshape((-1, 3, patchSize, patchSize))
        GraX = Grapatches.reshape((-1, 3, patchSize, patchSize))

        temp_slice1 = [X[int(float(index))] for index in range(256)]
        temp_slice2 = [GraX[int(float(index))] for index in range(256)]
        ##############################################  
        for j in range(len(temp_slice1)):
            temp_slice1[j] = xp.array(temp_slice1[j].astype(np.float32))
            temp_slice2[j] = xp.array(temp_slice2[j].astype(np.float32))

            final_train_set.append((
                np.asarray((temp_slice1[j], temp_slice2[j])).astype(np.float32),
                int(la)
                ))      
        ##############################################  
print('--------------Done!----------------')

print('--------------Iterator!----------------')    
train_iter = iterators.SerialIterator(final_train_set, batch_size=4)
optimizer = optimizers.Adam()
optimizer.use_cleargrads()
optimizer.setup(model)

updater = training.StandardUpdater(train_iter, optimizer, device=0)

print('--------------Trainer!----------------') 
trainer = training.Trainer(updater, (50, 'epoch'), out='result')

trainer.extend(extensions.LogReport())

trainer.extend(extensions.PrintReport(['epoch', 'iteration', 'main/loss', 'elapsed_time']))

print('--------------Running trainer!----------------') 
trainer.run()

However, the code produces an error at the trainer.run() line, as shown below:

-------------Load data-------------
--------------Done!----------------
--------------Iterator!----------------
--------------Trainer!----------------
--------------Running trainer!----------------
Exception in main training loop: Unsupported dtype object
Traceback (most recent call last):
  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/training/trainer.py", line 316, in run
    update()
  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/training/updaters/standard_updater.py", line 149, in update
    self.update_core()
  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/training/updaters/standard_updater.py", line 154, in update_core
    in_arrays = self.converter(batch, self.device)
  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/dataset/convert.py", line 149, in concat_examples
    return to_device(device, _concat_arrays(batch, padding))
  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/dataset/convert.py", line 37, in to_device
    return cuda.to_gpu(x, device)
  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/backends/cuda.py", line 285, in to_gpu
    return _array_to_gpu(array, device_, stream)
  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/backends/cuda.py", line 333, in _array_to_gpu
    return cupy.asarray(array)
  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/cupy/creation/from_data.py", line 60, in asarray
    return core.array(a, dtype, False)
  File "cupy/core/core.pyx", line 2049, in cupy.core.core.array
  File "cupy/core/core.pyx", line 2083, in cupy.core.core.array
Will finalize trainer extensions and updater before reraising the exception.
Traceback (most recent call last):

  File "<ipython-input-69-12b84b41c6b9>", line 1, in <module>
    runfile('/mnt/nas/sanaalamgeer/Projects/1/MyOwnChainer/Two-stream_IQA-master/train.py', wdir='/mnt/nas/sanaalamgeer/Projects/1/MyOwnChainer/Two-stream_IQA-master')

  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/spyder_kernels/customize/spydercustomize.py", line 668, in runfile
    execfile(filename, namespace)

  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/spyder_kernels/customize/spydercustomize.py", line 108, in execfile
    exec(compile(f.read(), filename, 'exec'), namespace)

  File "/mnt/nas/sanaalamgeer/Projects/1/MyOwnChainer/Two-stream_IQA-master/train.py", line 129, in <module>
    trainer.run()

  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/training/trainer.py", line 330, in run
    six.reraise(*sys.exc_info())

  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/six.py", line 693, in reraise
    raise value

  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/training/trainer.py", line 316, in run
    update()

  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/training/updaters/standard_updater.py", line 149, in update
    self.update_core()

  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/training/updaters/standard_updater.py", line 154, in update_core
    in_arrays = self.converter(batch, self.device)

  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/dataset/convert.py", line 149, in concat_examples
    return to_device(device, _concat_arrays(batch, padding))

  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/dataset/convert.py", line 37, in to_device
    return cuda.to_gpu(x, device)

  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/backends/cuda.py", line 285, in to_gpu
    return _array_to_gpu(array, device_, stream)

  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/backends/cuda.py", line 333, in _array_to_gpu
    return cupy.asarray(array)

  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/cupy/creation/from_data.py", line 60, in asarray
    return core.array(a, dtype, False)

  File "cupy/core/core.pyx", line 2049, in cupy.core.core.array

  File "cupy/core/core.pyx", line 2083, in cupy.core.core.array

ValueError: Unsupported dtype object

Maybe that is because I am preparing the training data incorrectly, since the model takes the training input as:

length = x_data.shape[0]
x1 = Variable(x_data[0:length:2])
x2 = Variable(x_data[1:length:2])

and y_data as:

t = xp.repeat(y_data[0:length:2], 1)

The variable final_train_set is a dataset of tuples (Numpy Array, 66), where each Numpy Array has dimensions (2, 3, 32, 32) and holds the two kinds of patches, each of shape (3, 32, 32).
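
As a sanity check on the shapes (a minimal sketch, assuming each dataset entry is the (2, 3, 32, 32) array described above), this is what the default batching and the model's strided slicing would produce:

import numpy as np

# Four fake dataset entries, each stacking the image patch and the gradient patch.
batch = np.stack([np.zeros((2, 3, 32, 32), dtype=np.float32) for _ in range(4)])
print(batch.shape)         # (4, 2, 3, 32, 32)

# The model slices along the batch axis, so x1 and x2 each get every other sample:
print(batch[0:4:2].shape)  # (2, 2, 3, 32, 32) -> fed to x1
print(batch[1:4:2].shape)  # (2, 2, 3, 32, 32) -> fed to x2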

I used the dataset from the GitHub link provided above. I am new to Chainer, please help!

2 Answers:

Answer 0 (score: 1)

In short, you are calling numpy.asarray inappropriately: numpy.asarray cannot concatenate two cupy.ndarrays, although it can concatenate two numpy.ndarrays.

A simplified version of your code:

import numpy, cupy

final_train_set = []

N_PATCH_PER_IMAGE = 8

for i in range(10):
    label = 0

    temp_slice_1 = [numpy.zeros((3, 3)) for j in range(N_PATCH_PER_IMAGE)]
    temp_slice_2 = [numpy.zeros((3, 3)) for j in range(N_PATCH_PER_IMAGE)]

    for j in range(N_PATCH_PER_IMAGE):
        temp_slice_1[j] = cupy.array(temp_slice_1[j])
        temp_slice_2[j] = cupy.array(temp_slice_2[j])
        final_train_set.append(
            [
                # attempting to concatenate two cupy arrays by numpy.asarray 
                numpy.asarray([temp_slice_1[j], temp_slice_2[j]]),
                label
            ]
        )

The error:

import numpy as np
import cupy as cp

print("two numpy arrays")
print(np.asarray([np.zeros(shape=(1,)), np.zeros(shape=(1,))]))
print(np.asarray([np.zeros(shape=(1,)), np.zeros(shape=(1,))]).dtype)

print()

print("two cupy arrays")
print(np.asarray([cp.zeros(shape=(1,)), cp.zeros(shape=(1,))]))
print(np.asarray([cp.zeros(shape=(1,)), cp.zeros(shape=(1,))]).dtype)
Output:

two numpy arrays
[[0.]
 [0.]]
float64

two cupy arrays
[[array(0.)]
 [array(0.)]]
object

Solution: comment out the two lines

import numpy  # not import cupy here

for i in range(10):
    label = 0

    temp_slice_1 = [numpy.zeros((3, 3)) for j in range(N_PATCH_PER_IMAGE)]
    temp_slice_2 = [numpy.zeros((3, 3)) for j in range(N_PATCH_PER_IMAGE)]

    for j in range(N_PATCH_PER_IMAGE):
        # temp_slice_1[j] = cupy.array(temp_slice_1[j]) <- comment out!
        # temp_slice_2[j] = cupy.array(temp_slice_2[j]) <- comment out!
        final_train_set.append(
            [
                # concatenate two numpy arrays: usually cupy should not be used in dataset
                numpy.asarray([temp_slice_1[j], temp_slice_2[j]]),
                label
            ]
        )
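
As a quick check that this is enough (a minimal sketch, assuming a CUDA-enabled Chainer installation): the default converter used by StandardUpdater, chainer.dataset.concat_examples, moves a NumPy-only batch to the GPU given by device, so the dataset itself can stay on the CPU:

import numpy as np
from chainer.dataset import concat_examples

# Four fake (array, label) entries in the same format as final_train_set.
batch = [(np.zeros((2, 3, 32, 32), dtype=np.float32), 0) for _ in range(4)]

# concat_examples stacks the arrays and the labels and transfers both to GPU 0,
# which is what StandardUpdater(..., device=0) does internally.
x, t = concat_examples(batch, device=0)
print(type(x), x.shape)  # a cupy array of shape (4, 2, 3, 32, 32)
print(type(t), t.shape)  # a cupy array of shape (4,)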

Footnotes

  1. In the code you provided, xp is not specified, so no one can tell what it is. If you cannot isolate the problem, please post the entire body of your code, including the model.

  2. I suspect you may be unable to run the training code for another reason as well. In this code, all of the data is first loaded into main memory as the structure final_train_set. If the number of images is huge, main memory will run out and a MemoryError will be raised. (In other words, if the number of images is small and your memory is large enough, the error will not occur.) In that case, the following references (Chainer at a Glance, Dataset Abstraction) will help, as in the sketch below.
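
For example, a lazily loading dataset can be built on chainer.dataset.DatasetMixin so that only file paths and labels stay in main memory and each example is read on demand in get_example. This is a rough sketch under the assumption that the loading and patch extraction from the question are moved into a helper; load_patch_pair below is hypothetical:

import numpy as np
import chainer

class PatchPairDataset(chainer.dataset.DatasetMixin):
    # Keeps only (image_path, gradient_path, label) triples in memory.
    def __init__(self, triples):
        self._triples = triples

    def __len__(self):
        return len(self._triples)

    def get_example(self, i):
        img_path, grad_path, label = self._triples[i]
        # load_patch_pair is a hypothetical helper that reuses the PIL loading and
        # sklearn patch extraction from the question and returns a (2, 3, 32, 32) array.
        x = load_patch_pair(img_path, grad_path)
        return x.astype(np.float32), np.int32(label)

The resulting dataset can be passed to iterators.SerialIterator just like final_train_set, and only the current batch is ever held in memory.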

Answer 1 (score: -1)

Disclaimer: I did not write any of this code.

I found this GitHub repository that performs quality assessment using OpenCV, SciPy, and a few other modules. Here is the code:

# Python code for BRISQUE model
# Original paper title: No-Reference Image Quality Assessment in the Spatial Domain
# Link: http://ieeexplore.ieee.org/document/6272356/
import cv2
import numpy as np
from scipy import ndimage
import math

def get_gaussian_filter(shape=7, sigma=7.0/6):
    # shape and sigma were undefined in the snippet as posted; a 7x7 window with
    # sigma = 7/6 matches the Gaussian window used by the reference BRISQUE code.
    [m,n] = [(ss - 1.0) / 2.0 for ss in (shape,shape)]
    [y,x] = np.ogrid[-m:m+1,-n:n+1]
    window = np.exp( -(x*x + y*y) / (2.0*sigma*sigma) )
    window[window < np.finfo(window.dtype).eps*window.max() ] = 0
    sum_window = window.sum()
    if sum_window != 0:
        window = np.divide(window, sum_window)
    return window

def lmom(X):
    (rows, cols)  = X.shape
    if cols == 1:
        X = X.reshape(1,rows)
    n = rows
    X.sort()    
    b = np.zeros(3)    
    b0 = X.mean()    
    for r in range(1,4):        
        Num = np.prod(np.tile(np.arange(r+1,n+1), (r,1))-np.tile(np.arange(1,r+1).reshape(r,1),(1,n-r)),0)        
        Num = Num.astype(np.float)                
        Den = np.prod(np.tile(n, (1, r)) - np.arange(1,r+1), 1)        
        b[r-1] = 1.0/n * sum(Num/Den * X[0,r:])
    L = np.zeros(4)
    L[0] = b0
    L[1] = 2*b[0] - b0
    L[2] = 6*b[1] - 6*b[0] + b0
    L[3] = 20*b[2] - 30*b[1] + 12*b[0] - b0
    return L

def compute_features(im):
    im = im.astype(np.float)
    window = get_gaussian_filter()
    scalenum = 2
    feat = []
    for itr_scale in range(scalenum):
        mu = cv2.filter2D(im, cv2.CV_64F, window, borderType=cv2.BORDER_CONSTANT)
        mu_sq = mu * mu
        sigma = np.sqrt(abs(cv2.filter2D(im*im, cv2.CV_64F, window, borderType=cv2.BORDER_CONSTANT) - mu_sq))        
        structdis = (im-mu)/(sigma+1)
        structdis_col_vector = np.reshape(structdis.transpose(), (structdis.size,1))
        L = lmom(structdis.reshape(structdis.size,1))
        feat = np.append(feat,[L[1], L[3]])
        shifts = [[0,1], [1,0], [1,1], [-1,1]]
        for itr_shift in shifts:
            shifted_structdis = np.roll(structdis, itr_shift[0], axis=0)
            shifted_structdis = np.roll(shifted_structdis, itr_shift[1], axis=1)

            shifted_structdis_col_vector = np.reshape(shifted_structdis.T, (shifted_structdis.size,1))
            pair = structdis_col_vector * shifted_structdis_col_vector
            L = lmom(pair.reshape(pair.size,1))
            feat = np.append(feat, L)
        im = cv2.resize(im, (0,0), fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)
    return feat


im = ndimage.imread('example.bmp', flatten=True)
feat = compute_features(im)
print(feat)