经过卷积神经元网络后,4维数据成为2维

时间:2018-05-21 10:36:54

标签: python keras

我正在训练一个有两种输入类型的神经元网络:图像和BR(蓝色到红色,它是一种非图像特征,如身高,体重......)。为此,我在keras中使用fit函数,并将图像转换为list以进行输入。但我不知道为什么具有4维形状的图像列表在进入拟合时变成2个维度,我得到如下错误:

  

检查输入时出错:期望 dense_1_input 有 3 个维度,但得到的数组形状为 (1630, 1)

当我将图像列表转换为数组时,我检查了image_array的形状,它有4个维度(特别是它的形状是1630,60,60,3)。即使在拟合功能之前,它仍然具有相同的形状。所以我真的不知道为什么形状变成了(1630,1)。有人可以帮我解释一下吗?

这是我的代码:

from keras.utils.np_utils import to_categorical
import pandas as pd 
import numpy as np
import os
from keras.applications.vgg16 import VGG16
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, Model
from keras.layers import Input, Activation, Dropout, Flatten, Dense,Concatenate, concatenate,Reshape, BatchNormalization, Merge
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.optimizers import Adagrad
from sklearn import preprocessing
from scipy.misc import imread
import time
from PIL import Image 
import cv2
# Target image size in pixels; imgConvert() produces (60, 60, 3) arrays.
img_width, img_height = 60, 60
# Module-level accumulators filled in by getData(): images, blue/red
# ratios, and one-hot labels, kept index-aligned.
img_list = []
BR_list = []
label_list = []
# Presumably the total sample count across both folders (used as an
# input_shape below) — TODO confirm it matches the data on disk.
data_num = 1630

# Training image folder for the "sugi" class.
folder1 = "cut2/train/sugi/"

# Training image folder for the "hinoki" class.
folder2 = "cut2/train/hinoki/"

def imgConvert(file_path):
    """Return a (60, 60, 3) float32 placeholder array for *file_path*.

    NOTE(review): the original implementation read the file with
    scipy's imread and then immediately discarded the result,
    overwriting it with np.arange placeholder data; a second
    np.array(...).reshape(...) was also a no-op.  The dead imread call
    and redundant reshape are removed here — the returned values are
    unchanged.  Real pixel data is still NOT used; this looks like a
    leftover debugging stub.  TODO: confirm whether the actual image
    should be returned instead.

    Args:
        file_path: path to the image file (currently unused).

    Returns:
        float32 ndarray of shape (60, 60, 3) containing 0..10799.
    """
    img = np.arange(1 * 3 * 60 * 60).reshape((60, 60, 3))
    return img.astype("float32")

def B_and_R(img_path):
    """Return the blue/red intensity ratio of the central 10x10 patch.

    Averages the blue and red channels (OpenCV loads BGR, so channel 0
    is blue and channel 2 is red) over rows/columns 25..34 and returns
    mean(blue) / mean(red).

    Bug fixed: the original accumulated raw uint8 pixel values, and
    NumPy's scalar promotion keeps the running sum as uint8, so a sum
    of up to 100 * 255 wrapped around at 255.  Pixels are now widened
    to Python ints before summing.

    Args:
        img_path: path to an image readable by cv2.imread; must be at
            least 35x35 pixels (not validated here).

    Returns:
        float — average blue divided by average red over the patch.
    """
    img = cv2.imread(img_path)
    blue_total = 0
    red_total = 0

    for i in range(25, 35):
        for j in range(25, 35):
            # int() prevents the uint8 accumulator overflow.
            blue_total += int(img[i, j, 0])
            red_total += int(img[i, j, 2])

    ave_B = blue_total / 100  # 10x10 patch -> 100 pixels
    ave_R = red_total / 100

    return ave_B / ave_R

def getData(path, pollen):
    """Load every file under *path* and append its features to the
    module-level lists.

    For each file: its blue/red ratio goes to BR_list, its converted
    image array to img_list, and a one-hot label to label_list —
    [1, 0] when *pollen* is "sugi", [0, 1] otherwise.
    """
    for entry in os.listdir(path):
        full_path = os.path.join(path, entry)
        ratio = B_and_R(full_path)
        image = imgConvert(full_path)
        img_list.append(image)
        BR_list.append(ratio)
        one_hot = np.zeros(2)
        one_hot[0 if pollen == "sugi" else 1] += 1
        label_list.append(one_hot)

# Script entry point: load both classes, assemble the feature arrays,
# then train a two-branch model (VGG16 image branch + blue/red-ratio
# scalar branch).  Written against the Keras 1.x API.
if __name__ == '__main__':

    # Populate the module-level img_list / BR_list / label_list.
    getData(folder1,"sugi")
    getData(folder2,"hinoki")

    img_arr = np.array(img_list)
    print(img_arr.shape)
    #.reshape(img_list[0],1,img_width,img_height)
    # NOTE(review): astype() returns a NEW array and the result is
    # discarded, so img_arr keeps its integer dtype here...
    img_arr.astype("float32")
    # ...which means this in-place true division will raise a TypeError
    # on an integer array — it only works if the dtype is already float.
    img_arr /= 255
    print(img_arr.shape)
    # expand_dims followed by indexing that same axis away is a no-op:
    # img_array is identical to img_arr, shape (1630, 60, 60, 3).
    img_array = np.expand_dims(img_arr, axis = 0)
    img_array = img_array[0,:,:,:,:]
    print(img_array.shape)

    """
    datagen = ImageDataGenerator(
        featurewise_center=True,
        featurewise_std_normalization=True,
        rotation_range=20,
        width_shift_range=0.2,
        height_shift_range=0.2,
        horizontal_flip=True)
    datagen.fit(img_array)
    """
    #img_array = img_array.reshape(img_array[0],1,img_width,img_height)

    print(img_array.shape)
    # One-hot labels, shape (1630, 2).
    label_arr = np.array(label_list)
    print(label_arr.shape)
    #label_array = np.expand_dims(label_arr, axis = 0)
    #label_array = label_array[0,:,:,:,:]

    # Scalar blue/red ratios, shape (1630,) — note this is 1-D.
    BR_arr = np.array(BR_list)
    print(BR_arr.shape)
    #BR_array = np.expand_dims(BR_arr, axis = 0)
    #BR_array = BR_array[0,:,:,:,:]

    #print(len([img_arr,BR_arr]))

    # Image branch: pretrained VGG16 without its classifier head.
    input_tensor = Input(shape=(img_width, img_height,3))
    vgg16 = VGG16(include_top=False, weights='imagenet', input_tensor=input_tensor)

    # Build the fully-connected (FC) top for the image branch.
    top_model = Sequential()
    top_model.add(Flatten(input_shape=vgg16.output_shape[1:]))

#print(top_model.summary())

    # Join VGG16 and the FC top into a single model.
    # NOTE(review): the input=/output= keyword names are Keras 1.x;
    # Keras 2 renamed them to inputs=/outputs=.
    branch1 = Model(input=vgg16.input, output=top_model(vgg16.output))
#model.summary()
    print(branch1.summary())


    # Scalar (blue/red ratio) branch.
    branch2 = Sequential()
    # NOTE(review): input_shape=(data_num,1) makes this branch expect
    # 3-D input of shape (batch, 1630, 1), but BR_arr is 1-D (1630,)
    # — this mismatch is the source of the reported error.  Per the
    # accepted answer, input_shape=(1,) is what is wanted (the batch
    # size is never part of input_shape).
    branch2.add(Dense(1, input_shape=(data_num,1), activation='sigmoid'))
    #branch1.add(Reshape(BR.shape, input_shape = BR.shape))
    branch2.add(BatchNormalization())
    branch2.add(Flatten())

    print(branch2.summary())
    # NOTE(review): Merge (capital M) was removed in Keras 2; the
    # functional concatenate() layer is the modern replacement.
    merged = Merge([branch1, branch2], mode = "concat")

    # Classifier head on top of the concatenated branches.
    model = Sequential()
    model.add(merged)
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2, activation='softmax'))

   #last_model = Model(input = [branch1.input,branch2.input],output=model())
    print(model.summary())
    model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.SGD(lr=1e-3, momentum=0.9),
              metrics=['accuracy'])
    print(img_array.shape)
    # Two inputs, one per branch; labels are the one-hot pollen class.
    model.fit([img_array,BR_arr], label_arr,
            epochs=5, batch_size=100, verbose=1)

1 个答案:

答案 0 :(得分:0)

好的,问题是输入形状。

虽然分支2的数据是 2D 的 (batch, 1),但您的模型也应该声明 2D 输入:input_shape = (1,)。(input_shape 中不包含批量大小,它会被自动忽略。)