An error about input dimensions in a Keras reshape

Asked: 2018-08-07 13:44:55

Tags: keras reshape

I am trying to reshape a tensor 'output' with dimensions (1, 512, 512, 9) into a tensor 'output_reshaped' with dimensions (1, 512*512, 9) in Keras for R. This should be the simplest thing ever, but for some reason it is not.

Here is the code I am using:

output_reshaped = layer_reshape(output, target_shape = c(w*h, class))

When I try to fit the model, the following error pops up:

Error in py_call_impl(callable, dots$args, dots$keywords) : 
ValueError: Error when checking target: expected reshape_16 to have 3 dimensions, but got array with shape (1, 512, 512, 9) 

What is going on here? According to the Keras documentation, this layer should be able to handle arbitrary input shapes.
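
A minimal standalone sketch (toy shapes chosen for illustration, not taken from the post) suggests that layer_reshape on its own accepts this kind of flatten and that target_shape excludes the batch dimension; note that the traceback above is raised while checking the *target*, not the layer input:

library(keras)

# Toy shapes for illustration only: a (4, 4, 9) feature map flattened to (16, 9).
inp <- layer_input(shape = c(4L, 4L, 9L))
out <- layer_reshape(inp, target_shape = c(4L * 4L, 9L))
m   <- keras_model(inputs = inp, outputs = out)
m$output_shape  # (None, 16, 9) -- the batch dimension stays implicit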

Parameters

library(keras)
library(EBImage)
library(raster)

epochs = 280
batch_size = 1L
parts = 4
h = as.integer(512) # height of the image tile (full image dim = 2448)
w = as.integer(512) # width of the image tile
channels = 3L
class = 9L
pick = 100L
path = '/media/daniel/6DA3F120567E843D/schelde'


train = readRDS( file.path(path, 'train.rds'))

Full model

input_img = layer_input(shape = c(w, h, channels)) 
#background_mask = layer_input(shape = c(w, h, channels)) 


l0 = layer_conv_2d(filters = 64, kernel_size = c(3, 3), padding = "same", activation = 'relu')(input_img)
l0 = layer_conv_2d(filters = 64, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l0)

l1 = layer_max_pooling_2d(pool_size = c(2, 2))(l0)
l1 = layer_conv_2d(filters = 128, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l1)
l1 = layer_conv_2d(filters = 128, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l1)

l2 = layer_max_pooling_2d(pool_size = c(2, 2))(l1)
l2 = layer_conv_2d(filters = 256, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l2)
l2 = layer_conv_2d(filters = 256, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l2)

l3 = layer_max_pooling_2d(pool_size = c(2, 2))(l2)
l3 = layer_conv_2d(filters = 512, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l3)
l3 = layer_conv_2d(filters = 512, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l3)

l4 = layer_max_pooling_2d(pool_size = c(2, 2))(l3)
l4 = layer_conv_2d(filters = 1024, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l4)
l4 = layer_conv_2d(filters = 1024, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l4)

l5 = layer_max_pooling_2d(pool_size = c(2, 2))(l4)
l5 = layer_conv_2d(filters = 1024, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l5)
l5 = layer_conv_2d(filters = 1024, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l5)

l6 = layer_max_pooling_2d(pool_size = c(2, 2))(l5)
l6 = layer_conv_2d(filters = 1024, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l6)
l6 = layer_conv_2d(filters = 1024, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l6)

l5_up = layer_conv_2d_transpose(filters = 512, kernel_size = c(3, 3), strides = c(2L, 2L), padding = "same")(l6)
l5_up = layer_concatenate(list(l5, l5_up))
l5_up = layer_conv_2d(filters = 512, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l5_up)
l5_up = layer_conv_2d(filters = 512, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l5_up)

l4_up = layer_conv_2d_transpose(filters = 512, kernel_size = c(3, 3), strides = c(2L, 2L), padding = "same")(l5)
l4_up = layer_concatenate(list(l4, l4_up))
l4_up = layer_conv_2d(filters = 512, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l4_up)
l4_up = layer_conv_2d(filters = 512, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l4_up)

l3_up = layer_conv_2d_transpose(filters = 512, kernel_size = c(3, 3), strides = c(2L, 2L), padding = "same")(l4)
l3_up = layer_concatenate(list(l3, l3_up))
l3_up = layer_conv_2d(filters = 512, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l3_up)
l3_up = layer_conv_2d(filters = 512, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l3_up)

l2_up = layer_conv_2d_transpose(filters = 256, kernel_size = c(3, 3), strides = c(2L, 2L), padding = "same")(l3_up)
l2_up = layer_concatenate(list(l2, l2_up))
l2_up = layer_conv_2d(filters = 256, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l2_up)
l2_up = layer_conv_2d(filters = 256, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l2_up)

l1_up = layer_conv_2d_transpose(filters = 128, kernel_size = c(3, 3), strides = c(2L, 2L), padding = "same")(l2_up)
l1_up = layer_concatenate(list(l1, l1_up))
l1_up = layer_conv_2d(filters = 128, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l1_up)
l1_up = layer_conv_2d(filters = 128, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l1_up)

l0_up = layer_conv_2d_transpose(filters = 64, kernel_size = c(3, 3), strides = c(2L, 2L), padding = "same")(l1_up)
l0_up = layer_concatenate(list(l0, l0_up))
l0_up = layer_conv_2d(filters = 64, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l0_up)
l0_up = layer_conv_2d(filters = 64, kernel_size = c(3, 3), padding = "same", activation = 'relu')(l0_up)

output = layer_conv_2d(filters = class, kernel_size = c(1, 1), padding = "same", activation = 'softmax')(l0_up)

output_reshaped = layer_reshape(output, batch_input_shape = c(1L, w, h, class), target_shape = c(w*h, class))

model = keras_model(inputs = input_img, outputs = output_reshaped)


opt <- optimizer_adam(lr = 0.0001, decay = 0, clipnorm = 1)

compile(model, loss = "categorical_crossentropy", optimizer = opt, metrics = "accuracy")
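
One way to see what the reshape layer actually produces (not in the original post, just a sanity check) is to print the model summary after compiling; the final reshape layer should report (None, 262144, 9), i.e. (batch, w*h, class):

# Print layer-by-layer output shapes; the last layer should show
# (None, 262144, 9) = (batch, w*h, class).
summary(model)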

Running the model

# i indexes the current training sample (the surrounding loop is not shown in the post)
input_im = readImage(file.path(path, train$images[i]))
input_im = array(input_im, dim = c(1, dim(input_im)))  # add batch dimension

# read the label raster; +1 so class indices start at 1
input_lab = raster::as.matrix(raster(file.path(path, train$labels[i]))) + 1

# one-hot encode every pixel into a vector of length `class`
input_lab = apply(input_lab, c(1, 2), function(x) {
  z = rep(0, class)
  z[x] = 1
  z
})

input_lab = aperm(input_lab, c(2, 3, 1))                  # move class to the last dimension: (h, w, class)
input_lab = array(input_lab, dim = c(1, dim(input_lab)))  # add batch dimension




model$fit(x = input_im, y = input_lab, batch_size = batch_size, epochs = 1L)
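
The traceback says "Error when checking target", which points at input_lab (still shaped (1, 512, 512, 9)) rather than at the reshape layer itself. Assuming that is the mismatch, a hedged sketch of flattening the labels to match the reshaped output might look like this:

# Assumption: the target y must have the same (batch, w*h, class) shape as the
# model's reshaped output. array_reshape() uses row-major (C-style) ordering,
# which matches how the Keras backend reshapes the output tensor.
input_lab_flat <- array_reshape(input_lab, c(1L, w * h, class))

model$fit(x = input_im, y = input_lab_flat, batch_size = batch_size, epochs = 1L)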

0 Answers:

No answers yet.