Fully convolutional network produces all zeros on a segmentation task

Date: 2017-10-23 16:40:24

Tags: python machine-learning tensorflow conv-neural-network

I'm using TensorFlow to train a fully convolutional network for segmentation of regions of interest in MRI data, with ground-truth masks. Ideally, I'd like to produce a "probability map" of the ROI with the same shape as the input.

My network runs without errors, but when I load my weights and run a prediction, I'm left with an array of all zeros. I'm hoping someone can help me figure out why.

My network takes a 96x80 MRI slice together with a "flipped" version of it as a single batch.
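
For concreteness, the batch is assembled roughly like this (simplified; a random array stands in for a real slice, oriented so its height x width match the hard-coded [80, 96, ...] output shapes in conv_net below):

import numpy as np

# One slice plus its left-right flip, stacked into an NHWC batch of two.
slice_2d = np.random.rand(80, 96).astype(np.float32)   # stand-in for an MRI slice
batch = np.stack([slice_2d, np.fliplr(slice_2d)])      # (2, 80, 96)
batch = batch[..., np.newaxis]                         # (2, 80, 96, 1)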

My network is defined below. In short, it uses a series of convolutions and max-pooling steps to obtain heavily downsampled feature maps, then uses a series of transposed convolutions to upsample those maps back into a segmentation. In particular, in the final step I sum upsampled versions of the max-pooled feature maps (skip connections):

import tensorflow as tf

def maxpool2dWrap(x, k=2):
    # MaxPool2D wrapper: k x k pooling with stride k
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')


def conv2dWrap(x, W, b, strides=1):
    # Conv2D wrapper, with bias and relu activation
    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME', data_format='NHWC')
    x = tf.nn.bias_add(x, b)
    return tf.nn.relu(x)

def conv2dTransWrap(x, W, b, shape, strides=[1, 2, 2, 1]):
    # Transposed-conv wrapper; the output shape is assembled from the dynamic
    # batch size plus the static [height, width, channels] passed in `shape`.
    dyn_input_shape = tf.shape(x)
    batch_size = dyn_input_shape[0]
    x = tf.nn.conv2d_transpose(x, W, [batch_size, shape[0], shape[1], shape[2]], strides, padding='SAME')
    x = tf.nn.bias_add(x, b)
    return tf.nn.relu(x)

def conv_net(x, weights, biases):
    # Encoder: pairs of 3x3 convs, each pair followed by 2x2 max-pooling
    conv1   = conv2dWrap(x,     weights['wConv1'], biases['bConv1'])
    conv2   = conv2dWrap(conv1, weights['wConv2'], biases['bConv2'])
    conv2   = maxpool2dWrap(conv2)

    conv3   = conv2dWrap(conv2, weights['wConv3'], biases['bConv3'])
    conv4   = conv2dWrap(conv3, weights['wConv4'], biases['bConv4'])
    conv4   = maxpool2dWrap(conv4)

    conv5   = conv2dWrap(conv4, weights['wConv5'], biases['bConv5'])
    conv6   = conv2dWrap(conv5, weights['wConv6'], biases['bConv6'])
    conv6   = maxpool2dWrap(conv6)

    conv7   = conv2dWrap(conv6, weights['wConv7'], biases['bConv7'])
    conv8   = conv2dWrap(conv7, weights['wConv8'], biases['bConv8'])
    conv8   = maxpool2dWrap(conv8)

    conv9   = conv2dWrap(conv8, weights['wConv9'], biases['bConv9'])
    conv10  = conv2dWrap(conv9, weights['wConv10'], biases['bConv10'])
    conv11  = conv2dWrap(conv10, weights['wConv11'], biases['bConv11'])
    conv12  = conv2dWrap(conv11, weights['wConv12'], biases['bConv12'])

    # Decoder: transposed convs double the spatial size at each stage
    conv13  = conv2dTransWrap(conv12, weights['wConv13'], biases['bConv13'],[10,12,256],[1,2,2,1])
    conv14  = conv2dWrap(conv13, weights['wConv14'], biases['bConv14'])
    conv15  = conv2dWrap(conv14, weights['wConv15'], biases['bConv15'])

    conv16  = conv2dTransWrap(conv15, weights['wConv16'], biases['bConv16'],[20,24,128],[1,2,2,1])
    conv17  = conv2dWrap(conv16, weights['wConv17'], biases['bConv17'])
    conv18  = conv2dWrap(conv17, weights['wConv18'], biases['bConv18'])

    conv19  = conv2dTransWrap(conv18, weights['wConv19'], biases['bConv19'],[40,48,64],[1,2,2,1])
    conv20  = conv2dWrap(conv19, weights['wConv20'], biases['bConv20'])
    conv21  = conv2dWrap(conv20, weights['wConv21'], biases['bConv21'])

    conv22  = conv2dTransWrap(conv21, weights['wConv22'], biases['bConv22'],[80,96,32],[1,2,2,1])
    conv23  = conv2dWrap(conv22, weights['wConv23'], biases['bConv23'])
    conv24  = conv2dWrap(conv23, weights['wConv24'], biases['bConv24']) 


    # Skip connections: upsample earlier pooled feature maps to 80x96 and sum
    conv25  = conv2dTransWrap(conv4, weights['wConv25'], biases['bConv25'],[80,96,16],[1,4,4,1])
    conv26  = conv2dTransWrap(conv6, weights['wConv26'], biases['bConv26'],[80,96,16],[1,8,8,1])
    conv27  = conv2dTransWrap(conv8, weights['wConv27'], biases['bConv27'],[80,96,16],[1,16,16,1])

    features = conv24 + conv25 + conv26 + conv27

    # 3x3 conv down to a single output channel, then flatten to a vector
    convOUT = conv2dWrap(features, weights['wOUT'], biases['bOUT'])

    convOUT = tf.reshape(convOUT, [-1])
    print(convOUT.shape)
    return convOUT

My weights and biases are initialized as follows. Note that tf.nn.conv2d_transpose takes its filter as [height, width, output_channels, input_channels], so the filters for the transposed convolutions (e.g. wConv13) list output channels before input channels:

weights = {
# 3x3 conv, 1 input channel, 16 output channels
'wConv1':  tf.Variable(tf.random_normal([3, 3, 1,  16],0,.01),  name='wConv1'),
'wConv2':  tf.Variable(tf.random_normal([3, 3, 16, 16],0,.004),  name='wConv2'),
'wConv3':  tf.Variable(tf.random_normal([3, 3, 16, 32],0,.004),  name='wConv3'),
'wConv4':  tf.Variable(tf.random_normal([3, 3, 32, 32],0,.002), name='wConv4'),
'wConv5':  tf.Variable(tf.random_normal([3, 3, 32, 64],0,.002), name='wConv5'),
'wConv6':  tf.Variable(tf.random_normal([3, 3, 64, 64],0,.002),  name='wConv6'),
'wConv7':  tf.Variable(tf.random_normal([3, 3, 64, 128],0,.001),  name='wConv7'),
'wConv8':  tf.Variable(tf.random_normal([3, 3, 128, 128],0,.001),  name='wConv8'),
'wConv9':  tf.Variable(tf.random_normal([3, 3, 128, 256],0,.001),  name='wConv9'),
'wConv10':  tf.Variable(tf.random_normal([3, 3, 256, 256],0,.001),  name='wConv10'),
'wConv11':  tf.Variable(tf.random_normal([3, 3, 256, 512],0,.001),  name='wConv11'),
'wConv12':  tf.Variable(tf.random_normal([3, 3, 512, 512],0,.0007),  name='wConv12'),
'wConv13':  tf.Variable(tf.random_normal([3, 3, 256, 512],0,.0007),  name='wConv13'),
'wConv14':  tf.Variable(tf.random_normal([3, 3, 256, 256],0,.001),  name='wConv14'),
'wConv15':  tf.Variable(tf.random_normal([3, 3, 256, 256],0,.001),  name='wConv15'),
'wConv16':  tf.Variable(tf.random_normal([3, 3, 128, 256],0,.001),  name='wConv16'),
'wConv17':  tf.Variable(tf.random_normal([3, 3, 128, 128],0,.001),  name='wConv17'),
'wConv18':  tf.Variable(tf.random_normal([3, 3, 128, 128],0,.001),  name='wConv18'),
'wConv19':  tf.Variable(tf.random_normal([3, 3, 64, 128],0,.001),  name='wConv19'),
'wConv20':  tf.Variable(tf.random_normal([3, 3, 64, 64],0,.002),  name='wConv20'),
'wConv21':  tf.Variable(tf.random_normal([3, 3, 64, 64],0,.002),  name='wConv21'),
'wConv22':  tf.Variable(tf.random_normal([3, 3, 32, 64],0,.002),  name='wConv22'),
'wConv23':  tf.Variable(tf.random_normal([3, 3, 32, 32],0,.002),  name='wConv23'),
'wConv24':  tf.Variable(tf.random_normal([3, 3, 32, 16],0,.002),  name='wConv24'),
'wConv25':  tf.Variable(tf.random_normal([3, 3, 16, 32],0,.004),  name='wConv25'),
'wConv26':  tf.Variable(tf.random_normal([3, 3, 16, 64],0,.002),  name='wConv26'),
'wConv27':  tf.Variable(tf.random_normal([3, 3, 16, 128],0,.001),  name='wConv27'),

'wOUT'  :  tf.Variable(tf.random_normal([3, 3, 16, 1],0,.01),   name='wOUT')
}

biases = {
'bConv1': tf.Variable(tf.random_normal([16],0,.01),   name='bConv1'),
'bConv2': tf.Variable(tf.random_normal([16],0,.01),  name='bConv2'),
'bConv3': tf.Variable(tf.random_normal([32],0,.01),  name='bConv3'),
'bConv4': tf.Variable(tf.random_normal([32],0,.01), name='bConv4'),
'bConv5': tf.Variable(tf.random_normal([64],0,.01),  name='bConv5'),
'bConv6': tf.Variable(tf.random_normal([64],0,.01),  name='bConv6'),
'bConv7': tf.Variable(tf.random_normal([128],0,.01),  name='bConv7'),
'bConv8': tf.Variable(tf.random_normal([128],0,.01),  name='bConv8'),
'bConv9': tf.Variable(tf.random_normal([256],0,.01),  name='bConv9'),
'bConv10': tf.Variable(tf.random_normal([256],0,.01),  name='bConv10'),
'bConv11': tf.Variable(tf.random_normal([512],0,.01),  name='bConv11'),
'bConv12': tf.Variable(tf.random_normal([512],0,.01),  name='bConv12'),
'bConv13': tf.Variable(tf.random_normal([256],0,.01),  name='bConv13'),
'bConv14': tf.Variable(tf.random_normal([256],0,.01),  name='bConv14'),
'bConv15': tf.Variable(tf.random_normal([256],0,.01),  name='bConv15'),
'bConv16': tf.Variable(tf.random_normal([128],0,.01),  name='bConv16'),
'bConv17': tf.Variable(tf.random_normal([128],0,.01),  name='bConv17'),
'bConv18': tf.Variable(tf.random_normal([128],0,.01),  name='bConv18'),
'bConv19': tf.Variable(tf.random_normal([64],0,.01),  name='bConv19'),
'bConv20': tf.Variable(tf.random_normal([64],0,.01),  name='bConv20'),
'bConv21': tf.Variable(tf.random_normal([64],0,.01),  name='bConv21'),
'bConv22': tf.Variable(tf.random_normal([32],0,.01),  name='bConv22'),
'bConv23': tf.Variable(tf.random_normal([32],0,.01),  name='bConv23'),
'bConv24': tf.Variable(tf.random_normal([16],0,.01),  name='bConv24'),
'bConv25': tf.Variable(tf.random_normal([16],0,.01),  name='bConv25'),
'bConv26': tf.Variable(tf.random_normal([16],0,.01),  name='bConv26'),
'bConv27': tf.Variable(tf.random_normal([16],0,.01),  name='bConv27'),
'bOUT': tf.Variable(tf.random_normal([1],0,.01),     name='bOUT')
}
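
To double-check the shapes, the whole graph can be run end to end on a dummy batch (a quick sketch using the definitions above; zeros stand in for real data):

import numpy as np
import tensorflow as tf

# Minimal shape check: feed a dummy 2-image batch through conv_net.
x = tf.placeholder(tf.float32, [None, 80, 96, 1])
out = conv_net(x, weights, biases)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    dummy = np.zeros((2, 80, 96, 1), dtype=np.float32)
    print(sess.run(out, feed_dict={x: dummy}).shape)  # expect (15360,) = 2 * 80 * 96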

I use the Adam optimizer with tf.losses.softmax_cross_entropy as my loss function.
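
In isolation, that loss/optimizer pairing looks like this (a toy, self-contained sketch; the variable and labels stand in for my real logits and one-hot masks, and the learning rate is illustrative):

import tensorflow as tf

# Toy demonstration of the pairing: softmax cross-entropy over one-hot
# labels of shape [N, num_classes], minimized with Adam.
labels = tf.constant([[1.0, 0.0], [0.0, 1.0]])     # one-hot, [N, num_classes]
logits = tf.Variable(tf.zeros([2, 2]))             # stand-in for network logits
loss = tf.losses.softmax_cross_entropy(onehot_labels=labels, logits=logits)
train_op = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss)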
