Converting a tensor to a numpy array

Date: 2018-06-19 14:51:46

Tags: numpy tensorflow

I wrote the following code to extract features from two images with a deep CNN, using TensorFlow:

# -*- coding: utf-8 -*-
# Implementation of Wang et al 2017: Automatic Brain Tumor Segmentation using Cascaded Anisotropic Convolutional Neural Networks. https://arxiv.org/abs/1709.00382

# Author: Guotai Wang
# Copyright (c) 2017-2018 University College London, United Kingdom. All rights reserved.
# http://cmictig.cs.ucl.ac.uk
#
# Distributed under the BSD-3 licence. Please see the file licence.txt
# This software is not certified for clinical use.
#
from __future__ import absolute_import, print_function
import numpy as np
from scipy import ndimage
import time
import os
import sys
import pickle
import tensorflow as tf
from tensorflow.contrib.data import Iterator
from util.data_loader import *
from util.data_process import *
from util.train_test_func import *
from util.parse_config import parse_config
from train import NetFactory
print("import finished")
def test(config_file):
    # 1, load configure file
    config = parse_config(config_file)
    config_data = config['data']
    config_net1 = config.get('network1', None)
    config_net2 = config.get('network2', None)
    config_net3 = config.get('network3', None)
    config_test = config['testing']  
    batch_size  = config_test.get('batch_size', 5)
    print("configure file loaded")

    # 2.1, network for whole tumor
    if(config_net1):
        net_type1    = config_net1['net_type']
        net_name1    = config_net1['net_name']
        data_shape1  = config_net1['data_shape']
        label_shape1 = config_net1['label_shape']
        class_num1   = config_net1['class_num']
        print("configure file of whole tumor is loaded")

        # construct graph for 1st network
        full_data_shape1 = [batch_size] + data_shape1
        x1 = tf.placeholder(tf.float32, shape = full_data_shape1)          
        net_class1 = NetFactory.create(net_type1)
        net1 = net_class1(num_classes = class_num1,w_regularizer = None,
                    b_regularizer = None, name = net_name1)
        net1.set_params(config_net1)
        predicty1, caty1 = net1(x1, is_training = True)
        proby1 = tf.nn.softmax(predicty1)
    else:
        config_net1ax = config['network1ax']
        config_net1sg = config['network1sg']
        config_net1cr = config['network1cr']
        print("configure files of whole tumor in three planes are loaded")

        # construct graph for 1st network axial
        net_type1ax    = config_net1ax['net_type']
        net_name1ax    = config_net1ax['net_name']
        data_shape1ax  = config_net1ax['data_shape']
        label_shape1ax = config_net1ax['label_shape']
        class_num1ax   = config_net1ax['class_num']

        full_data_shape1ax = [batch_size] + data_shape1ax
        x1ax = tf.placeholder(tf.float32, shape = full_data_shape1ax)          
        net_class1ax = NetFactory.create(net_type1ax)
        net1ax = net_class1ax(num_classes = class_num1ax,w_regularizer = None,
                    b_regularizer = None, name = net_name1ax)
        net1ax.set_params(config_net1ax)
        predicty1ax, caty1ax = net1ax(x1ax, is_training = True)
        proby1ax = tf.nn.softmax(predicty1ax)
        print("graph for 1st network1ax is constructed")

        # construct graph for 1st network sagittal
        net_type1sg    = config_net1sg['net_type']
        net_name1sg    = config_net1sg['net_name']
        data_shape1sg  = config_net1sg['data_shape']
        label_shape1sg = config_net1sg['label_shape']
        class_num1sg   = config_net1sg['class_num']

        full_data_shape1sg = [batch_size] + data_shape1sg
        x1sg = tf.placeholder(tf.float32, shape = full_data_shape1sg)          
        net_class1sg = NetFactory.create(net_type1sg)
        net1sg = net_class1sg(num_classes = class_num1sg,w_regularizer = None,
                    b_regularizer = None, name = net_name1sg)
        net1sg.set_params(config_net1sg)
        predicty1sg, caty1sg = net1sg(x1sg, is_training = True)
        proby1sg = tf.nn.softmax(predicty1sg)
        print("graph for 1st network1sg is constructed")

        # construct graph for 1st network coronal
        net_type1cr    = config_net1cr['net_type']
        net_name1cr    = config_net1cr['net_name']
        data_shape1cr  = config_net1cr['data_shape']
        label_shape1cr = config_net1cr['label_shape']
        class_num1cr   = config_net1cr['class_num']

        full_data_shape1cr = [batch_size] + data_shape1cr
        x1cr = tf.placeholder(tf.float32, shape = full_data_shape1cr)          
        net_class1cr = NetFactory.create(net_type1cr)
        net1cr = net_class1cr(num_classes = class_num1cr,w_regularizer = None,
                    b_regularizer = None, name = net_name1cr)
        net1cr.set_params(config_net1cr)
        predicty1cr, caty1cr = net1cr(x1cr, is_training = True)
        proby1cr = tf.nn.softmax(predicty1cr)
        print("graph for 1st network1cr is constructed")

    # 3, create session and load trained models
    all_vars = tf.global_variables()
    sess = tf.InteractiveSession()   
    sess.run(tf.global_variables_initializer())  
    if(config_net1):
        net1_vars = [x for x in all_vars if x.name[0:len(net_name1) + 1]==net_name1 + '/']
        saver1 = tf.train.Saver(net1_vars)
        saver1.restore(sess, config_net1['model_file'])
    else:
        net1ax_vars = [x for x in all_vars if x.name[0:len(net_name1ax) + 1]==net_name1ax + '/']
        saver1ax = tf.train.Saver(net1ax_vars)
        saver1ax.restore(sess, config_net1ax['model_file'])
        net1sg_vars = [x for x in all_vars if x.name[0:len(net_name1sg) + 1]==net_name1sg + '/']
        saver1sg = tf.train.Saver(net1sg_vars)
        saver1sg.restore(sess, config_net1sg['model_file'])     
        net1cr_vars = [x for x in all_vars if x.name[0:len(net_name1cr) + 1]==net_name1cr + '/']
        saver1cr = tf.train.Saver(net1cr_vars)
        saver1cr.restore(sess, config_net1cr['model_file'])
        print("all variables of net1 is saved")

    # 4, load test images
    dataloader = DataLoader(config_data)
    dataloader.load_data()
    image_num = dataloader.get_total_image_number()

    # 5, start to test
    test_slice_direction = config_test.get('test_slice_direction', 'all')
    save_folder = config_data['save_folder']
    test_time = []
    struct = ndimage.generate_binary_structure(3, 2)
    margin = config_test.get('roi_patch_margin', 5)

    x=['x1','x2']
    paddings=tf.constant([[0,0],[0,0],[10,10],[0,0],[0,0]])
    for i in range(image_num):
        [temp_imgs, temp_weight, temp_name, img_names, temp_bbox, temp_size] = dataloader.get_image_data_with_name(i)
        t0 = time.time()
        # 5.1, test of 1st network
        if(config_net1):
            data_shapes  = [ data_shape1[:-1],  data_shape1[:-1],  data_shape1[:-1]]
            label_shapes = [label_shape1[:-1], label_shape1[:-1], label_shape1[:-1]]
            nets = [net1, net1, net1]
            outputs = [proby1, proby1, proby1]
            inputs =  [x1, x1, x1]
            class_num = class_num1
        else:
            data_shapes  = [ data_shape1ax[:-1],  data_shape1sg[:-1],  data_shape1cr[:-1]]
            label_shapes = [label_shape1ax[:-1], label_shape1sg[:-1], label_shape1cr[:-1]]
            nets = [net1ax, net1sg, net1cr]
            outputs = [proby1ax, proby1sg, proby1cr]
            inputs =  [x1ax, x1sg, x1cr]
            class_num = class_num1ax
        predi=tf.concat([predicty1ax,tf.reshape(predicty1sg,[5,11,180,160,2]),tf.pad(predicty1cr,paddings,"CONSTANT")],0)
        cati=tf.concat([caty1ax,tf.reshape(caty1sg,[5,11,180,160,14]),tf.pad(caty1cr,paddings,"CONSTANT")],0)
        prob1 = test_one_image_three_nets_adaptive_shape(temp_imgs, data_shapes, label_shapes, data_shape1ax[-1], class_num,
                   batch_size, sess, nets, outputs, inputs, shape_mode = 0)
        pred1 =  np.asarray(np.argmax(prob1, axis = 3), np.uint16)
        pred1 = pred1 * temp_weight
        print("net1 is tested")
        globals()[x[i]]=predi
        test_time.append(time.time() - t0)
        print(temp_name)
    test_time = np.asarray(test_time)
    print('test time', test_time.mean())
    np.savetxt(save_folder + '/test_time.txt', test_time)

if __name__ == '__main__':
    if(len(sys.argv) != 2):
        print('Number of arguments should be 2. e.g.')
        print('    python test.py config17/test_all_class.txt')
        exit()
    config_file = str(sys.argv[1])
    assert(os.path.isfile(config_file))
    test(config_file)
    y=tf.stack([x1,x2],0)
    z=tf.Session().run(y)

The output is a tensor (y), and I want to convert it into a numpy array with tf.Session().run(), but I get this error:

  

InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'Placeholder' with dtype float and shape [5,19,180,160,4]
     [[Node: Placeholder = Placeholder[dtype=DT_FLOAT, shape=[5,19,180,160,4], _device="/job:localhost/replica:0/task:0/device:GPU:0"]]]

1 Answer:

Answer 0 (score: 0)

Note that this answer is based on a deep look into the crystal ball, predicting code that seems to be classified; at least it is not written in the question itself.

Take a look at the error message:

  

InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor

And that is exactly what is wrong with your code. Condensed, your code is essentially the following (and it has many issues):

import tensorflow as tf

x1 = tf.placeholder(tf.float32, [None, 3])
y = tf.layers.dense(x1, 2)

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

print(tf.Session().run(y))

There is no way to evaluate the output tensor y without knowing the value of x1, since y depends on that value.
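In graph mode, y at this point is only a symbolic handle into the graph; it carries no numbers until a session runs it with a value for x1. Continuing the snippet above, a quick way to see this:

print(type(y))   # <class 'tensorflow.python.framework.ops.Tensor'>, a graph node
print(y)         # prints the tensor's name, shape and dtype, not any values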

1. Fix: use proper names

import tensorflow as tf

x1 = tf.placeholder(tf.float32, [None, 3], name='my_input')
y = tf.layers.dense(x1, 2, name='fc1')

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

print(tf.Session().run(y))

Now the error message becomes much clearer:

  

tensorflow.python.framework.errors_impl.InvalidArgumentError: You must feed a value for placeholder tensor 'my_input' with dtype float and shape [?,3]

2. Fix: provide a feed_dict

To let TensorFlow know which value the computation of y should be based on, you need to feed it into the graph:

import tensorflow as tf

x1 = tf.placeholder(tf.float32, [None, 3], name='my_input')
y = tf.layers.dense(x1, 2, name='fc1')

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
np_result = tf.Session().run(y, feed_dict={x1: [[42, 43, 44]]})

Now this reveals a second issue in your code. You have two sessions:

  • sess = tf.InteractiveSession() (session_a)
  • the tf.Session() inside tf.Session().run() (session_b)

Now, session_a is the one that holds all the initialized variables, since your code contains

sess.run(tf.global_variables_initializer())

But a second session is created by tf.Session().run(...), which leaves you with a new error message:

  

FailedPreconditionError (see above for traceback): Attempting to use uninitialized value ...

3. Fix: use only one session

import tensorflow as tf

x1 = tf.placeholder(tf.float32, [None, 3], name='my_input')
y = tf.layers.dense(x1, 2, name='fc1')

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
np_result = sess.run(y, feed_dict={x1: [[42, 43, 44]]})
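At this point np_result is already a plain numpy array; sess.run returns numpy values for any tensor it fetches, so this is exactly the tensor-to-numpy conversion the question asks about. A quick check, continuing the snippet above:

print(type(np_result))   # <class 'numpy.ndarray'>
print(np_result.shape)   # (1, 2): one row was fed in, the dense layer has 2 units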

And, to give the best solution:

import tensorflow as tf

# construct graph somewhere
x1 = tf.placeholder(tf.float32, [None, 3], name='my_input')
y = tf.layers.dense(x1, 2, name='fc1')

with tf.Session() as sess:
    # init variables / or load them
    sess.run(tf.global_variables_initializer())
    # make sure that no operations will be added to the graph
    sess.graph.finalize()

    # fetch result as numpy array
    np_result = sess.run(y, feed_dict={x1: [[42, 43, 44]]})
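Applied to the code in the question, the same pattern means fetching predi inside the one session that restored the model weights (the three-plane branch), with a feed_dict supplying the placeholders x1ax, x1sg and x1cr, rather than calling tf.Session().run() on a fresh session after test() returns. A rough sketch only; batch_ax, batch_sg and batch_cr are hypothetical numpy arrays with the shapes the placeholders expect, standing in for whatever the data loader actually produces:

# inside test(), in the same sess that ran saver1ax/saver1sg/saver1cr.restore(...)
feed = {x1ax: batch_ax,   # hypothetical array of shape full_data_shape1ax, float32
        x1sg: batch_sg,   # hypothetical array of shape full_data_shape1sg, float32
        x1cr: batch_cr}   # hypothetical array of shape full_data_shape1cr, float32
predi_np = sess.run(predi, feed_dict=feed)   # predi_np is already a numpy array
print(type(predi_np), predi_np.shape)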

The code you wrote yourself, or copied from somewhere, is a good demonstration of how not to write TensorFlow code.

One last remark:

TensorFlow forces you to create a clean structure. This is important. Following that structure should become a habit; after a while, you will immediately spot the parts that smell like wrong code.

If you use an entire network, just replace tf.layers.dense with my_network_definition, where

def my_network_definition(x1):
    output = ...
    return output
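For illustration only, a minimal sketch of what such a definition could look like; the layer sizes and names here are made up and not taken from the question:

import tensorflow as tf

def my_network_definition(x1):
    # two fully connected layers; sizes are arbitrary for this sketch
    hidden = tf.layers.dense(x1, 64, activation=tf.nn.relu, name='fc1')
    output = tf.layers.dense(hidden, 2, name='fc2')
    return output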

In PyTorch, you can write things in the arbitrary style used in the question. I am not saying you should, but it is possible. So please try to follow the structure that TensorFlow expects.

Dear PyTorch users, I am looking forward to your feedback.
