TensorFlow错误:logits和标签的大小必须相同?

时间:2018-01-01 15:34:54

标签: python-3.x tensorflow machine-learning computer-vision tensorflow-gpu

已下载CBIS-DDSM数据集,即DICOM格式的修订版DDSM数据集。使用此数据集,我一直在尝试根据YouTuber" sentdex'使用的代码运行CNN。大部分数据集的预处理已经整理出来,但正确运行CNN已经变得令人不安。我认为这一行阻止了整个CNN代码的功能:

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction,
                                                            labels=y))

即使我改变了部分

'labels=y' 

'labels=tf.reshape(y, [1,2])'

,我仍然有相同的错误消息。我试图绕过' tf.nn.softmax_cross_entropy_with_logits'与数字不稳定的版本。然后,可以运行CNN。但我只得到了' nan'。关于我应该做什么的任何建议?我真的卡住了。

这是我的代码的摘录

IMG_SIZE_PX = 256
N_CLASSES = 2

def convolutional_neural_network(x):
    weights = {'W_conv1':tf.Variable(tf.random_normal([5,5,1,32])),
               'W_conv2':tf.Variable(tf.random_normal([5,5,32,64])),
               'W_fc':tf.Variable(tf.random_normal([IMG_SIZE_PX*IMG_SIZE_PX,1024])),
               'out':tf.Variable(tf.random_normal([1024, N_CLASSES]))}

    biases = {'b_conv1':tf.Variable(tf.random_normal([32])),
               'b_conv2':tf.Variable(tf.random_normal([64])),
               'b_fc':tf.Variable(tf.random_normal([1024])),
               'out':tf.Variable(tf.random_normal([N_CLASSES]))}

    x = tf.reshape(x, shape=[-1, IMG_SIZE_PX, IMG_SIZE_PX, 1])

    conv1 = tf.nn.relu(conv2d(x, weights['W_conv1']) + biases['b_conv1'])
    conv1 = maxpool2d(conv1)

    conv1Shape = conv1.get_shape().as_list()
    print ("Conv1 Shape! : " + str(conv1Shape))

    conv2 = tf.nn.relu(conv2d(conv1, weights['W_conv2']) + biases['b_conv2'])
    conv2 = maxpool2d(conv2)

    conv2Shape = conv2.get_shape().as_list()
    print ("Conv2 Shape! : " + str(conv2Shape))

    fc = tf.reshape(conv2,[-1, 64 * 64 * 16])
    fc = tf.nn.relu(tf.matmul(fc, weights['W_fc'])+biases['b_fc'])
    fc = tf.nn.dropout(fc, KEEP_RATE)

    output = tf.matmul(fc, weights['out'])+biases['out']
    return output

def train_neural_network(x, EnableUnstableNumericalCalc = False):
    #   Loading the Input File; list of (Numpy array, label)
    Data_Chunk = np.load('MTrain-test-calc-256.npy')
    train_data = Data_Chunk[:NegNumber]
    validation_data = Data_Chunk[NegNumber:]

    #   Training part
    prediction = convolutional_neural_network(x)
    predShape = prediction.get_shape().as_list()

    if EnableUnstableNumericalCalc == False:
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction,
                                                                labels=y))

    else:
        intermed    = tf.nn.softmax(prediction)
        cost        = -tf.reduce_sum(y * tf.log(intermed), 1)

    optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)    
    hm_epochs = NB_EPOCHS

    print("===============================================================================================")
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        successful_runs = 0
        total_runs = 0

        for epoch in range(hm_epochs):
            epoch_loss = 0
            for data in train_data:
                total_runs += 1

                X = data[0]
                Y = data[1]

#                try:
                _, c            =   sess.run([optimizer, cost], feed_dict={x: X, y: Y})

                epoch_loss      +=  c
                successful_runs +=  1

                """
                except InvalidArgumentError:
                    print("***** Error(Training): Invalid Arument Error! *****")
                    '''
                    print("X = {}".format(X))
                    print("Y = {}".format(Y))
                    print(str(e))
                    '''
                    pass
                """

            try:
                correct     = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
                accuracy    = tf.reduce_mean(tf.cast(correct, 'float'))

                print('Epoch', epoch + 1, 'completed out of', hm_epochs,'loss:', epoch_loss)
                print('Accuracy:',accuracy.eval({x:[i[0] for i in validation_data], y:[i[1] for i in validation_data]}))

            except Exception as e:
                print("!: Invalid information. Accuracy and fitment percent could not be computed.!")

这是错误信息:

Traceback (most recent call last):
  File "C:\Pythons\Python35\lib\site-packages\tensorflow\python\client\session.py", line 1323, in _do_call
    return fn(*args)
  File "C:\Pythons\Python35\lib\site-packages\tensorflow\python\client\session.py", line 1302, in _run_fn
    status, run_metadata)
  File "C:\Pythons\Python35\lib\site-packages\tensorflow\python\framework\errors_impl.py", line 473, in __exit__
    c_api.TF_GetCode(self.status.status))
tensorflow.python.framework.errors_impl.InvalidArgumentError: logits and labels must be same size: logits_size=[4,2] labels_size=[1,2]
         [[Node: SoftmaxCrossEntropyWithLogits = SoftmaxCrossEntropyWithLogits[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:GPU:0"](Reshape_2, Reshape_3)]]

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "ConvNet.py", line 149, in <module>
    train_neural_network(x)
  File "ConvNet.py", line 120, in train_neural_network
    _, c            =   sess.run([optimizer, cost], feed_dict={x: X, y: Y})
  File "C:\Pythons\Python35\lib\site-packages\tensorflow\python\client\session.py", line 889, in run
    run_metadata_ptr)
  File "C:\Pythons\Python35\lib\site-packages\tensorflow\python\client\session.py", line 1120, in _run
    feed_dict_tensor, options, run_metadata)
  File "C:\Pythons\Python35\lib\site-packages\tensorflow\python\client\session.py", line 1317, in _do_run
    options, run_metadata)
  File "C:\Pythons\Python35\lib\site-packages\tensorflow\python\client\session.py", line 1336, in _do_call
    raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: logits and labels must be same size: logits_size=[4,2] labels_size=[1,2]
         [[Node: SoftmaxCrossEntropyWithLogits = SoftmaxCrossEntropyWithLogits[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:GPU:0"](Reshape_2, Reshape_3)]]

Caused by op 'SoftmaxCrossEntropyWithLogits', defined at:
  File "ConvNet.py", line 149, in <module>
    train_neural_network(x)
  File "ConvNet.py", line 95, in train_neural_network
    labels=y))
  File "C:\Pythons\Python35\lib\site-packages\tensorflow\python\ops\nn_ops.py", line 1783, in softmax_cross_entropy_with_logits
    precise_logits, labels, name=name)
  File "C:\Pythons\Python35\lib\site-packages\tensorflow\python\ops\gen_nn_ops.py", line 4363, in _softmax_cross_entropy_with_logits
    name=name)
  File "C:\Pythons\Python35\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 787, in _apply_op_helper
    op_def=op_def)
  File "C:\Pythons\Python35\lib\site-packages\tensorflow\python\framework\ops.py", line 2956, in create_op
    op_def=op_def)
  File "C:\Pythons\Python35\lib\site-packages\tensorflow\python\framework\ops.py", line 1470, in __init__
    self._traceback = self._graph._extract_stack()  # pylint: disable=protected-access

InvalidArgumentError (see above for traceback): logits and labels must be same size: logits_size=[4,2] labels_size=[1,2]
         [[Node: SoftmaxCrossEntropyWithLogits = SoftmaxCrossEntropyWithLogits[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:GPU:0"](Reshape_2, Reshape_3)]]

0 个答案:

没有答案