The accuracy of my TensorFlow network does not seem to reflect its actual ability to predict my data

Date: 2017-11-27 14:43:44

Tags: python tensorflow neural-network

I have a dataset stored in the variable data, in the following format:

data = [
    {'index': 123,
     'balance': [],
     'probaility': 0.89,
     'failed': True,
     'rank': 'A'},
    {'index': 50234,
     'balance': [],
     'probaility': 0.45,
     'failed': False,
     'rank': 'B'}]

where data[i]['balance'] is a list of 44 integers, and data contains 50000 elements.
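
(The 'balance' lists in the snippet above are empty; here is a minimal, hypothetical sketch for generating synthetic records in this format — all field values below are made up — so that the code that follows is runnable:)

import random

# hypothetical synthetic records matching the schema above: 44 integers in
# 'balance' and one of the seven ranking classes in 'rank'; field names are
# copied from the snippet above (including the 'probaility' spelling)
ranks = ['A', 'B', 'C', 'D', 'E', 'F', 'Def']
data = [{'index': i,
         'balance': [random.randint(-100, 100) for _ in range(44)],
         'probaility': random.random(),
         'failed': random.random() < 0.5,
         'rank': random.choice(ranks)}
        for i in range(50000)]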

I want my network to predict the 'rank' when fed the 'balance' as input. This is the code I use to train and test the network:

import tensorflow as tf
import numpy as np
import multiprocessing as multip

# this labels data so that a firm in class A has label [1, 0, 0, 0, 0, 0, 0], a firm in
# class B [0, 1, 0, 0, 0, 0, 0] and so on
def calc_label(data):
    label = [0, 0, 0, 0, 0, 0, 0]
    if data['rank'] == 'A':
        label[0] = 1
    elif data['rank'] == 'B':
        label[1] = 1
    elif data['rank'] == 'C':
        label[2] = 1
    elif data['rank'] == 'D':
        label[3] = 1
    elif data['rank'] == 'E':
        label[4] = 1
    elif data['rank'] == 'F':
        label[5] = 1
    elif data['rank'] == 'Def':
        label[6] = 1
    return label
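
# (an equivalent, more compact version of calc_label, assuming the same
#  rank ordering, would be:
#      RANKS = ['A', 'B', 'C', 'D', 'E', 'F', 'Def']
#      def calc_label(record):
#          return [int(r == record['rank']) for r in RANKS])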


data = [
    {'index': 123,
     'balance': [],
     'probaility': 0.89,
     'failed': True,
     'rank': 'A'},
    {'index': 50234,
     'balance': [],
     'probaility': 0.45,
     'failed': False,
     'rank': 'B'}]

features = [x['balance'] for x in data]
labels = [calc_label(x) for x in data]

train_size = int(len(labels) * 0.9)
train_y = labels[:train_size]
test_y = labels[train_size:]
train_x = features[:train_size]
test_x = features[train_size:]

classes_n = len(labels[0])
nodes_per_layer = [100, 100]
hidden_layers_n = len(nodes_per_layer)
batch_size = 50000
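# (note: batch_size above is at least the training-set size of 45000 examples
#  after the 90% split, so each epoch performs a single full-batch gradient step)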
epochs = 500
print_step = 50
saving_step = 100

x = tf.placeholder('float', [None, len(features[0])])
y = tf.placeholder('float', [None, classes_n])

current_epoch = tf.Variable(1)

layers = [{'weights': tf.Variable(tf.random_normal([len(features[0]), nodes_per_layer[0]])),
           'biases': tf.Variable(tf.random_normal([nodes_per_layer[0]]))}]

for i in range(1, hidden_layers_n):
    layers.append({'weights': tf.Variable(tf.random_normal([nodes_per_layer[i - 1], nodes_per_layer[i]])),
                   'biases': tf.Variable(tf.random_normal([nodes_per_layer[i]]))})

output_layer = {'weights': tf.Variable(tf.random_normal([nodes_per_layer[-1], classes_n])),
                'biases': tf.Variable(tf.random_normal([classes_n]))}


def neural_network_model(data):
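    # note: the `data` argument is never used; the layers below always read
    # the global placeholder `x`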
    l = []

    l.append(tf.add(tf.matmul(x, layers[0]['weights']), layers[0]['biases']))
    l[0] = tf.nn.relu(l[0])

    for i in range(1, hidden_layers_n):
        l.append(tf.add(tf.matmul(l[i - 1], layers[i]['weights']), layers[i]['biases']))
        l[i] = tf.nn.relu(l[i])

    output = tf.add(tf.matmul(l[hidden_layers_n - 1], output_layer['weights']), output_layer['biases'])

    return output


def train_neural_network(x):
    prediction = neural_network_model(x)

    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))

    optimizer = tf.train.AdamOptimizer().minimize(cost)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        epoch = 1

        print('Starting training...')
        while epoch <= epochs:
            epoch_loss = 1
            i = 0
            while i < len(train_x):
                start = i
                end = i + batch_size
                batch_x = np.array(train_x[start:end])
                batch_y = np.array(train_y[start:end])
                _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y})
                epoch_loss += c
                i += batch_size

            if (epoch + 1) % print_step == 0:
                print('Epoch', epoch + 1, 'out of',
                      '{} completed,'.format(epochs), 'loss:', epoch_loss)
                correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
                accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
                accuracy_number = accuracy.eval({x: test_x, y: test_y})
                accuracy_number_training_set = accuracy.eval({x: train_x, y: train_y})
                print('Train accuracy:', accuracy_number_training_set)
                print('Test accuracy:', accuracy_number)
            epoch += 1

train_neural_network(x)


# this function converts predictions expressed as numbers into the letters of the
# corresponding ranking classes, for example 0 -> A, 1 -> B, 2 -> C and so on.
def convert_prediction(value):
    predict = ''
    if value == 6:
        predict = 'Def'
    elif value == 5:
        predict = 'F'
    elif value == 4:
        predict = 'E'
    elif value == 3:
        predict = 'D'
    elif value == 2:
        predict = 'C'
    elif value == 1:
        predict = 'B'
    elif value == 0:
        predict = 'A'
    return predict


def use_neural_network(input_data):
    prediction = neural_network_model(x)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        feed_list = [(k['index'], k['balance']) for k in input_data]
        indexes = [k[0] for k in feed_list]
        predictions = sess.run(tf.argmax(prediction.eval(feed_dict={x: [k[1] for k in feed_list]}), 1))
        predictions = np.array([convert_prediction(value) for value in predictions])
        result = list(zip(indexes, predictions))
        return result

if __name__ == '__main__':

    prediction = use_neural_network(data)

    print('\nCalculating errors...')

    predictions_dict = {'A': [],
                        'B': [],
                        'C': [],
                        'D': [],
                        'E': [],
                        'F': [],
                        'Def': []}

    def create_predictions_dict(index, rank):
            for j in data:
                if j['index'] == index:
                    return index, j['rank'], rank

    # use a separate name for the CPU count so that numpy's `np` alias is not shadowed
    n_procs = multip.cpu_count()
    p = multip.Pool(processes=n_procs)
    predictions_list = p.starmap(create_predictions_dict, prediction[:5000])
    p.close()
    p.join()

    for elem in predictions_list:
        predictions_dict[elem[1]].append(elem)

    def is_correct(x):
        if x[1] == x[2]:
            return 1
        else:
            return 0
    correct_guesses = sum(is_correct(x) for x in predictions_list)
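    # note: predictions_list was built from only the first 5000 predictions
    # (prediction[:5000]), while the ratio below divides by len(data), i.e. 50000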
    correct_ratio = correct_guesses / len(data)

    print('correct:', correct_ratio)

After 5000 epochs, these are the results I get:

Epoch 5000 out of 5000 completed, loss: 9.91669559479
Train accuracy: 0.992933
Test accuracy: 0.9686
Calculating errors...
correct: 0.02336

What I really don't understand is how the accuracy computed by TensorFlow's built-in methods can be so high while the accuracy I compute by hand is so low. In general, when I extract the predictions and check them myself, the higher the accuracy TF reports, the fewer correct predictions I find.

This makes me think that, instead of training the network to guess as correctly as possible, I might be training it to guess as wrongly as possible. Still, I have failed to see where the problem lies. Maybe in the cost function?

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
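
(For reference, this cost function does reward correct guesses; a quick NumPy check with made-up logits for a 3-class example:)

import numpy as np

# softmax cross-entropy is lower when the logit of the true class is
# higher, so minimizing it pushes the network toward correct guesses
def softmax_xent(logits, onehot):
    p = np.exp(logits - logits.max())
    p /= p.sum()
    return -np.sum(onehot * np.log(p))

true_label = np.array([1.0, 0.0, 0.0])
print(softmax_xent(np.array([5.0, 1.0, 1.0]), true_label))  # ~0.04: confident and correct
print(softmax_xent(np.array([1.0, 5.0, 1.0]), true_label))  # ~4.04: confident and wrong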

--- EDIT ---

As suggested in the answer, I have corrected the restoring of the variables in the test phase, but I still get a very low accuracy (around 0.1). Here is the updated code:

import tensorflow as tf
import numpy as np
import multiprocessing as multip

# this labels data so that a firm in class A has label [1, 0, 0, 0, 0, 0, 0], a firm in
# class B [0, 1, 0, 0, 0, 0, 0] and so on
def calc_label(data):
    label = [0, 0, 0, 0, 0, 0, 0]
    if data['rank'] == 'A':
        label[0] = 1
    elif data['rank'] == 'B':
        label[1] = 1
    elif data['rank'] == 'C':
        label[2] = 1
    elif data['rank'] == 'D':
        label[3] = 1
    elif data['rank'] == 'E':
        label[4] = 1
    elif data['rank'] == 'F':
        label[5] = 1
    elif data['rank'] == 'Def':
        label[6] = 1
    return label


data = [
    {'index': 123,
     'balance': [],
     'probaility': 0.89,
     'failed': True,
     'rank': 'A'},
    {'index': 50234,
     'balance': [],
     'probaility': 0.45,
     'failed': False,
     'rank': 'B'}]


features_and_labels = [[x['balance'], calc_label(x)] for x in data]
features = [x[0] for x in features_and_labels]
labels = [x[1] for x in features_and_labels]

train_size = int(len(labels) * 0.9)
train_y = labels[:train_size]
test_y = labels[train_size:]
train_x = features[:train_size]
test_x = features[train_size:]

classes_n = len(labels[0])
nodes_per_layer = [100, 100]
hidden_layers_n = len(nodes_per_layer)
batch_size = 50000
epochs = 1000
print_step = 50
saving_step = 100

x = tf.placeholder('float', [None, len(features[0])])
y = tf.placeholder('float', [None, classes_n])

current_epoch = tf.Variable(1)

layers = [{'weights': tf.Variable(tf.random_normal([len(features[0]), nodes_per_layer[0]])),
           'biases': tf.Variable(tf.random_normal([nodes_per_layer[0]]))}]

for i in range(1, hidden_layers_n):
    layers.append({'weights': tf.Variable(tf.random_normal([nodes_per_layer[i - 1], nodes_per_layer[i]])),
                   'biases': tf.Variable(tf.random_normal([nodes_per_layer[i]]))})

output_layer = {'weights': tf.Variable(tf.random_normal([nodes_per_layer[-1], classes_n])),
                'biases': tf.Variable(tf.random_normal([classes_n]))}


def neural_network_model(data):
    l = []

    l.append(tf.add(tf.matmul(x, layers[0]['weights']), layers[0]['biases']))
    l[0] = tf.nn.relu(l[0])

    for i in range(1, hidden_layers_n):
        l.append(tf.add(tf.matmul(l[i - 1], layers[i]['weights']), layers[i]['biases']))
        l[i] = tf.nn.relu(l[i])

    output = tf.add(tf.matmul(l[hidden_layers_n - 1], output_layer['weights']), output_layer['biases'])

    return output


saver = tf.train.Saver()
tf_log = 'tf.log'


def train_neural_network(x):
    prediction = neural_network_model(x)

    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))

    optimizer = tf.train.AdamOptimizer().minimize(cost)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        try:
            epoch = int(open(tf_log, 'r').read().split('\n')[-2]) + 1
            print('Starting epoch:', epoch)
        except:
            epoch = 1

        if epoch != 1:
            saver.restore(sess, "model.ckpt")

        print('Starting training...')
        while epoch <= epochs:
            epoch_loss = 1
            i = 0
            while i < len(train_x):
                start = i
                end = i + batch_size
                batch_x = np.array(train_x[start:end])
                batch_y = np.array(train_y[start:end])
                _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y})
                epoch_loss += c
                i += batch_size

            if (epoch + 1) % print_step == 0:
                print('Epoch', epoch + 1, 'out of',
                      '{} completed,'.format(epochs), 'loss:', epoch_loss)
                correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
                accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
                accuracy_number = accuracy.eval({x: test_x, y: test_y})
                accuracy_number_training_set = accuracy.eval({x: train_x, y: train_y})
                print('Train accuracy:', accuracy_number_training_set)
                print('Test accuracy:', accuracy_number)

            if epoch == 1:
                saver.save(sess, "model.ckpt")
            if (epoch + 1) % saving_step == 0:
                saver.save(sess, "model.ckpt")
                # print('Epoch', epoch, 'completed out of', epochs, 'loss:', epoch_loss)
                with open(tf_log, 'a') as f:
                    f.write(str(epoch) + '\n')
            epoch += 1

train_neural_network(x)

# this function converts predictions expressed as numbers into the letters of the
# corresponding ranking classes, for example 0 -> A, 1 -> B, 2 -> C and so on.
def convert_prediction(value):
    predict = ''
    if value == 6:
        predict = 'Def'
    elif value == 5:
        predict = 'F'
    elif value == 4:
        predict = 'E'
    elif value == 3:
        predict = 'D'
    elif value == 2:
        predict = 'C'
    elif value == 1:
        predict = 'B'
    elif value == 0:
        predict = 'A'
    return predict


def use_neural_network(input_data):
    prediction = neural_network_model(x)

    with tf.Session() as sess:
        for word in ['weights', 'biases']:
            output_layer[word].initializer.run()
            for variable in layers:
                variable[word].initializer.run()
        saver.restore(sess, "model.ckpt")
        feed_list = [(k['index'], k['balance']) for k in input_data]
        indexes = [k[0] for k in feed_list]
        predictions = sess.run(tf.argmax(prediction.eval(feed_dict={x: [k[1] for k in feed_list]}), 1))
        predictions = np.array([convert_prediction(value) for value in predictions])
        result = list(zip(indexes, predictions))
        return result

if __name__ == '__main__':

    prediction = use_neural_network(data)

    print('\nCalculating errors...')

    predictions_dict = {'A': [],
                        'B': [],
                        'C': [],
                        'D': [],
                        'E': [],
                        'F': [],
                        'Def': []}

    def create_predictions_dict(index, rank):
            for j in data:
                # checks which predictions are made to which firms and adds them to predictions_dict
                if j['index'] == index:
                    return index, j['rank'], rank

    # use a separate name for the CPU count so that numpy's `np` alias is not shadowed
    n_procs = multip.cpu_count()
    p = multip.Pool(processes=n_procs)
    predictions_list = p.starmap(create_predictions_dict, prediction[:5000])
    p.close()
    p.join()

    for elem in predictions_list:
        predictions_dict[elem[1]].append(elem)

    def is_correct(x):
        if x[1] == x[2]:
            return 1
        else:
            return 0
    correct_guesses = sum(is_correct(x) for x in predictions_list)
    correct_ratio = correct_guesses / len(data)

    print('correct:', correct_ratio)

1 Answer:

Answer 0 (score: 1):

In your code:

def use_neural_network(input_data):
    prediction = neural_network_model(x)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer()) #<<<<<<<<<<<<<<<<<<

tf.global_variables_initializer initializes all of the variables in the network, i.e., it wipes out all of the training that has been done. What you want to do instead is save the network weights to a checkpoint at the end of training, and then load the learned weights back into your network's variables via tf.train.Saver() and its restore() method.
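
A minimal sketch of this save/restore pattern (TF 1.x, with a toy variable standing in for the network; "model.ckpt" matches the checkpoint path already used in your updated code):

import tensorflow as tf

# toy variable standing in for the trained network weights
w = tf.Variable(tf.random_normal([2, 2]), name='w')
saver = tf.train.Saver()

# training session: initialize, train, then save a checkpoint
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # ... training steps would run here ...
    saver.save(sess, "model.ckpt")

# inference session: restore the checkpoint instead of re-initializing;
# running the initializer here would overwrite the learned values
with tf.Session() as sess:
    saver.restore(sess, "model.ckpt")
    print(sess.run(w))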

Note that there is an in-depth tutorial on the TensorFlow website on how to save and restore the network weights.