Neural network error never changes

Time: 2018-01-15 22:23:13

Tags: python tensorflow neural-network deep-learning recurrent-neural-network

I am building a recurrent neural network based on this link: intro to RNN in Tensorflow. But when I try to run it, the error on the test set stays at 19.1%, and the train and validation sets give similar numbers.

From what I can tell, the network does not seem to be updating its weights and biases at all, but I cannot work out why.
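
One way to check whether the weights are actually being updated is to read a trainable variable before and after an optimization step and compare the two values. A minimal sketch, assuming the model, sess, and placeholders defined in the code below:

w = tf.trainable_variables()[0]
before = sess.run(w)
sess.run(model.optimize, {data: batch_x, target: batch_y, dropout: 0.5})
after = sess.run(w)
print('weights changed:', not np.allclose(before, after))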

Here is the code I am running:

import functools

import numpy as np
import tensorflow as tf


def lazy_property(function):
    attribute = '_' + function.__name__

    @property
    @functools.wraps(function)
    def wrapper(self):
        if not hasattr(self, attribute):
            setattr(self, attribute, function(self))
        return getattr(self, attribute)
    return wrapper
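
# Note on the decorator: it memoizes a property. The first access to, e.g.,
# model.prediction builds the graph ops and caches the result on the instance
# as _prediction; later accesses return the cached tensor instead of adding
# duplicate ops to the graph.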


class SequenceClassification:

    def __init__(self, data, target, dropout, num_hidden=200, num_layers=3):
        self.data = data
        self.target = target
        self.dropout = dropout
        self._num_hidden = num_hidden
        self._num_layers = num_layers
        self.prediction
        self.error
        self.optimize

    @lazy_property
    def prediction(self):
        # Recurrent network.
        cells = []
        for _ in range(self._num_layers):
            cell = tf.contrib.rnn.GRUCell(self._num_hidden) # Or LSTMCell(num_units)
            cell = tf.contrib.rnn.DropoutWrapper(
                cell, output_keep_prob=1.0 - self.dropout)
            cells.append(cell)
        network = tf.contrib.rnn.MultiRNNCell(cells)
        output, _ = tf.nn.dynamic_rnn(network, self.data, dtype=tf.float32)
        # Select last output.
        output = tf.transpose(output, [1, 0, 2])
        last = tf.gather(output, int(output.get_shape()[0]) - 1)
        # Softmax layer.
        weight, bias = self._weight_and_bias(
            self._num_hidden, int(self.target.get_shape()[1]))
        prediction = tf.nn.softmax(tf.matmul(last, weight) + bias)
        return prediction

    @lazy_property
    def cost(self):
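        # Note: this takes the log of a softmax output, so a probability that
        # underflows to 0 gives log(0) = -inf; a numerically stable TF1
        # alternative is tf.nn.softmax_cross_entropy_with_logits applied to
        # the pre-softmax logits.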
        cross_entropy = -tf.reduce_sum(self.target * tf.log(self.prediction))
        return cross_entropy

    @lazy_property
    def optimize(self):
        learning_rate = 0.003
        optimizer = tf.train.RMSPropOptimizer(learning_rate)
        return optimizer.minimize(self.cost)

    @lazy_property
    def error(self):
        mistakes = tf.not_equal(
            tf.argmax(self.target, 1), tf.argmax(self.prediction, 1))
        return tf.reduce_mean(tf.cast(mistakes, tf.float32))

    @staticmethod
    def _weight_and_bias(in_size, out_size):
        weight = tf.truncated_normal([in_size, out_size], stddev=0.01)
        bias = tf.constant(0.1, shape=[out_size])
        return tf.Variable(weight), tf.Variable(bias)

batch_size = 5000
epochs = 20


def main():
    length = 5
    features_size = 55
    num_classes = 2
    data = tf.placeholder(tf.float32, [None, length, features_size])
    target = tf.placeholder(tf.float32, [None, num_classes])
    dropout = tf.placeholder(tf.float32)
    model = SequenceClassification(data, target, dropout)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    graph = []
    for epoch in range(epochs):
        epoch_loss = 0
        i = 0
        if epoch % 3 == 0:
            random_x = np.random.permutation(train_x)
            random_y = np.random.permutation(train_y)
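            # note: train_x and train_y each receive an independent
            # permutation here, so the rows of random_x and random_y are no
            # longer aligned with each other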
        # while i < len(train_x):
        while i < 100000:
            start = i
            end = i + batch_size
            batch_x = random_x[start:end]
            batch_y = random_y[start:end]
            # batch_x = train_x[start:end]
            # batch_y = train_y[start:end]
            sess.run(model.optimize, {data: batch_x, target: batch_y, dropout: 0.5})
            i += batch_size
        print('epoch {} completed'.format(epoch + 1))
        if epoch % 5 == 0:
            train_errors = []
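            # note: i is not reset to 0 before this loop; it resumes from the
            # value left by the training loop above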
            while i < len(train_x):
                start = i
                end = i + 100000
                batch_train_x = train_x[start:end]
                batch_train_y = train_y[start:end]
                train_errors.append(sess.run(model.error, {data: batch_train_x, target: batch_train_y, dropout: 1}))
                i += 100000
            train_error = np.mean(train_errors)
            test_error = sess.run(model.error, {data: test_x, target: test_y, dropout: 1})
            validation_error = sess.run(model.error,
                                        {data: validation_x, target: validation_y, dropout: 1})
            print('Epoch {:2d} error {:3.1f}%, loss:'.format(epoch + 1, 100 * test_error), epoch_loss)
            print([epoch + 1, train_error, test_error, None, validation_error])
            graph.append([epoch + 1, train_error, test_error, None, validation_error])


if __name__ == '__main__':
    print('initialising model...')
    main()
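
For reference, the usual way to shuffle a feature array and its label array together is to draw a single permutation of the row indices and index both arrays with it. A minimal sketch, assuming train_x and train_y are NumPy arrays:

perm = np.random.permutation(len(train_x))
shuffled_x = train_x[perm]
shuffled_y = train_y[perm]  # rows stay paired with shuffled_x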

This is a modified version of the code available at this link (which had exactly the same problem when I tried it): RNN sequence classification. This is the output I get when I run the code:

epoch 0 completed
Epoch  1 error 19.1%, loss: 0
[1, 0.19120942, 0.19113493, None, 0.18927938]
epoch 1 completed
epoch 2 completed
epoch 3 completed
epoch 4 completed
epoch 5 completed
Epoch  6 error 19.1%, loss: 0
[6, 0.19120942, 0.19113493, None, 0.18927938]
epoch 6 completed
epoch 7 completed
epoch 8 completed
epoch 9 completed
epoch 10 completed
Epoch 11 error 19.1%, loss: 0
[11, 0.19120942, 0.19113493, None, 0.18927938]
epoch 11 completed
epoch 12 completed
epoch 13 completed
epoch 14 completed
epoch 15 completed
Epoch 16 error 19.1%, loss: 0
[16, 0.19120942, 0.19113493, None, 0.18927938]
epoch 16 completed
epoch 17 completed
epoch 18 completed
epoch 19 completed

What is causing this problem?
