I classify MNIST with a CNN in TensorFlow and want to read the dataset from TFRecords, but my accuracy is only 10%

Time: 2017-09-07 10:01:42

Tags: python tensorflow deep-learning mnist

I want to classify the MNIST dataset with TensorFlow in my own way. First, convert the dataset into a TFRecords file. Then, read that file with tf.TFRecordReader. Finally, train on the dataset.

The problem: the code runs without any syntax errors, but I only get about 10% accuracy on the test data.
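For reference, a minimal sketch of a sanity check on the input pipeline (it assumes the read_and_decode helper from tfrecords.py below and that ./tfrecords/train.tfrecords already exists); decoding one example and printing its shape and label helps rule out a broken pipeline before blaming the model:

import tensorflow as tf
import tfrecords as rd

# Decode a single example from the training TFRecords file (a sketch).
img, one_hot = rd.read_and_decode("./tfrecords/train.tfrecords")
with tf.Session() as sess:
    sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
    # string_input_producer needs the queue runners to be started.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord, sess=sess)
    img_val, label_val = sess.run([img, one_hot])
    print(img_val.shape)   # expected: (28, 28, 1)
    print(label_val)       # expected: a one-hot vector of length 10
    coord.request_stop()
    coord.join(threads)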

Train.py:

import tensorflow as tf
import Net
import os
import numpy as np
import datetime
import time
import tfrecords as rd

BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.01
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 10000
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH = "Model/" 
MODEL_NAME = "model"

def train():
    x = tf.placeholder(tf.float32, [
        BATCH_SIZE,
        Net.IMAGE_SIZE,
        Net.IMAGE_SIZE,
        Net.NUM_CHANNELS],
                       name='x-input')
    y_ = tf.placeholder(tf.float32, [None, Net.OUTPUT_NODE], name='y-input')
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = Net.inference(x, True, regularizer)
    global_step = tf.Variable(0, trainable=False)
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        55000 / BATCH_SIZE, LEARNING_RATE_DECAY,
        staircase=True)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')
    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    img, one_hot = rd.read_and_decode("./tfrecords/train.tfrecords")
    img_batch, label_batch = tf.train.shuffle_batch([img, one_hot], batch_size=BATCH_SIZE, capacity=10000 + 3 * BATCH_SIZE, min_after_dequeue=10000)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord = coord, sess = sess)
        try:
            steps = 1
            while not coord.should_stop():
                if steps > TRAINING_STEPS:
                    break
                xs, ys = sess.run([img_batch, label_batch])
                _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
                if steps % 1000 == 0:
                    print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                    saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)
                steps += 1
        except tf.errors.OutOfRangeError:
            print("Done training after reading all data")
        finally:
            coord.request_stop()
        coord.join(threads)
        sess.close()

def main(argv=None):
    start_time = datetime.datetime.now()
    print("start_time = %s" % start_time)
    rd.create_record("train")
    train_start_time = datetime.datetime.now()
    print("train_start_time = %s" % train_start_time)
    train()
    end_time = datetime.datetime.now()
    print("end_time = %s" % end_time)

if __name__ == '__main__':
    main()

Net.py:

import tensorflow as tf

INPUT_NODE = 784
OUTPUT_NODE = 10
IMAGE_SIZE = 28
NUM_CHANNELS = 1
NUM_LABELS = 10
CONV1_DEEP = 32
CONV1_SIZE = 5
CONV2_DEEP = 64
CONV2_SIZE = 5
FC_SIZE = 512

def inference(input_tensor, train, regularizer):
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable(
            "weight", [CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable("bias", [CONV1_DEEP], initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
    with tf.name_scope("layer2-pool1"):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    with tf.variable_scope("layer3-conv2"):
        conv2_weights = tf.get_variable(
            "weight", [CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable("bias", [CONV2_DEEP], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
    with tf.name_scope("layer4-pool2"):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        pool_shape = pool2.get_shape().as_list()
        nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
        reshaped = tf.reshape(pool2, [pool_shape[0], nodes])
    with tf.variable_scope('layer5-fc1'):
        fc1_weights = tf.get_variable("weight", [nodes, FC_SIZE],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None: tf.add_to_collection('losses', regularizer(fc1_weights))
        fc1_biases = tf.get_variable("bias", [FC_SIZE], initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
        if train: fc1 = tf.nn.dropout(fc1, 0.5)
    with tf.variable_scope('layer6-fc2'):
        fc2_weights = tf.get_variable("weight", [FC_SIZE, NUM_LABELS],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None: tf.add_to_collection('losses', regularizer(fc2_weights))
        fc2_biases = tf.get_variable("bias", [NUM_LABELS], initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(fc1, fc2_weights) + fc2_biases
    return logit

tfrecords.py:

import os
import tensorflow as tf
from PIL import Image
import Net

def create_record(op_type):
    writer = tf.python_io.TFRecordWriter("./tfrecords/" + op_type + ".tfrecords")
    f = open("./" + op_type + ".txt", 'r')
    img_num = 0
    for line in f.readlines():
        img_num += 1
        if img_num % 2000 == 0:
            print("already read in %d images." % (img_num))
        str_split = line.split()
        img_path = "./" + str_split[0]
        index = int(str_split[1])
        img = Image.open(img_path)
        img = img.resize((Net.IMAGE_SIZE, Net.IMAGE_SIZE))
        img_raw = img.tobytes() 
        example = tf.train.Example(features=tf.train.Features(feature={
            'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[index])),
            'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw]))
        }))
        writer.write(example.SerializeToString())
    f.close()
    writer.close()

def read_and_decode(filename):
    filename_queue = tf.train.string_input_producer([filename], shuffle = True)
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'label': tf.FixedLenFeature([], tf.int64),
                                           'img_raw' : tf.FixedLenFeature([], tf.string),
                                       })
    img = tf.decode_raw(features['img_raw'], tf.uint8)
    img = tf.reshape(img, [Net.IMAGE_SIZE, Net.IMAGE_SIZE, Net.NUM_CHANNELS])
    # scale pixel values to [0, 1] and invert them
    img = 1 - tf.cast(img, tf.float32) * (1. / 255)
    label = tf.cast(features['label'], tf.int32)
    one_hot = tf.one_hot(label, 10, dtype = tf.float32)
    one_hot = tf.reshape(one_hot, [-1])

    return img, one_hot 

Eval.py:

import time
import math
import tensorflow as tf
import numpy as np
import Net
import Train
import tfrecords as rd

def evaluate():
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [
            10000,
            Net.IMAGE_SIZE,
            Net.IMAGE_SIZE,
            Net.NUM_CHANNELS],
                           name='x-input')
        y_ = tf.placeholder(tf.float32, [None, Net.OUTPUT_NODE], name='y-input')
        #validate_feed = {x: mnist.test.images, y_: mnist.test.labels}
        global_step = tf.Variable(0, trainable=False)
        regularizer = tf.contrib.layers.l2_regularizer(Train.REGULARIZATION_RATE)
        y = Net.inference(x, False, regularizer)
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        variable_averages = tf.train.ExponentialMovingAverage(Train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        for i in range(1):
            img, one_hot = rd.read_and_decode("./tfrecords/test.tfrecords")
            img_batch, label_batch = tf.train.shuffle_batch(tensors = [img, one_hot], batch_size = 10000, capacity = 10000 + 3 * 100, min_after_dequeue = 10000)
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(Train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    # This was the cause of the bug: re-initializing the
                    # variables here overwrites the weights just restored.
                    #tf.global_variables_initializer().run()
                    coord = tf.train.Coordinator()
                    threads = tf.train.start_queue_runners(coord = coord, sess = sess)
                    try:
                        xs, ys = sess.run([img_batch, label_batch])
                        accuracy_score = sess.run(accuracy, feed_dict={x:xs, y_:ys})
                        print("After %s training step(s), test accuracy = %g" % (global_step, accuracy_score))
                    except tf.errors.OutOfRangeError:
                        print("Done testting after reading all data")
                    finally:
                        coord.request_stop()
                    coord.join(threads)
                    sess.close()
                else:
                    print('No checkpoint file found')
                    return

def main(argv=None):
    rd.create_record("test")
    evaluate()

if __name__ == '__main__':
    main()

Now I have already solved this problem.

My code and the other files: https://github.com/xmy7216/MNIST_classification.git

Linux: Red Hat Enterprise Linux Server 7.2 (Maipo)
GPU: Tesla P4
TensorFlow: 1.3
Python: 2.7.5

1 Answer:

Answer 0 (score: 0):

I found the reason: after loading the model with saver.restore, I initialized the variables again with tf.global_variables_initializer().run(), which overwrote the restored weights. How silly of me!
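For anyone hitting the same symptom, here is a minimal sketch of the intended order in Eval.py (reusing the saver, img_batch, label_batch, x, y_ and accuracy tensors defined above): build the graph, restore the trained variables, start the input queues, and never re-run the global initializer after the restore.

with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state(Train.MODEL_SAVE_PATH)
    if ckpt and ckpt.model_checkpoint_path:
        # Restoring loads the trained (moving-average) weights into the graph.
        saver.restore(sess, ckpt.model_checkpoint_path)
        # Do NOT call tf.global_variables_initializer().run() here:
        # it would overwrite the weights that were just restored.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord, sess=sess)
        xs, ys = sess.run([img_batch, label_batch])
        print(sess.run(accuracy, feed_dict={x: xs, y_: ys}))
        coord.request_stop()
        coord.join(threads)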
