How to load a TensorFlow model and continue training

Time: 2017-12-08 06:07:48

Tags: python tensorflow

I want to load a pretrained model and continue training it. Here is the standard snippet I use to save the model (pretrain.py):

tf.reset_default_graph()

# tf Graph input
X = tf.placeholder("float", [None, n_input])
Y = tf.placeholder("float", [None, n_classes])

mlp_layer_name = ['h1', 'b1', 'h2', 'b2', 'h3', 'b3', 'w_o', 'b_o']
logits = multilayer_perceptron(X, n_input, n_classes, mlp_layer_name)

loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y), name='loss_op')
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op, name='train_op')

saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.

        # Loop over all batches
        for i in range(total_batch):
            batch_x, batch_y = next(train_generator)

            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([train_op, loss_op], feed_dict={X: batch_x,
                                                            Y: batch_y})
            # Compute average loss
            avg_cost += c / total_batch

        print("Epoch: {:3d}, cost = {:.6f}".format(epoch+1, avg_cost))

    print("Optimization Finished!")
    saver.save(sess, 'model')
    print("Model saved")

Now I load the pretrained model and continue training (continue.py):

# tf Graph input
X = tf.placeholder("float", [None, n_input])
Y = tf.placeholder("float", [None, n_classes])
mlp_layer_name = ['h1', 'b1', 'h2', 'b2', 'h3', 'b3', 'w_o', 'b_o']
logits = multilayer_perceptron(X, n_input, n_classes, mlp_layer_name)
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y), name='loss_op')
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op, name='train_op')

with tf.Session() as sess:
    saver = tf.train.import_meta_graph('model.meta')
    saver.restore(sess, tf.train.latest_checkpoint('./')) # search for checkpoint file

    graph = tf.get_default_graph()

    for epoch in range(training_epochs):
        avg_cost = 0.

        # Loop over all batches
        for i in range(total_batch):
            batch_x, batch_y = next(train_generator)

            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([train_op, loss_op], feed_dict={X: batch_x,
                                                            Y: batch_y})
            # Compute average loss
            avg_cost += c / total_batch

        print("Epoch: {:3d}, cost = {:.6f}".format(epoch+1, avg_cost))

But it raises the following error:

tensorflow.python.framework.errors_impl.FailedPreconditionError: Attempting to use uninitialized value h1
     [[Node: h1/read = Identity[T=DT_FLOAT, _class=["loc:@h1"], _device="/job:localhost/replica:0/task:0/cpu:0"]]]

Here are my questions:

1. In many TensorFlow tutorials, get_tensor_by_name() is used to fetch the weights and biases. Here I do not want to fetch the weights and biases individually; I just want to load the model and continue training.
2. The error says the tensor is uninitialized, but I thought saver.restore(sess, tf.train.latest_checkpoint('./')) should have successfully loaded the weights and biases.

Here is multilayer_perceptron(), in case it helps illustrate my questions (a sketch of the get_tensor_by_name() pattern those tutorials use follows the function):

def multilayer_perceptron(x, n_input, n_classes, name):
    n_hidden_1 = 512
    n_hidden_2 = 256
    n_hidden_3 = 128
    # Store layers weight & bias
    weights = {
        'h1' : tf.get_variable(name[0], initializer=tf.random_normal([n_input, n_hidden_1])),
        'h2' : tf.get_variable(name[2], initializer=tf.random_normal([n_hidden_1, n_hidden_2])),
        'h3' : tf.get_variable(name[4], initializer=tf.random_normal([n_hidden_2, n_hidden_3])),
        'w_o': tf.get_variable(name[6], initializer=tf.random_normal([n_hidden_3, n_classes]))
    }
    biases = {
        'b1' : tf.get_variable(name[1], initializer=tf.random_normal([n_hidden_1])),
        'b2' : tf.get_variable(name[3], initializer=tf.random_normal([n_hidden_2])),
        'b3' : tf.get_variable(name[5], initializer=tf.random_normal([n_hidden_3])),
        'b_o': tf.get_variable(name[7], initializer=tf.random_normal([n_classes]))
    }

    layer_1 = tf.nn.relu(tf.add(tf.matmul(x      , weights['h1']), biases['b1']))
    layer_1 = tf.layers.dropout(layer_1, rate=0.5, training=True)
    layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, weights['h2']), biases['b2']))
    layer_2 = tf.layers.dropout(layer_2, rate=0.3, training=True)
    layer_3 = tf.nn.relu(tf.add(tf.matmul(layer_2, weights['h3']), biases['b3']))
    layer_3 = tf.layers.dropout(layer_3, rate=0.1, training=True)
    out_layer = tf.matmul(layer_3, weights['w_o']) + biases['b_o']
    return out_layer
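For context, the tutorial pattern mentioned in question 1 does not rebuild the graph in Python at all; it imports the saved meta graph and fetches every tensor and op by name. A minimal sketch follows. The 'X:0' and 'Y:0' lookups are an assumption: the placeholders above were created without explicit names, so they would have to be saved with name='X' and name='Y' for this to work; 'loss_op' and 'train_op' are the names the code above does assign.

# Sketch: continue training from the saved meta graph alone, without
# rebuilding the model in Python. Assumes the placeholders were created
# with name='X' and name='Y' (pretrain.py above does not name them).
with tf.Session() as sess:
    saver = tf.train.import_meta_graph('model.meta')
    saver.restore(sess, tf.train.latest_checkpoint('./'))

    graph = tf.get_default_graph()
    X = graph.get_tensor_by_name('X:0')              # hypothetical name
    Y = graph.get_tensor_by_name('Y:0')              # hypothetical name
    loss_op = graph.get_tensor_by_name('loss_op:0')  # named in pretrain.py
    train_op = graph.get_operation_by_name('train_op')

    batch_x, batch_y = next(train_generator)
    _, c = sess.run([train_op, loss_op], feed_dict={X: batch_x, Y: batch_y})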

1 Answer:

Answer 0 (score: 3):

I think I found the answer. The key point is that if the graph is rebuilt in code, there is no need to call tf.train.import_meta_graph(); creating a tf.train.Saver() and calling saver.restore(sess, tf.train.latest_checkpoint('./')) is enough. Here is my code:

# tf Graph input
X = tf.placeholder("float", [None, n_input])
Y = tf.placeholder("float", [None, n_classes])
mlp_layer_name = ['h1', 'b1', 'h2', 'b2', 'h3', 'b3', 'w_o', 'b_o']
logits = multilayer_perceptron(X, n_input, n_classes, mlp_layer_name)
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y), name='loss_op')
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op, name='train_op')

with tf.Session() as sess:
    saver = tf.train.Saver()
    saver.restore(sess, tf.train.latest_checkpoint('./')) # search for checkpoint file

    graph = tf.get_default_graph()

    for epoch in range(training_epochs):
        avg_cost = 0.

        # Loop over all batches
        for i in range(total_batch):
            batch_x, batch_y = next(train_generator)

            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([train_op, loss_op], feed_dict={X: batch_x,
                                                            Y: batch_y})
            # Compute average loss
            avg_cost += c / total_batch

        print("Epoch: {:3d}, cost = {:.6f}".format(epoch+1, avg_cost))
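As a quick sanity check (a minimal sketch, using the 'h1' variable name defined in multilayer_perceptron above), these lines can be added right after saver.restore(...) to confirm that values were actually restored:

# Sanity check: fetch the restored 'h1' weights and print a few values.
# If restore() had not run, this would raise the same FailedPreconditionError.
h1 = tf.get_default_graph().get_tensor_by_name('h1:0')
print(sess.run(h1)[0, :5])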
