Training loss in a DNN is not decreasing

Asked: 2018-06-23 00:41:25

Tags: python tensorflow machine-learning

I want to train a model to learn the relationship between features with shape (1, 3751) and labels with shape (1, 1).

It looked quite simple, so I only used a two-dense-layer DNN as the model type, hoping the trained model could make good enough predictions. However, the predicted values are way off, and I noticed that the loss during training does not decrease at all.

I have tried different things, such as changing the learning rate or adding more hidden layers to the model, but none of them worked.

Here is my code:

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import with_statement

import argparse
import sys
import numpy as np
import pandas as pd
import tensorflow as tf

LEARNING_RATE = 0.0001

def model_fn(features, labels, mode, params):
  """Model function for Estimator."""

  input_layer = tf.reshape(features["x"], [1,3751])

  first_hidden_layer = tf.layers.dense(input_layer, 1000, activation=tf.nn.relu)
  second_hidden_layer = tf.layers.dense(first_hidden_layer, 100, activation=tf.nn.relu)
  third_hidden_layer = tf.layers.dense(second_hidden_layer,10, activation=tf.nn.relu)
  predictions = tf.layers.dense(third_hidden_layer, 1)


  # Provide an estimator spec for `ModeKeys.PREDICT`.
  if mode == tf.estimator.ModeKeys.PREDICT:
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions={"ages": predictions})


  labels_first_row = tf.reshape(labels[1], [1,-1])
  loss = tf.losses.mean_squared_error(labels_first_row, predictions)

  optimizer = tf.train.GradientDescentOptimizer(
      learning_rate=params["learning_rate"])
  train_op = optimizer.minimize( 
      loss=loss, global_step=tf.train.get_global_step())

  # Calculate root mean squared error as additional eval metric
  eval_metric_ops = {
      "rmse": tf.metrics.root_mean_squared_error(
          tf.cast(labels_first_row, tf.float32), predictions)
  }

  return tf.estimator.EstimatorSpec(
      mode=mode,
      loss=loss,
      train_op=train_op,
      eval_metric_ops=eval_metric_ops)


def main(unused_argv):
  train_file = "training_data.csv"
  test_file = "test_data.csv"

  train_features_interim = pd.read_csv(train_file, usecols=['current'])
  train_features_numpy = np.asarray(train_features_interim, dtype=np.float32)
  train_labels_interim = pd.read_csv(train_file, usecols=['plo_tox'])
  train_labels_numpy = np.asarray(train_labels_interim, dtype=np.float32)

  model_params = {"learning_rate": LEARNING_RATE}

  # Instantiate Estimator
  nn = tf.estimator.Estimator(model_fn=model_fn, params=model_params, 
  model_dir='/tmp/nmos_self_define')

  train_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={"x": train_features_numpy},
      y=train_labels_numpy,
      batch_size = 3751,
      num_epochs= None,
      shuffle=False)

  # Train
  nn.train(input_fn=train_input_fn, steps=10000)

  test_features_interim = pd.read_csv(test_file, usecols = ['current'])
  test_features_numpy = np.asarray(test_features_interim, dtype=np.float32)
  test_labels_interim = pd.read_csv(test_file, usecols=['plo_tox'])
  test_labels_numpy = np.asarray(test_labels_interim, dtype=np.float32)

  # Score accuracy
  test_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={"x": test_features_numpy},
      y=test_labels_numpy,
      batch_size = 3751,
      num_epochs= None,
      shuffle=False)

  ev = nn.evaluate(input_fn=test_input_fn, steps = 500)
  print("Loss: %s" % ev["loss"])
  print("Root Mean Squared Error: %s" % ev["rmse"])

  prediction_file = "Tensorflow_prediction_data.csv"

  predict_features_interim = pd.read_csv(prediction_file, usecols=['current'])
  predict_features_numpy = np.asarray(predict_features_interim, dtype=np.float32)

  # Print out predictions
  predict_input_fn = tf.estimator.inputs.numpy_input_fn(
      x= {"x": predict_features_numpy},
      num_epochs=1,
      batch_size = 3751,
      shuffle=False)

  predictions = nn.predict(input_fn=predict_input_fn)
  for i, p in enumerate(predictions):
    print("Prediction %s: %s" % (i + 1, p["ages"]))


if __name__ == '__main__':
  tf.logging.set_verbosity(tf.logging.INFO)

  parser = argparse.ArgumentParser()
  parser.register("type", "bool", lambda v: v.lower() == "true")
  parser.add_argument(
      "--train_data", type=str, default="", help="Path to the training data.")
  parser.add_argument(
      "--test_data", type=str, default="", help="Path to the test data.")
  parser.add_argument(
      "--predict_data",
      type=str,
      default="",
      help="Path to the prediction data.")
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)

Since I am only a beginner in machine learning, I thought it would be better to ask people who know more for advice. I believe there are some knobs I need to tune, but I'm not quite sure which, so please offer any suggestions you think might help. Things I am considering:

  1. Change the activation function
  2. Use regularization (please let me know how to do this properly; a rough sketch of what I think it might look like appears right after this list)
  3. Do some feature manipulation
  4. Add more hidden layers and nodes
  5. Increase the size of the training set (right now I only have 900 training samples; is that enough?)
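
For item 2, this is roughly what I have in mind, based on the TensorFlow 1.x API I have been using. The placeholder shapes, the layer size of 100, and the 1e-4 scale are arbitrary illustrations, not values from my actual model; please tell me if this is the right way to do it:

import tensorflow as tf

# Attach an L2 penalty to each dense layer via kernel_regularizer.
x = tf.placeholder(tf.float32, shape=[None, 3751])
y = tf.placeholder(tf.float32, shape=[None, 1])

l2 = tf.contrib.layers.l2_regularizer(scale=1e-4)
hidden = tf.layers.dense(x, 100, activation=tf.nn.relu, kernel_regularizer=l2)
pred = tf.layers.dense(hidden, 1, kernel_regularizer=l2)

# The per-layer penalties are collected separately and have to be added
# to the data loss explicitly before minimizing.
data_loss = tf.losses.mean_squared_error(y, pred)
total_loss = data_loss + tf.losses.get_regularization_loss()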

My input data table looks like this: [screenshot of the input data table]

Are there any other options? Thanks in advance for any ideas.

1 Answer:

Answer 0 (score: 0):

You say the batch size is 3751, yet in model_fn you reshape the input features to shape (1, 3751). That does not make sense, because tf.layers.dense expects the batch along the first dimension and the features along the second. The sketch below shows the shapes it expects.
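
Here is a minimal, self-contained sketch of the expected shapes (the placeholder names and the layer size of 100 are illustrative only, not taken from your code):

import tensorflow as tf

# tf.layers.dense treats the first dimension as the batch and the second as
# the features, so each row is one example.
x = tf.placeholder(tf.float32, shape=[None, 3751])         # (batch, num_features)
y = tf.placeholder(tf.float32, shape=[None, 1])            # one label per example

hidden = tf.layers.dense(x, 100, activation=tf.nn.relu)    # (batch, 100)
pred = tf.layers.dense(hidden, 1)                          # (batch, 1)
loss = tf.losses.mean_squared_error(y, pred)               # averaged over the batch

# Reshaping a (3751, 1) batch to (1, 3751) instead collapses the whole batch
# into a single example, and pairing it with labels[1] keeps only one label,
# so each training step effectively sees one data point and the loss barely moves.

If each of your examples really does consist of 3751 feature values, reshape the numpy array to (num_examples, 3751) before handing it to numpy_input_fn and drop the reshape inside model_fn, so that the batch dimension is preserved end to end and the labels stay aligned with the examples.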