bidirectional_rnn: inputs must be a sequence

Date: 2017-05-10 13:47:39

Tags: tensorflow

I want to try using bidirectional_rnn to predict a time series. The code is:

#BiRNN_model.py

import tensorflow as tf

class BiRNN(object):
    """
    A bidirectional RNN.
    """

    def __init__(self, in_size, out_size, num_steps=20, cell_size=20, batch_size=50,
                 num_layers=2, keep_prob=0.5, is_training=True):
        """
        :param in_size: int, the dimension of the input
        :param out_size: int, the dimension of the output
        :param num_steps: int, the number of time steps
        :param cell_size: int, the size of the RNN cell
        :param batch_size: int, the size of a mini batch
        :param num_layers: int, the number of stacked cell layers
        :param keep_prob: float, the keep probability of the dropout layer
        :param is_training: bool, set True for the training model, False for the test model
        """
        self.in_size = in_size
        self.out_size = out_size
        self.num_steps = num_steps
        self.cell_size = cell_size
        self.batch_size = batch_size
        self.num_layers = num_layers
        self.keep_prob = keep_prob
        self.is_training = is_training
        self.__build_model__()

    def __build_model__(self):
        """
        The inner method to construct the BiRNN model.
        """
        # Input and output placeholders
        self.x = tf.placeholder(tf.float32, shape=[None, self.num_steps, self.in_size])
        self.y = tf.placeholder(tf.float32, shape=[None, self.num_steps, self.out_size])

        # Add the first input layer
        with tf.variable_scope("input"):
            # Reshape x to a 2-D tensor
            inputs = tf.reshape(self.x, shape=[-1, self.in_size])  # [batch_size*num_steps, in_size]
            W, b = self._get_weight_bias(self.in_size, self.cell_size)
            inputs = tf.nn.xw_plus_b(inputs, W, b, name="input_xW_plus_b")
            # inputs = tf.matmul(inputs, W) + b
        # Reshape to a 3-D tensor
        #inputs = tf.reshape(inputs, shape=[-1, self.num_steps, self.cell_size])  # [batch_size, num_steps, cell_size]
        inputs = tf.reshape(inputs, shape=[-1, self.in_size])

        # Dropout on the inputs
        if self.is_training and self.keep_prob < 1.0:
            inputs = tf.nn.dropout(inputs, keep_prob=self.keep_prob)

        # Construct the BiRNN cells
        biRNN_fw_cell = tf.contrib.rnn.BasicRNNCell(num_units=self.cell_size)
        biRNN_bw_cell = tf.contrib.rnn.BasicRNNCell(num_units=self.cell_size)
        if self.is_training and self.keep_prob < 1.0:
            fw_cell = tf.contrib.rnn.DropoutWrapper(biRNN_fw_cell, output_keep_prob=self.keep_prob)
            bw_cell = tf.contrib.rnn.DropoutWrapper(biRNN_bw_cell, output_keep_prob=self.keep_prob)
        else:
            fw_cell, bw_cell = biRNN_fw_cell, biRNN_bw_cell  # no dropout at test time
        cell_Fw = tf.contrib.rnn.MultiRNNCell([fw_cell] * self.num_layers)
        cell_Bw = tf.contrib.rnn.MultiRNNCell([bw_cell] * self.num_layers)

        # The initial states
        self.init_state_fw = cell_Fw.zero_state(self.batch_size, dtype=tf.float32)
        self.init_state_bw = cell_Bw.zero_state(self.batch_size, dtype=tf.float32)

        # Add the BiRNN layer
        with tf.variable_scope("BRNN"):
            outputs, final_state_fw, final_state_bw = tf.contrib.rnn.static_bidirectional_rnn(cell_Fw, cell_Bw, inputs,
                                                                                              initial_state_fw=self.init_state_fw,
                                                                                              initial_state_bw=self.init_state_bw)
        self.final_state_fw = final_state_fw
        self.final_state_bw = final_state_bw

        # Add the output layer
        with tf.variable_scope("output"):
            output = tf.reshape(outputs, shape=[-1, self.cell_size])
            W, b = self._get_weight_bias(self.cell_size, self.out_size)
            output = tf.nn.xw_plus_b(output, W, b, name="output")

        self.pred = output
        losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example([tf.reshape(self.pred, [-1])],
                                                                    [tf.reshape(self.y, [-1])],
                                                                    [tf.ones([self.batch_size * self.num_steps])],
                                                                    average_across_timesteps=True,
                                                                    softmax_loss_function=self._ms_cost)
        self.cost = tf.reduce_sum(losses) / tf.to_float(self.batch_size)

    def _ms_cost(self, y_pred, y_target):
        """The quadratic cost function."""
        return 0.5 * tf.square(y_pred - y_target)

    def _get_weight_bias(self, in_size, out_size):
        """
        Create weight and bias variables.
        """
        weights = tf.get_variable("weight", shape=[in_size, out_size],
                                  initializer=tf.random_normal_initializer(mean=0.0, stddev=1.0))
        biases = tf.get_variable("bias", shape=[out_size], initializer=tf.constant_initializer(0.1))
        return weights, biases

However, when I run the code, I get this error:

  File "../model/BiRNN_model.py", line 70, in __build_model__
    initial_state_bw = self.init_state_bw
  File "/home/lucas/.local/lib/python3.5/site-packages/tensorflow/contrib/rnn/python/ops/core_rnn.py", line 328, in static_bidirectional_rnn
    raise TypeError("inputs must be a sequence")
TypeError: inputs must be a sequence

The inputs argument of static_bidirectional_rnn is not a sequence. I am new to TensorFlow and deep learning, and I have spent many days trying to fix this error, but I failed. Can someone help me? Thanks.

1 Answer:

Answer 0 (score: 1)

I assume you want to create a fully unrolled bidirectional recurrent neural network, since you are using the function 'static_bidirectional_rnn'. This function expects a sequence of tensors, so the input should be unpacked ('unstack' in recent tf versions) along the time-step dimension.
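
For illustration, here is a minimal, self-contained sketch of the input format this function expects (assuming the TF 1.x contrib API; the concrete sizes are made up for the example):

import tensorflow as tf

batch_size, num_steps, in_size, cell_size = 50, 20, 8, 20

x = tf.placeholder(tf.float32, shape=[None, num_steps, in_size])
# Unstack along the time axis -> a Python list of num_steps tensors,
# each of shape [batch_size, in_size]
inputs = tf.unstack(x, num_steps, 1)

fw_cell = tf.contrib.rnn.BasicRNNCell(num_units=cell_size)
bw_cell = tf.contrib.rnn.BasicRNNCell(num_units=cell_size)
outputs, state_fw, state_bw = tf.contrib.rnn.static_bidirectional_rnn(
    fw_cell, bw_cell, inputs, dtype=tf.float32)
# outputs is a list of num_steps tensors, each of shape [batch_size, 2*cell_size]
# (the forward and backward outputs are concatenated)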

The error is in this line, because the result no longer contains the time steps:

inputs = tf.reshape(inputs, shape=[-1, self.in_size])

It should be something like the following:

inputs = tf.unstack(inputs, self.num_steps, 1) # Unstack to get a list of 'self.num_steps' tensors of shape (batch_size, in_size)
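
Applied to the question's __build_model__, this also means restoring the commented-out 3-D reshape first, since after the input layer the feature dimension is cell_size rather than in_size. A sketch against the question's code (untested):

inputs = tf.reshape(inputs, shape=[-1, self.num_steps, self.cell_size])  # [batch_size, num_steps, cell_size]
inputs = tf.unstack(inputs, self.num_steps, 1)  # list of num_steps tensors, each [batch_size, cell_size]

Note that static_bidirectional_rnn concatenates the forward and backward outputs, so each output tensor has width 2*cell_size; the output layer would then need to be widened accordingly, for example:

output = tf.reshape(tf.stack(outputs, axis=1), shape=[-1, 2 * self.cell_size])
W, b = self._get_weight_bias(2 * self.cell_size, self.out_size)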