Customizing an RNN (implementing a batch normalization layer)

Date: 2020-08-08 09:51:50

Tags: recurrent-neural-network batch-normalization attention-model

I have implemented an attention-based RNN encoder and decoder (code below). Next, I would like to add a batch normalization layer.

However, I don't know which layer to use, or where to insert it. I have looked at the documentation (https://pytorch.org/docs/stable/nn.html#normalization-layers), but since several of the normalization layers look similar, I'm not sure which one is most appropriate. My rough idea is marked with comments in the code below, and a sketch of the placement I'm considering follows the code, but if you have a better approach, please tell me!

Any advice is welcome :)
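To make the shape question concrete, here is a small throwaway check I put together (the sizes are arbitrary and only for illustration; LayerNorm is just one of the similar-looking layers from the docs I was comparing):

import torch

hidden_size, batch_size = 256, 32           # arbitrary sizes, just for this check

# One GRU step in my decoder produces a tensor of shape (1, batch, hidden_size)
step_output = torch.randn(1, batch_size, hidden_size)

# BatchNorm1d normalizes each of the hidden_size channels across the batch and
# expects input of shape (batch, num_features) or (batch, num_features, seq_len),
# so the step output has to be squeezed first
bn = torch.nn.BatchNorm1d(hidden_size)
normed_bn = bn(step_output.squeeze(0)).unsqueeze(0)   # back to (1, batch, hidden_size)

# LayerNorm normalizes over the last dimension and accepts the 3-D tensor directly
ln = torch.nn.LayerNorm(hidden_size)
normed_ln = ln(step_output)

print(normed_bn.shape, normed_ln.shape)   # both torch.Size([1, 32, 256])

My encoder and decoder so far: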

import torch

# `device` and `Tx` (the maximum input length) are defined elsewhere in my script.

class EncoderRNN(torch.nn.Module):
    def __init__(self, input_size, hidden_size):
        super(EncoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.embedding = torch.nn.Embedding(input_size, hidden_size)
        self.gru = torch.nn.GRU(hidden_size, hidden_size)

    def forward(self, input, hidden):
        # input: (batch,) token indices for a single time step
        # embedded: (1, batch, hidden_size)
        embedded = self.embedding(input).view(1, input.size(0), -1)
        output, hidden = self.gru(embedded, hidden)
        return output, hidden

    def initHidden(self, batch_size):
        return torch.zeros(1, batch_size, self.hidden_size, device=device)


class AttnDecoderRNN(torch.nn.Module):
    def __init__(self, hidden_size, output_size, dropout_p, max_length=Tx):
        super(AttnDecoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.dropout_p = dropout_p
        self.max_length = max_length

        self.embedding = torch.nn.Embedding(self.output_size, self.hidden_size)       
        self.attn = torch.nn.Linear(self.hidden_size * 2, self.max_length)
        self.attn_combine = torch.nn.Linear(self.hidden_size * 2, self.hidden_size)
        self.dropout = torch.nn.Dropout(self.dropout_p)

        # I don't know how to write this part...
        # self.batch_norm = torch.nn.BatchNorm1d( ??? )
        
        self.gru = torch.nn.GRU(self.hidden_size, self.hidden_size)
        self.out = torch.nn.Linear(self.hidden_size, self.output_size)

    def forward(self, input, hidden, encoder_outputs):
        embedded = self.embedding(input).view(1, input.size(0), -1)
        embedded = self.dropout(embedded)
        
        # attn_weights: (batch, max_length); encoder_outputs: (batch, max_length, hidden_size)
        attn_weights = torch.nn.functional.softmax(
            self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)
        attn_applied = torch.bmm(attn_weights.unsqueeze(1), encoder_outputs)

        output = torch.cat((embedded[0], attn_applied[:,0,:]), 1)
        output = self.attn_combine(output).unsqueeze(0)

        output = torch.nn.functional.relu(output)

        # I don't know how to write this part...
        # output = torch.nn.BatchNorm1d(-----)
        
        output, hidden = self.gru(output, hidden)
        
        output = torch.nn.functional.log_softmax(self.out(output[0]), dim=1)
        return output, hidden, attn_weights

    def initHidden(self, batch_size):
        return torch.zeros(1, batch_size, self.hidden_size, device=device)
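
For reference, this is roughly the placement I had in mind (just a sketch, untested; self.batch_norm is simply the name I would give the new layer):

# In AttnDecoderRNN.__init__, after self.dropout:
self.batch_norm = torch.nn.BatchNorm1d(self.hidden_size)

# In AttnDecoderRNN.forward, after the ReLU and before the GRU:
# `output` has shape (1, batch, hidden_size); BatchNorm1d wants (batch, hidden_size)
output = self.batch_norm(output.squeeze(0)).unsqueeze(0)
output, hidden = self.gru(output, hidden)

What I'm unsure about is whether normalizing over the batch at every decoding step makes sense (it also assumes batch_size > 1 during training, since BatchNorm1d needs more than one sample for its batch statistics), or whether one of the other normalization layers (e.g. LayerNorm) would be a better fit for an RNN.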

Thanks for your help :)

0 Answers:

No answers yet.