Recurrent-Neural-Networks / rnn / model.py
model.py
Raw
import torch
import torch.nn as nn
class RNN(nn.Module):
    def __init__(self, input_size, hidden_size, output_size, model_type="rnn", n_layers=1):
        super(RNN, self).__init__()
        """
        Initialize the RNN model.
        
        You should create:
        - An Embedding object which will learn a mapping from tensors
        of dimension input_size to embedding of dimension hidden_size.
        - Your RNN network which takes the embedding as input (use models
        in torch.nn). This network should have input size hidden_size and
        output size hidden_size.
        - A linear layer of dimension hidden_size x output_size which
        will predict output scores.

        Inputs:
        - input_size: Dimension of individual element in input sequence to model
        - hidden_size: Hidden layer dimension of RNN model
        - output_size: Dimension of individual element in output sequence from model
        - model_type: RNN network type can be "rnn" (for basic rnn), "gru", or "lstm"
        - n_layers: number of layers in your RNN network
        """
        
        self.model_type = model_type
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers
        
        ####################################
        #          YOUR CODE HERE          #
        ####################################
        #input_size to embedding of dimension hidden_size
        self.embedding_object = nn.Embedding(self.input_size, self.hidden_size) #mapping from tensors
        
        #rnn_to_create = globals()[model_type]
        #print(rnn_to_create)
        #rnn_result = getattr(nn, model_type.upper() )()
        
        
        # hidden_size and output size hidden_size.
        self.dn_object = getattr(nn, model_type.upper() )(self.hidden_size, self.hidden_size, self.n_layers)
        #self.dn_object = rnn_result(hidden_size, hidden_size, n_layers) #check this
        
        #hidden_size x output_size
        self.linear = nn.Linear(self.hidden_size, self.output_size)

        
        ##########       END      ##########
        


    def forward(self, input, hidden):
        """
        Forward pass through RNN model. Use your Embedding object to create 
        an embedded input to your RNN network. You should then use the 
        linear layer to get an output of self.output_size. 

        Inputs:
        - input: the input data tensor to your model of dimension (batch_size)
        - hidden: the hidden state tensor of dimension (n_layers x batch_size x hidden_size) 

        Returns:
        - output: the output of your linear layer
        - hidden: the output of the RNN network before your linear layer (hidden state)
        """
        
        #output = None
        #hidden = None
        
        ####################################
        #          YOUR CODE HERE          #
        ####################################
        batch_size=input.size()[0] 
        #print("batch size here",batch_size)
        #print("input_before",input.size())
        
        #print("input_after",new_input)
         
        embed_output = self.embedding_object(input)
        #print("size of embed output", embed_output.size())
        
        
        new_embed_output = embed_output.view(1, batch_size, hidden) #(sequence length, batch size, hidden)
        #dn_output, dn_output_hidden = self.dn_object(new_embed_output, hidden) #the output of the RNN network before your linear layer (hidden state)
        dn_output, dn_output_hidden = self.dn_object(new_embed_output, hidden)
        #print("this is the size of rnn output",dn_output.size())
        
        # hints  from -- https://stackoverflow.com/questions/49466894/how-to-correctly-give-inputs-to-embedding-lstm-and-linear-layers-in-pytorch
        
        new_dn_output=dn_output.view(batch_size, -1)
        output_linear =self.linear(new_dn_output)
        
        
        #print("size of the output lienar size", output_linear.size())
        
        hidden=dn_output_hidden
        output=output_linear  #the output of your linear layer
        ##########       END      ##########
        
        
        return output, hidden

    def init_hidden(self, batch_size, device=None):
        """
        Initialize hidden states to all 0s during training.
        
        Hidden states should be initilized to dimension (n_layers x batch_size x hidden_size) 

        Inputs:
        - batch_size: batch size

        Returns:
        - hidden: initialized hidden values for input to forward function
        """
         #dtype=logits_fake.dtype, layout=logits_fake.layout, device=logits_fake.device)
        ####################################
        #          YOUR CODE HERE          #
        ####################################
        if (self.model_type == "rnn" or self.model_type == "gru"):
            hidden = (torch.zeros((self.n_layers, batch_size, self.hidden_size),requires_grad=True)).to(device)
        if (self.model_type == "lstm"):
           # hidden = (torch.zeros(self.n_layers, batch_size, self.hidden_size),torch.zeros(self.n_layers, batch_size, self.hidden_size) )
           hidden =  ((torch.zeros((self.n_layers, batch_size, self.hidden_size),requires_grad=True).to(device)),(torch.zeros((self.n_layers, batch_size, self.hidden_size),requires_grad=True).to(device)))

        ##########       END      ##########

        return hidden