Question: Given the following skeleton from models.py (which imports numpy, torch, a Transformer module, and an Indexer utility), implement the `LanguageModel` subclasses so that the abstract interface below is satisfied.

# models.py
import numpy as np
import torch
import torch.nn as nn
from transformer import Transformer
from utils import Indexer
class LanguageModel(object):
    """Abstract interface for a character-level language model.

    Subclasses score the next character (or a sequence of characters)
    conditioned on a string context, returning natural-log probabilities.
    """

    def get_next_char_log_probs(self, context) -> np.ndarray:
        """
        Returns a log probability distribution over the next characters given a context.
        The log should be base e.
        NOTE: You should make sure you call model.eval() to determinize inference here (turns off dropout
        layers in TransformerEncoder).
        :param context: the string context that the LM conditions on
        :return: A numpy vector log P(y | context) where y ranges over the output vocabulary.
        """
        # Idiomatic abstract-method stub: NotImplementedError (still an
        # Exception subclass, so existing broad handlers are unaffected).
        raise NotImplementedError("Only implemented in subclasses")

    def get_log_prob_sequence(self, next_chars, context) -> float:
        """
        Scores a bunch of characters following context. That is, returns
        log P(nc1, nc2, nc3, ... | context) = log P(nc1 | context) + log P(nc2 | context, nc1), ...
        The log should be base e.
        NOTE: You should make sure you call model.eval() to determinize inference here (turns off dropout
        layers in TransformerEncoder).
        :param next_chars: the characters to score, in order, following the context
        :param context: the string context that the LM conditions on
        :return: the float log probability of the sequence (base e)
        """
        raise NotImplementedError("Only implemented in subclasses")
class UniformLanguageModel(LanguageModel):
def __init__(self, voc_size):
self.voc_size = voc_size
def get_next_char_log_probs(self, context):
return np.ones([self.voc_size])* np.log(1.0/self.voc_size)
def get_log_prob_sequence(self, next_chars, context):
return np.log(1.0/self.voc_size)* len(next_chars)

Step by Step Solution

There are 3 Steps involved in it

1 Expert Approved Answer
Step: 1 Unlock blur-text-image
Question Has Been Solved by an Expert!

Get step-by-step solutions from verified subject matter experts

Step: 2 Unlock
Step: 3 Unlock

Students Have Also Explored These Related Programming Questions!