transformer

Transformer

Bases: Distribution

Source code in hfppl/distributions/transformer.py
import numpy as np

from .distribution import Distribution
from ..llms import Token, TokenSequence


class Transformer(Distribution):

    def __init__(self, lm, prompt, temp=1.0):
        """Create a Categorical distribution whose values are Tokens, with probabilities given
        by a language model. Supports auto-batching.

        Args:
            lm (hfppl.llms.CachedCausalLM): the language model.
            prompt (str | hfppl.llms.TokenSequence): the sequence of tokens to use as the prompt. If a string, `lm.tokenizer` is used to encode it.
            temp (float): temperature at which to generate (0 < `temp` < `float('inf')`).
        """
        self.lm = lm
        self.temp = temp

        # Normalize the prompt to a list of token ids.
        if isinstance(prompt, str):
            prompt = self.lm.tokenizer.encode(prompt)
        elif isinstance(prompt, TokenSequence):
            prompt = prompt.seq

        self.prompt = prompt

    async def log_prob(self, x):
        log_probs = await self.lm.next_token_logprobs(self.prompt)
        log_probs = log_probs / self.temp
        # Renormalize: after temperature scaling, the vector no longer
        # exponentiates to a distribution that sums to 1.
        log_probs = log_probs - np.logaddexp.reduce(log_probs)

        if isinstance(x, Token):
            x = x.token_id

        return log_probs[x]

    async def sample(self):
        log_probs = await self.lm.next_token_logprobs(self.prompt)
        log_probs = log_probs / self.temp
        # Renormalize before exponentiating, for the same reason as above.
        log_probs = log_probs - np.logaddexp.reduce(log_probs)
        probs = np.exp(log_probs)
        token_id = np.random.choice(len(probs), p=probs)
        logprob = log_probs[token_id]
        return (
            Token(self.lm, token_id, self.lm.tokenizer.convert_ids_to_tokens(token_id)),
            logprob,
        )
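
A minimal usage sketch, assuming the hfppl Model API (an async step method, an awaitable self.sample, and finish). The model class, its name, and the prompt handling below are illustrative assumptions, not part of this module:

from hfppl import Model
from hfppl.distributions.transformer import Transformer

class OneTokenModel(Model):  # hypothetical example model
    def __init__(self, lm, prompt):
        super().__init__()
        self.lm = lm
        self.prompt = lm.tokenizer.encode(prompt)  # list of token ids

    async def step(self):
        # Sampling a Transformer draws the next token from the LM's
        # next-token distribution; calls are auto-batched across particles.
        token = await self.sample(Transformer(self.lm, self.prompt, temp=0.8))
        self.prompt.append(token.token_id)
        self.finish()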

__init__(lm, prompt, temp=1.0)

Create a Categorical distribution whose values are Tokens, with probabilities given by a language model. Supports auto-batching.

Parameters:

Name    Type                 Description                                                                                    Default
lm      CachedCausalLM       the language model.                                                                            required
prompt  str | TokenSequence  the sequence of tokens to use as the prompt. If a string, lm.tokenizer is used to encode it.   required
temp    float                temperature at which to generate (0 < temp < float('inf')).                                    1.0
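
The temp parameter implements standard softmax temperature: log-probabilities are divided by temp and then renormalized, so temp < 1 sharpens the distribution and temp > 1 flattens it. A self-contained numpy sketch of that mechanism (illustrative only, not this class's code):

import numpy as np

def temperature_sample(log_probs, temp=1.0, rng=None):
    """Sample an index from temperature-scaled log-probabilities."""
    rng = rng or np.random.default_rng()
    scaled = log_probs / temp
    # Log-softmax: subtract the log-sum-exp so that exp(scaled) sums to 1.
    scaled = scaled - np.logaddexp.reduce(scaled)
    return rng.choice(len(scaled), p=np.exp(scaled))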
Source code in hfppl/distributions/transformer.py
def __init__(self, lm, prompt, temp=1.0):
    """Create a Categorical distribution whose values are Tokens, with probabilities given
    by a language model. Supports auto-batching.

    Args:
        lm (hfppl.llms.CachedCausalLM): the language model.
        prompt (str | hfppl.llms.TokenSequence): the sequence of tokens to use as the prompt. If a string, `lm.tokenizer` is used to encode it.
        temp (float): temperature at which to generate (0 < `temp` < `float('inf')`).
    """
    self.lm = lm
    self.temp = temp

    # Normalize the prompt to a list of token ids.
    if isinstance(prompt, str):
        prompt = self.lm.tokenizer.encode(prompt)
    elif isinstance(prompt, TokenSequence):
        prompt = prompt.seq

    self.prompt = prompt
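
Both prompt forms normalize to the same list of token ids. A hypothetical construction example (the model id and the CachedCausalLM.from_pretrained loading step are assumptions for illustration):

from hfppl.llms import CachedCausalLM
from hfppl.distributions.transformer import Transformer

lm = CachedCausalLM.from_pretrained("gpt2")  # example model id (assumption)

# String prompt: encoded with lm.tokenizer.
dist = Transformer(lm, "An apple a day", temp=0.7)

# Equivalent: pass token ids directly.
dist = Transformer(lm, lm.tokenizer.encode("An apple a day"), temp=0.7)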