diff --git a/inference/generate.py b/inference/generate.py
index fbf3ab8..c81f87f 100644
--- a/inference/generate.py
+++ b/inference/generate.py
@@ -28,54 +28,66 @@ def sample(logits, temperature: float = 1.0):
 
 
 @torch.inference_mode()
-def generate(
+def generate_single_sequence(args):
+    """
+    Generates tokens for a single sequence.
+
+    Args:
+        args: Tuple containing (model, tokens, max_new_tokens, eos_id, temperature)
+
+    Returns:
+        List of generated tokens.
+    """
+    model, tokens, max_new_tokens, eos_id, temperature = args
+    total_len = min(model.max_seq_len, max_new_tokens + tokens.shape[1])
+    tokens = torch.cat([tokens, torch.full((1, total_len - tokens.shape[1]), -1, dtype=torch.long, device="cuda")], dim=1)
+
+    prev_pos = tokens.shape[1] - max_new_tokens
+    finished = torch.tensor([False], device="cuda")
+
+    for cur_pos in range(prev_pos, total_len):
+        logits = model.forward(tokens[:, prev_pos:cur_pos], prev_pos)
+        next_token = sample(logits, temperature) if temperature > 0 else logits.argmax(dim=-1)
+        tokens[:, cur_pos] = next_token
+        finished |= next_token == eos_id
+        if finished.all():
+            break
+
+    generated_tokens = tokens.tolist()[0]
+    return generated_tokens[tokens.shape[1] - max_new_tokens :]
+
+@torch.inference_mode()
+def generate_parallel(
     model: Transformer,
     prompt_tokens: List[List[int]],
     max_new_tokens: int,
     eos_id: int,
-    temperature: float = 1.0
+    temperature: float = 1.0,
+    num_workers: int = 4
 ) -> List[List[int]]:
     """
-    Generates new tokens based on the given prompt tokens using the specified model.
+    Parallelized token generation using multiprocessing.
 
     Args:
         model (Transformer): The transformer model used for token generation.
         prompt_tokens (List[List[int]]): A list of lists containing the prompt tokens for each sequence.
         max_new_tokens (int): The maximum number of new tokens to generate.
         eos_id (int): The end-of-sequence token ID.
-        temperature (float, optional): The temperature value for sampling. Defaults to 1.0.
+        temperature (float, optional): Temperature for sampling. Defaults to 1.0.
+        num_workers (int, optional): Number of worker processes for parallel generation.
 
     Returns:
         List[List[int]]: A list of lists containing the generated tokens for each sequence.
     """
-    prompt_lens = [len(t) for t in prompt_tokens]
-    assert max(prompt_lens) <= model.max_seq_len
-    total_len = min(model.max_seq_len, max_new_tokens + max(prompt_lens))
-    tokens = torch.full((len(prompt_tokens), total_len), -1, dtype=torch.long, device="cuda")
-    for i, t in enumerate(prompt_tokens):
-        tokens[i, :len(t)] = torch.tensor(t, dtype=torch.long, device="cuda")
-    prev_pos = 0
-    finished = torch.tensor([False] * len(prompt_tokens), device="cuda")
-    prompt_mask = tokens != -1
-    for cur_pos in range(min(prompt_lens), total_len):
-        logits = model.forward(tokens[:, prev_pos:cur_pos], prev_pos)
-        if temperature > 0:
-            next_token = sample(logits, temperature)
-        else:
-            next_token = logits.argmax(dim=-1)
-        next_token = torch.where(prompt_mask[:, cur_pos], tokens[:, cur_pos], next_token)
-        tokens[:, cur_pos] = next_token
-        finished |= torch.logical_and(~prompt_mask[:, cur_pos], next_token == eos_id)
-        prev_pos = cur_pos
-        if finished.all():
-            break
-    completion_tokens = []
-    for i, toks in enumerate(tokens.tolist()):
-        toks = toks[prompt_lens[i]:prompt_lens[i]+max_new_tokens]
-        if eos_id in toks:
-            toks = toks[:toks.index(eos_id)]
-        completion_tokens.append(toks)
-    return completion_tokens
+    model.share_memory()  # Make the model shareable across processes
+    tokens_list = [torch.tensor(t, dtype=torch.long, device="cuda").unsqueeze(0) for t in prompt_tokens]
+
+    args_list = [(model, tokens, max_new_tokens, eos_id, temperature) for tokens in tokens_list]
+
+    with mp.Pool(num_workers) as pool:
+        results = pool.map(generate_single_sequence, args_list)
+
+    return results
 
 
 def main(
@@ -147,7 +159,7 @@ def main(
             prompts = [line.strip() for line in f.readlines()]
         assert len(prompts) <= args.max_batch_size
         prompt_tokens = [tokenizer.apply_chat_template([{"role": "user", "content": prompt}], add_generation_prompt=True) for prompt in prompts]
-        completion_tokens = generate(model, prompt_tokens, max_new_tokens, tokenizer.eos_token_id, temperature)
+        completion_tokens = generate_parallel(model, prompt_tokens, max_new_tokens, tokenizer.eos_token_id, temperature, num_workers=4)
         completions = tokenizer.batch_decode(completion_tokens, skip_special_tokens=True)
         for prompt, completion in zip(prompts, completions):
            print("Prompt:", prompt)