Mirror of https://github.com/deepseek-ai/DeepSeek-V3.git, synced 2025-02-23 06:08:58 -05:00
Update generate.py: Add parallel processing for token generation
This update introduces parallel processing for token generation using torch.multiprocessing.Pool. The new implementation improves inference speed by processing multiple sequences concurrently.

- Added the generate_parallel() function for parallel token generation.
- Used multiprocessing to distribute the workload across multiple worker processes, allowing tokens for multiple prompts to be generated faster.
- Added the generate_single_sequence() function to hold the per-sequence generation logic; each worker calls it in parallel.
- Introduced the num_workers parameter to control the number of worker processes (default: 4).
- The model is shared across processes for efficient memory usage.

These changes are particularly beneficial for batch processing and multi-prompt generation scenarios, where multiple sequences need to be generated simultaneously.
parent b5d872ead0
commit 38333fb817
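For context, a minimal sketch of how the new entry point is meant to be driven. The __main__ guard and the "spawn" start method are my assumptions, not part of the commit: CUDA contexts do not survive fork(), so torch.multiprocessing workers that touch GPU tensors generally need spawn. Here model, prompt_tokens, and tokenizer stand in for the objects that main() builds.

import torch.multiprocessing as mp

if __name__ == "__main__":
    # Assumption: spawn lets each worker initialize its own CUDA context;
    # the default fork start method breaks CUDA in child processes.
    mp.set_start_method("spawn", force=True)
    completion_tokens = generate_parallel(
        model,                          # Transformer, already on the GPU
        prompt_tokens,                  # List[List[int]] of tokenized prompts
        max_new_tokens=200,
        eos_id=tokenizer.eos_token_id,
        temperature=0.7,
        num_workers=4,                  # size of the worker pool
    )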
@@ -28,54 +28,66 @@ def sample(logits, temperature: float = 1.0):
 @torch.inference_mode()
-def generate(
+def generate_single_sequence(args):
+    """
+    Generates tokens for a single sequence.
+
+    Args:
+        args: Tuple containing (model, tokens, max_new_tokens, eos_id, temperature)
+
+    Returns:
+        List of generated tokens.
+    """
+    model, tokens, max_new_tokens, eos_id, temperature = args
+    total_len = min(model.max_seq_len, max_new_tokens + tokens.shape[1])
+    tokens = torch.cat([tokens, torch.full((1, total_len - tokens.shape[1]), -1, dtype=torch.long, device="cuda")], dim=1)
+    prev_pos = tokens.shape[1] - max_new_tokens
+    finished = torch.tensor([False], device="cuda")
+    for cur_pos in range(prev_pos, total_len):
+        logits = model.forward(tokens[:, prev_pos:cur_pos], prev_pos)
+        next_token = sample(logits, temperature) if temperature > 0 else logits.argmax(dim=-1)
+        tokens[:, cur_pos] = next_token
+        finished |= next_token == eos_id
+        if finished.all():
+            break
+    generated_tokens = tokens.tolist()[0]
+    return generated_tokens[tokens.shape[1] - max_new_tokens :]
+
+
+@torch.inference_mode()
+def generate_parallel(
     model: Transformer,
     prompt_tokens: List[List[int]],
     max_new_tokens: int,
     eos_id: int,
-    temperature: float = 1.0
+    temperature: float = 1.0,
+    num_workers: int = 4
 ) -> List[List[int]]:
     """
-    Generates new tokens based on the given prompt tokens using the specified model.
+    Parallelized token generation using multiprocessing.
 
     Args:
         model (Transformer): The transformer model used for token generation.
         prompt_tokens (List[List[int]]): A list of lists containing the prompt tokens for each sequence.
         max_new_tokens (int): The maximum number of new tokens to generate.
         eos_id (int): The end-of-sequence token ID.
-        temperature (float, optional): The temperature value for sampling. Defaults to 1.0.
+        temperature (float, optional): Temperature for sampling. Defaults to 1.0.
+        num_workers (int, optional): Number of worker processes for parallel generation.
 
     Returns:
         List[List[int]]: A list of lists containing the generated tokens for each sequence.
     """
-    prompt_lens = [len(t) for t in prompt_tokens]
-    assert max(prompt_lens) <= model.max_seq_len
-    total_len = min(model.max_seq_len, max_new_tokens + max(prompt_lens))
-    tokens = torch.full((len(prompt_tokens), total_len), -1, dtype=torch.long, device="cuda")
-    for i, t in enumerate(prompt_tokens):
-        tokens[i, :len(t)] = torch.tensor(t, dtype=torch.long, device="cuda")
-    prev_pos = 0
-    finished = torch.tensor([False] * len(prompt_tokens), device="cuda")
-    prompt_mask = tokens != -1
-    for cur_pos in range(min(prompt_lens), total_len):
-        logits = model.forward(tokens[:, prev_pos:cur_pos], prev_pos)
-        if temperature > 0:
-            next_token = sample(logits, temperature)
-        else:
-            next_token = logits.argmax(dim=-1)
-        next_token = torch.where(prompt_mask[:, cur_pos], tokens[:, cur_pos], next_token)
-        tokens[:, cur_pos] = next_token
-        finished |= torch.logical_and(~prompt_mask[:, cur_pos], next_token == eos_id)
-        prev_pos = cur_pos
-        if finished.all():
-            break
-    completion_tokens = []
-    for i, toks in enumerate(tokens.tolist()):
-        toks = toks[prompt_lens[i]:prompt_lens[i]+max_new_tokens]
-        if eos_id in toks:
-            toks = toks[:toks.index(eos_id)]
-        completion_tokens.append(toks)
-    return completion_tokens
+    model.share_memory()  # Make the model shareable across processes
+    tokens_list = [torch.tensor(t, dtype=torch.long, device="cuda").unsqueeze(0) for t in prompt_tokens]
+    args_list = [(model, tokens, max_new_tokens, eos_id, temperature) for tokens in tokens_list]
+    with mp.Pool(num_workers) as pool:
+        results = pool.map(generate_single_sequence, args_list)
+    return results
 
 
 def main(
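Two notes on the new functions above, with a small illustrative sketch. First, model.share_memory() moves CPU-resident parameters into shared memory so the worker processes reuse one copy instead of each receiving its own; for CUDA tensors the call is a no-op, since those are shared across processes through IPC handles. Second, the fan-out itself is a plain data-parallel map: each worker unpacks one (model, tokens, max_new_tokens, eos_id, temperature) tuple and decodes one sequence. Ignoring the process-level concurrency, pool.map computes the following, and it preserves input order, so results[i] corresponds to prompt_tokens[i]:

# Sequential equivalent of pool.map(generate_single_sequence, args_list):
# one result per argument tuple, returned in the original input order.
results = [generate_single_sequence(args) for args in args_list]

To make the index arithmetic in generate_single_sequence concrete: for a prompt of 10 tokens with max_new_tokens=5 (and max_seq_len large enough), total_len is 15, the token tensor is padded to 15 columns, prev_pos is 15 - 5 = 10, the loop fills positions 10 through 14, and the returned slice [10:] is exactly the 5 generated tokens.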
@@ -147,7 +159,7 @@ def main(
         prompts = [line.strip() for line in f.readlines()]
         assert len(prompts) <= args.max_batch_size
         prompt_tokens = [tokenizer.apply_chat_template([{"role": "user", "content": prompt}], add_generation_prompt=True) for prompt in prompts]
-        completion_tokens = generate(model, prompt_tokens, max_new_tokens, tokenizer.eos_token_id, temperature)
+        completion_tokens = generate_parallel(model, prompt_tokens, max_new_tokens, tokenizer.eos_token_id, temperature, num_workers=4)
         completions = tokenizer.batch_decode(completion_tokens, skip_special_tokens=True)
         for prompt, completion in zip(prompts, completions):
             print("Prompt:", prompt)
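One behavioral difference at this call site is worth flagging: the old generate() cut each completion at the first eos_id before returning, while generate_parallel() returns each worker's raw tail slice. A small helper that restores the old trimming, if callers want it; trim_at_eos is a hypothetical name of mine, not something the commit adds:

def trim_at_eos(toks, eos_id):
    # Cut the completion at the first end-of-sequence token,
    # matching what the previous generate() implementation returned.
    return toks[:toks.index(eos_id)] if eos_id in toks else toks

completion_tokens = [trim_at_eos(toks, tokenizer.eos_token_id) for toks in completion_tokens]

With skip_special_tokens=True, batch_decode already drops the eos token itself from the decoded text, but trimming first also discards the -1 padding that fills the positions after an early stop.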