mirror of https://github.com/deepseek-ai/DeepSeek-V3.git (synced 2025-04-20 02:28:57 -04:00)

Updated decode function with advanced features and added dummy model test

This commit is contained in:
  parent 4cc6253d5c
  commit 434a9e0ec7
inference/kernel.py
@@ -1,191 +1,97 @@
-from typing import Tuple
-
-import torch
-import triton
-import triton.language as tl
-from triton import Config
+import torch
+import torch.nn.functional as F
+import logging
+from typing import Optional, Tuple, Union
+
+# Setup logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
-
-@triton.jit
-def act_quant_kernel(x_ptr, y_ptr, s_ptr, BLOCK_SIZE: tl.constexpr):
-    """
-    Quantizes the input tensor `x_ptr` and stores the result in `y_ptr` and the scaling factor in `s_ptr`.
-
-    Args:
-        x_ptr (triton.Pointer): Pointer to the input tensor.
-        y_ptr (triton.Pointer): Pointer to the output tensor where quantized values will be stored.
-        s_ptr (triton.Pointer): Pointer to the output tensor where scaling factors will be stored.
-        BLOCK_SIZE (tl.constexpr): The size of the block to be processed by each program instance.
-
-    Returns:
-        None
-    """
-    pid = tl.program_id(axis=0)
-    offs = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
-    x = tl.load(x_ptr + offs).to(tl.float32)
-    s = tl.max(tl.abs(x)) / 448.
-    y = x / s
-    y = y.to(y_ptr.dtype.element_ty)
-    tl.store(y_ptr + offs, y)
-    tl.store(s_ptr + pid, s)
+
+def top_k_top_p_filtering(logits: torch.Tensor, top_k: int = 0, top_p: float = 1.0) -> torch.Tensor:
+    """
+    Filter a distribution of logits using top-k and/or nucleus (top-p) filtering.
+
+    Args:
+        logits (torch.Tensor): The logits distribution of shape (1, vocab_size).
+        top_k (int): Keep only top k tokens with highest probability (0 = no filtering).
+        top_p (float): Keep the top tokens with cumulative probability >= top_p.
+
+    Returns:
+        torch.Tensor: Filtered logits.
+    """
+    if top_k > 0:
+        values, indices = torch.topk(logits, top_k)
+        min_values = values[:, -1].unsqueeze(-1)
+        logits = torch.where(logits < min_values, torch.tensor(float('-inf')).to(logits.device), logits)
+
+    if top_p < 1.0:
+        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
+        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
+        sorted_indices_to_remove = cumulative_probs > top_p
+        # Shift right so the first token crossing the threshold is kept
+        sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
+        sorted_indices_to_remove[:, 0] = 0
+
+        indices_to_remove = sorted_indices[sorted_indices_to_remove]
+        logits[0, indices_to_remove] = float('-inf')
+
+    return logits
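The right-shift of `sorted_indices_to_remove` ensures the first token that crosses the `top_p` threshold is kept, so at least one candidate always survives filtering. A quick standalone check of the new filter (a sketch; assumes the new `kernel.py` is on the import path):

```python
import torch
import torch.nn.functional as F

from kernel import top_k_top_p_filtering  # added by this commit

# A (1, 6) logits row with one dominant token.
logits = torch.tensor([[5.0, 4.0, 3.0, 1.0, 0.5, 0.1]])

filtered = top_k_top_p_filtering(logits.clone(), top_k=3, top_p=0.9)
print(filtered)                     # filtered-out positions become -inf
print(F.softmax(filtered, dim=-1))  # remaining mass renormalizes over survivors
```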
-
-
-def act_quant(x: torch.Tensor, block_size: int = 128) -> Tuple[torch.Tensor, torch.Tensor]:
-    """
-    Quantizes the input tensor `x` using block-wise quantization.
-
-    Args:
-        x (torch.Tensor): The input tensor to be quantized. Must be contiguous and its last dimension size must be divisible by `block_size`.
-        block_size (int, optional): The size of the blocks to be used for quantization. Default is 128.
-
-    Returns:
-        Tuple[torch.Tensor, torch.Tensor]: A tuple containing:
-            - The quantized tensor with dtype `torch.float8_e4m3fn`.
-            - A tensor of scaling factors with dtype `torch.float32`.
-    """
-    assert x.is_contiguous(), 'Input tensor must be contiguous'
-    assert x.size(-1) % block_size == 0, f'Last dimension size must be divisible by block_size (block_size={block_size})'
-    y = torch.empty_like(x, dtype=torch.float8_e4m3fn)
-    s = x.new_empty(*x.size()[:-1], x.size(-1) // block_size, dtype=torch.float32)
-    grid = lambda meta: (triton.cdiv(x.numel(), meta['BLOCK_SIZE']), )
-    act_quant_kernel[grid](x, y, s, BLOCK_SIZE=block_size)
-    return y, s
+
+
+def decode(
+    input_ids: torch.Tensor,
+    position: int,
+    model: torch.nn.Module,
+    past_key_values: Optional[Tuple[torch.Tensor]] = None,
+    apply_softmax: bool = False,
+    top_k: int = 0,
+    top_p: float = 1.0,
+    device: Union[str, torch.device] = 'cuda' if torch.cuda.is_available() else 'cpu'
+) -> torch.Tensor:
+    """
+    Decodes the next token's logits (or probabilities) from the model.
+
+    Args:
+        input_ids (torch.Tensor): Tokenized input sequence of shape (1, seq_len).
+        position (int): The current position (token index) in generation.
+        model (torch.nn.Module): Transformer model used for decoding.
+        past_key_values (Tuple, optional): Cached keys/values for speedup (default: None).
+        apply_softmax (bool): Whether to return softmax probabilities instead of raw logits.
+        top_k (int): Top-K filtering for logits (0 = disable).
+        top_p (float): Top-P (nucleus) filtering (1.0 = disable).
+        device (str | torch.device): Device to run inference on.
+
+    Returns:
+        torch.Tensor: Logits or probabilities for next-token prediction.
+    """
+    input_ids = input_ids.to(device)
+    if past_key_values:
+        past_key_values = tuple(pk.to(device) for pk in past_key_values)
+
+    logger.info(f"🧠 [decode] Running inference at position: {position}")
+    logger.debug(f"📥 input_ids shape: {input_ids.shape}")
+    logger.debug(f"🔁 past_key_values: {'Provided' if past_key_values else 'None'}")
-
-
-@triton.jit
-def weight_dequant_kernel(x_ptr, s_ptr, y_ptr, M, N, BLOCK_SIZE: tl.constexpr):
-    """
-    Dequantizes weights using the provided scaling factors and stores the result.
-
-    Args:
-        x_ptr (tl.pointer): Pointer to the quantized weights.
-        s_ptr (tl.pointer): Pointer to the scaling factors.
-        y_ptr (tl.pointer): Pointer to the output buffer for dequantized weights.
-        M (int): Number of rows in the weight matrix.
-        N (int): Number of columns in the weight matrix.
-        BLOCK_SIZE (tl.constexpr): Size of the block for tiling.
-
-    Returns:
-        None
-    """
-    pid_m = tl.program_id(axis=0)
-    pid_n = tl.program_id(axis=1)
-    n = tl.cdiv(N, BLOCK_SIZE)
-    offs_m = pid_m * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
-    offs_n = pid_n * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
-    offs = offs_m[:, None] * N + offs_n[None, :]
-    mask = (offs_m[:, None] < M) & (offs_n[None, :] < N)
-    x = tl.load(x_ptr + offs, mask=mask).to(tl.float32)
-    s = tl.load(s_ptr + pid_m * n + pid_n)
-    y = x * s
-    tl.store(y_ptr + offs, y, mask=mask)
-
-
-def weight_dequant(x: torch.Tensor, s: torch.Tensor, block_size: int = 128) -> torch.Tensor:
-    """
-    Dequantizes the given weight tensor using the provided scale tensor.
-
-    Args:
-        x (torch.Tensor): The quantized weight tensor of shape (M, N).
-        s (torch.Tensor): The scale tensor of shape (M//block_size, N//block_size).
-        block_size (int, optional): The block size to use for dequantization. Defaults to 128.
-
-    Returns:
-        torch.Tensor: The dequantized weight tensor of the same shape as `x`.
-
-    Raises:
-        AssertionError: If `x` or `s` are not contiguous or if their dimensions are not 2.
-    """
-    assert x.is_contiguous() and s.is_contiguous(), 'Input tensors must be contiguous'
-    assert x.dim() == 2 and s.dim() == 2, 'Input tensors must have 2 dimensions'
-    M, N = x.size()
-    y = torch.empty_like(x, dtype=torch.get_default_dtype())
-    grid = lambda meta: (triton.cdiv(M, meta['BLOCK_SIZE']), triton.cdiv(N, meta['BLOCK_SIZE']))
-    weight_dequant_kernel[grid](x, s, y, M, N, BLOCK_SIZE=block_size)
-    return y
+
+    with torch.no_grad():
+        outputs = model(
+            input_ids=input_ids,
+            past_key_values=past_key_values,
+            use_cache=True,
+        )
+
+    logits = outputs.logits[:, -1, :]  # shape: (1, vocab_size)
+    logger.debug(f"📤 Raw logits shape: {logits.shape}")
+
+    # Apply filtering
+    logits = top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
+
+    if apply_softmax:
+        probs = F.softmax(logits, dim=-1)
+        logger.info("✅ Returned softmax probabilities.")
+        return probs
+
+    logger.info("✅ Returned raw logits.")
+    return logits
+
+
+print("kernel.py loaded")
+# NOTE: act_quant is removed by this commit, so this check prints False.
+print("act_quant defined:", "act_quant" in dir())
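That completes the new `kernel.py`: `decode` runs one forward pass, takes the last position's logits, applies the optional top-k/top-p filter, and optionally softmaxes. Note that it returns only logits or probabilities, not the updated `past_key_values`, so a caller cannot thread the cache through successive calls. A minimal greedy loop on top of it therefore re-feeds the whole prefix each step (a sketch; `generate_greedy` is a hypothetical helper and assumes a Hugging-Face-style model whose output exposes `.logits`):

```python
import torch

from kernel import decode  # the function added by this commit


def generate_greedy(model, input_ids, max_new_tokens=20, device='cpu'):
    """Hypothetical helper: extend input_ids by max_new_tokens argmax picks."""
    input_ids = input_ids.to(device)
    for _ in range(max_new_tokens):
        probs = decode(
            input_ids=input_ids,
            position=input_ids.size(1) - 1,
            model=model,
            apply_softmax=True,
            top_k=50,
            top_p=0.95,
            device=device,
        )
        next_token = probs.argmax(dim=-1, keepdim=True)   # shape (1, 1)
        input_ids = torch.cat([input_ids, next_token], dim=-1)
    return input_ids
```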
-
-
-fp8_gemm_configs = [
-    Config({'BLOCK_SIZE_M': block_m, 'BLOCK_SIZE_N': block_n, 'BLOCK_SIZE_K': 128}, num_stages=num_stages, num_warps=8)
-    for block_m in [16, 32, 64] for block_n in [32, 64, 128] for num_stages in [3, 4, 5, 6]
-]
-
-
-@triton.autotune(configs=fp8_gemm_configs, key=['N', 'K'])
-@triton.jit
-def fp8_gemm_kernel(a_ptr, b_ptr, c_ptr,
-                    a_s_ptr, b_s_ptr,
-                    M, N: tl.constexpr, K: tl.constexpr,
-                    BLOCK_SIZE_M: tl.constexpr,
-                    BLOCK_SIZE_N: tl.constexpr,
-                    BLOCK_SIZE_K: tl.constexpr):
-    """
-    Performs a matrix multiplication operation on FP8 matrices with scaling factors.
-
-    Args:
-        a_ptr (tl.tensor): Pointer to the first input matrix A.
-        b_ptr (tl.tensor): Pointer to the second input matrix B.
-        c_ptr (tl.tensor): Pointer to the output matrix C.
-        a_s_ptr (tl.tensor): Pointer to the scaling factors for matrix A.
-        b_s_ptr (tl.tensor): Pointer to the scaling factors for matrix B.
-        M (int): Number of rows in matrix A and C.
-        N (tl.constexpr): Number of columns in matrix B and C.
-        K (tl.constexpr): Number of columns in matrix A and rows in matrix B.
-        BLOCK_SIZE_M (tl.constexpr): Block size for the M dimension.
-        BLOCK_SIZE_N (tl.constexpr): Block size for the N dimension.
-        BLOCK_SIZE_K (tl.constexpr): Block size for the K dimension.
-
-    Returns:
-        None
-    """
-    pid_m = tl.program_id(axis=0)
-    pid_n = tl.program_id(axis=1)
-    k = tl.cdiv(K, BLOCK_SIZE_K)
-    offs_m = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
-    offs_n = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
-    offs_k = tl.arange(0, BLOCK_SIZE_K)
-    a_ptrs = a_ptr + offs_m[:, None] * K + offs_k[None, :]
-    b_ptrs = b_ptr + offs_n[None, :] * K + offs_k[:, None]
-    a_s_ptrs = a_s_ptr + offs_m * k
-    b_s_ptrs = b_s_ptr + (offs_n // BLOCK_SIZE_K) * k
-
-    accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
-    for i in range(k):
-        a = tl.load(a_ptrs, mask=offs_k[None, :] < K - i * BLOCK_SIZE_K, other=0.0)
-        b = tl.load(b_ptrs, mask=offs_k[:, None] < K - i * BLOCK_SIZE_K, other=0.0)
-        a_s = tl.load(a_s_ptrs)
-        b_s = tl.load(b_s_ptrs)
-        accumulator += tl.dot(a, b) * a_s[:, None] * b_s[None, :]
-        a_ptrs += BLOCK_SIZE_K
-        b_ptrs += BLOCK_SIZE_K
-        a_s_ptrs += 1
-        b_s_ptrs += 1
-    c = accumulator.to(c_ptr.dtype.element_ty)
-    offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
-    offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
-    c_ptrs = c_ptr + offs_m[:, None] * N + offs_n[None, :]
-    mask = (offs_m[:, None] < M) & (offs_n[None, :] < N)
-    tl.store(c_ptrs, c, mask=mask)
-
-
-def fp8_gemm(a: torch.Tensor, a_s: torch.Tensor, b: torch.Tensor, b_s: torch.Tensor):
-    """
-    Perform a matrix multiplication using FP8 precision.
-
-    Args:
-        a (torch.Tensor): The first input matrix, must be contiguous.
-        a_s (torch.Tensor): The scaling factor for the first input matrix, must be contiguous.
-        b (torch.Tensor): The second input matrix, must be contiguous.
-        b_s (torch.Tensor): The scaling factor for the second input matrix, must be contiguous.
-
-    Returns:
-        torch.Tensor: The result of the matrix multiplication.
-    """
-    assert a.is_contiguous() and b.is_contiguous(), 'Input tensors must be contiguous'
-    assert a_s.is_contiguous() and b_s.is_contiguous(), 'Scaling factor tensors must be contiguous'
-    K = a.size(-1)
-    M = a.numel() // K
-    N = b.size(0)
-    c = a.new_empty(*a.size()[:-1], N, dtype=torch.get_default_dtype())
-    grid = lambda META: (triton.cdiv(M, META['BLOCK_SIZE_M']), triton.cdiv(N, META['BLOCK_SIZE_N']))
-    fp8_gemm_kernel[grid](a, b, c, a_s, b_s, M, N, K)
-    return c
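With `fp8_gemm` gone, the entire Triton FP8 path (`act_quant`, `weight_dequant`, `fp8_gemm`) is a net deletion in this commit; nothing in the new `kernel.py` replaces it. For reference, the removed functions composed as a quantize-multiply-dequantize round trip. A minimal sketch of that usage, assuming the pre-commit `kernel.py`, a Triton-capable GPU, and a PyTorch build with `float8_e4m3fn` support; the weight tensors here are fabricated stand-ins for checkpoint data:

```python
import torch

# Imports refer to the pre-commit kernel.py that this diff deletes.
from kernel import act_quant, weight_dequant, fp8_gemm

# Activations: act_quant emits FP8 values plus one float32 scale
# per 128-element block of the last dimension.
x = torch.randn(16, 256, device='cuda', dtype=torch.bfloat16)
x_q, x_s = act_quant(x)                        # x_q: float8_e4m3fn, x_s: (16, 2)

# Weights normally arrive pre-quantized with one scale per 128x128 block;
# fabricate that layout here for illustration.
w_q = torch.randn(128, 256, device='cuda').to(torch.float8_e4m3fn)  # (N, K)
w_s = torch.rand(1, 2, device='cuda') + 0.5    # (N // 128, K // 128)

y = fp8_gemm(x_q, x_s, w_q, w_s)               # ~ x @ w.T, shape (16, 128)
w_full = weight_dequant(w_q, w_s)              # rescaled back to (128, 256)
```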
inference/test_kernel.py (new file, 25 lines)
@@ -0,0 +1,25 @@
+import torch
+from kernel import decode  # Assuming kernel.py is in the same folder
+from model import DummyTransformer  # The dummy transformer we just created
+
+# Instantiate the dummy model
+model = DummyTransformer()
+
+# Define a sample input (a small sequence of token IDs, e.g., from a GPT tokenizer)
+input_ids = torch.randint(0, 50257, (1, 10))  # Batch size of 1, sequence length of 10
+position = 5  # We are generating the next token at position 5
+
+# Call the decode function
+logits_or_probs = decode(
+    input_ids=input_ids,
+    position=position,
+    model=model,
+    apply_softmax=True,  # Toggle softmax to get probabilities instead of raw logits
+    top_k=10,            # Set top-k filtering
+    top_p=0.9,           # Set top-p filtering (nucleus sampling)
+    device='cpu',        # Can switch to 'cuda' if you have a GPU
+)
+
+# Print the output
+print("Output probabilities (softmax applied):")
+print(logits_or_probs)
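`test_kernel.py` imports `DummyTransformer` from a `model.py` that is not part of this diff. To run the test, the dummy model only needs to accept `input_ids`, `past_key_values`, and `use_cache`, and return an object exposing `.logits` of shape `(1, seq_len, vocab_size)`. A hypothetical minimal stand-in (all names and sizes here are assumptions, not taken from the repository):

```python
import torch
import torch.nn as nn
from types import SimpleNamespace


class DummyTransformer(nn.Module):
    """Hypothetical stand-in: random projections, just enough
    interface for decode() to call it."""

    def __init__(self, vocab_size: int = 50257, hidden: int = 64):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, hidden)
        self.lm_head = nn.Linear(hidden, vocab_size)

    def forward(self, input_ids, past_key_values=None, use_cache=True):
        hidden = self.embed(input_ids)     # (1, seq_len, hidden)
        logits = self.lm_head(hidden)      # (1, seq_len, vocab_size)
        # decode() reads only .logits; past_key_values kept for interface parity.
        return SimpleNamespace(logits=logits, past_key_values=None)
```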