Mirror of https://github.com/deepseek-ai/DeepSeek-V3.git (synced 2025-04-19 18:18:57 -04:00)

Merge a3f30dc59c into 57d7bd45df
Commit 7b07cc985c
inference/generate.py

@@ -24,7 +24,27 @@ def sample(logits, temperature: float = 1.0):
     """
     logits = logits / max(temperature, 1e-5)
     probs = torch.softmax(logits, dim=-1)
-    return probs.div_(torch.empty_like(probs).exponential_(1)).argmax(dim=-1)
+    return torch.multinomial(probs, num_samples=1).squeeze(-1)  # sample from the probability distribution
 
 
+def init_distributed(rank: int, world_size: int, local_rank: int):
+    """
+    Initialize the distributed process group and set device configurations.
+
+    Args:
+        rank (int): The rank of the current process.
+        world_size (int): Total number of processes.
+        local_rank (int): The local rank for multi-GPU configurations.
+    """
+    if world_size > 1:
+        dist.init_process_group("nccl")
+    global print
+    if rank != 0:
+        print = lambda *_, **__: None
+    torch.cuda.set_device(local_rank)
+    torch.set_default_dtype(torch.bfloat16)
+    torch.set_num_threads(8)
+    torch.manual_seed(965)
+
+
 @torch.inference_mode()
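
Note: both the old and new return lines draw from softmax(logits / T). The old expression is the Gumbel-max trick (argmax of the probabilities divided by Exponential(1) noise); torch.multinomial samples the categorical distribution directly but returns an extra trailing dimension, hence the squeeze above. A minimal sketch of the equivalence, assuming CPU tensors are fine for the demo:

    import torch

    torch.manual_seed(0)
    logits = torch.randn(4, 32)                    # (batch, vocab)
    probs = torch.softmax(logits / 0.8, dim=-1)    # temperature 0.8

    # Old path: exponential-noise (Gumbel-max) trick.
    trick = probs.div(torch.empty_like(probs).exponential_(1)).argmax(dim=-1)
    # New path: direct categorical sampling, squeezed back to (batch,).
    direct = torch.multinomial(probs, num_samples=1).squeeze(-1)

    print(trick.shape, direct.shape)               # both torch.Size([4])
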
@@ -100,20 +120,17 @@ def main(
     world_size = int(os.getenv("WORLD_SIZE", "1"))
     rank = int(os.getenv("RANK", "0"))
     local_rank = int(os.getenv("LOCAL_RANK", "0"))
-    if world_size > 1:
-        dist.init_process_group("nccl")
-    global print
-    if rank != 0:
-        print = lambda *_, **__: None
-    torch.cuda.set_device(local_rank)
-    torch.set_default_dtype(torch.bfloat16)
-    torch.set_num_threads(8)
-    torch.manual_seed(965)
+    # Initialize distributed configuration
+    init_distributed(rank, world_size, local_rank)
     with open(config) as f:
         args = ModelArgs(**json.load(f))
     print(args)
 
     with torch.device("cuda"):
         model = Transformer(args)
 
     tokenizer = AutoTokenizer.from_pretrained(ckpt_path)
     tokenizer.decode(generate(model, [tokenizer.encode("DeepSeek")], 2, -1, 1.)[0])
     load_model(model, os.path.join(ckpt_path, f"model{rank}-mp{world_size}.safetensors"))
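
Note: main() still reads the rank variables from the environment and now hands them to init_distributed(). A sketch of the usual invocation, assuming torchrun as the launcher (it exports RANK, WORLD_SIZE, and LOCAL_RANK for each process):

    # torchrun --nnodes 1 --nproc_per_node 8 generate.py \
    #     --ckpt-path <path> --config <path> --interactive
    import os

    rank = int(os.getenv("RANK", "0"))             # set by the launcher
    world_size = int(os.getenv("WORLD_SIZE", "1"))
    local_rank = int(os.getenv("LOCAL_RANK", "0"))
    init_distributed(rank, world_size, local_rank)  # replaces the nine inline setup lines
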
@@ -171,7 +188,7 @@ if __name__ == "__main__":
         --temperature (float, optional): Temperature for sampling. Defaults to 0.2.
 
     Raises:
-        AssertionError: If neither input-file nor interactive mode is specified.
+        ValueError: If neither input-file nor interactive mode is specified.
     """
     parser = ArgumentParser()
     parser.add_argument("--ckpt-path", type=str, required=True)
@@ -181,5 +198,16 @@ if __name__ == "__main__":
     parser.add_argument("--max-new-tokens", type=int, default=200)
     parser.add_argument("--temperature", type=float, default=0.2)
     args = parser.parse_args()
-    assert args.input_file or args.interactive, "Either input-file or interactive mode must be specified"
-    main(args.ckpt_path, args.config, args.input_file, args.interactive, args.max_new_tokens, args.temperature)
+    # Validate input
+    if not (args.input_file or args.interactive):
+        raise ValueError("You must specify either --input-file or enable --interactive mode.")
+
+    main(
+        args.ckpt_path,
+        args.config,
+        args.input_file,
+        args.interactive,
+        args.max_new_tokens,
+        args.temperature,
+    )
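
Note: besides reading more clearly, the explicit raise survives optimized runs: assert statements are stripped when Python runs with -O, so the old check could silently let invalid arguments through. A small demonstration:

    import subprocess, sys

    code = 'assert False, "never checked"; print("reached")'
    subprocess.run([sys.executable, "-O", "-c", code])   # prints "reached"
    subprocess.run([sys.executable, "-c", code])         # fails with AssertionError
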
inference/kernel.py

@@ -1,191 +1,134 @@
-from typing import Tuple
-
 import torch
 import triton
 import triton.language as tl
-from triton import Config
 
 
 @triton.jit
-def act_quant_kernel(x_ptr, y_ptr, s_ptr, BLOCK_SIZE: tl.constexpr):
+def weight_dequant_kernel(
+    q_ptr, s_ptr, out_ptr, M, N,
+    stride_qm, stride_qn, stride_sm, stride_sn,
+    stride_om, stride_on,
+    BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr
+):
     """
-    Quantizes the input tensor `x_ptr` and stores the result in `y_ptr` and the scaling factor in `s_ptr`.
-
-    Args:
-        x_ptr (triton.Pointer): Pointer to the input tensor.
-        y_ptr (triton.Pointer): Pointer to the output tensor where quantized values will be stored.
-        s_ptr (triton.Pointer): Pointer to the output tensor where scaling factors will be stored.
-        BLOCK_SIZE (tl.constexpr): The size of the block to be processed by each program instance.
-
-    Returns:
-        None
+    Triton kernel for FP8 weight dequantization.
+    out = q * s
     """
     pid = tl.program_id(axis=0)
-    offs = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
-    x = tl.load(x_ptr + offs).to(tl.float32)
-    s = tl.max(tl.abs(x)) / 448.
-    y = x / s
-    y = y.to(y_ptr.dtype.element_ty)
-    tl.store(y_ptr + offs, y)
-    tl.store(s_ptr + pid, s)
+    num_blocks_n = tl.cdiv(N, BLOCK_SIZE_N)
+    pid_m = pid // num_blocks_n
+    pid_n = pid % num_blocks_n
 
-
-def act_quant(x: torch.Tensor, block_size: int = 128) -> Tuple[torch.Tensor, torch.Tensor]:
-    """
-    Quantizes the input tensor `x` using block-wise quantization.
-
-    Args:
-        x (torch.Tensor): The input tensor to be quantized. Must be contiguous and its last dimension size must be divisible by `block_size`.
-        block_size (int, optional): The size of the blocks to be used for quantization. Default is 128.
-
-    Returns:
-        Tuple[torch.Tensor, torch.Tensor]: A tuple containing:
-            - The quantized tensor with dtype `torch.float8_e4m3fn`.
-            - A tensor of scaling factors with dtype `torch.float32`.
-    """
-    assert x.is_contiguous(), 'Input tensor must be contiguous'
-    assert x.size(-1) % block_size == 0, f'Last dimension size must be divisible by block_size (block_size={block_size})'
-    y = torch.empty_like(x, dtype=torch.float8_e4m3fn)
-    s = x.new_empty(*x.size()[:-1], x.size(-1) // block_size, dtype=torch.float32)
-    grid = lambda meta: (triton.cdiv(x.numel(), meta['BLOCK_SIZE']), )
-    act_quant_kernel[grid](x, y, s, BLOCK_SIZE=block_size)
-    return y, s
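
Note: the deleted act_quant path produced one scale per 128-element block, computed as amax / 448 (448 is the largest normal float8_e4m3 value). For reference, an eager sketch of that scheme, assuming the last dimension divides evenly by block_size and a PyTorch build with float8_e4m3fn:

    import torch

    def act_quant_ref(x: torch.Tensor, block_size: int = 128):
        xb = x.float().reshape(*x.shape[:-1], -1, block_size)
        s = xb.abs().amax(dim=-1, keepdim=True) / 448.0   # one scale per block
        y = (xb / s).reshape(x.shape).to(torch.float8_e4m3fn)
        return y, s.squeeze(-1)                           # fp8 values, block scales
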
-
-
-@triton.jit
-def weight_dequant_kernel(x_ptr, s_ptr, y_ptr, M, N, BLOCK_SIZE: tl.constexpr):
-    """
-    Dequantizes weights using the provided scaling factors and stores the result.
-
-    Args:
-        x_ptr (tl.pointer): Pointer to the quantized weights.
-        s_ptr (tl.pointer): Pointer to the scaling factors.
-        y_ptr (tl.pointer): Pointer to the output buffer for dequantized weights.
-        M (int): Number of rows in the weight matrix.
-        N (int): Number of columns in the weight matrix.
-        BLOCK_SIZE (tl.constexpr): Size of the block for tiling.
-
-    Returns:
-        None
-    """
-    pid_m = tl.program_id(axis=0)
-    pid_n = tl.program_id(axis=1)
-    n = tl.cdiv(N, BLOCK_SIZE)
-    offs_m = pid_m * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
-    offs_n = pid_n * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
-    offs = offs_m[:, None] * N + offs_n[None, :]
-    mask = (offs_m[:, None] < M) & (offs_n[None, :] < N)
-    x = tl.load(x_ptr + offs, mask=mask).to(tl.float32)
-    s = tl.load(s_ptr + pid_m * n + pid_n)
-    y = x * s
-    tl.store(y_ptr + offs, y, mask=mask)
-
-
-def weight_dequant(x: torch.Tensor, s: torch.Tensor, block_size: int = 128) -> torch.Tensor:
-    """
-    Dequantizes the given weight tensor using the provided scale tensor.
-
-    Args:
-        x (torch.Tensor): The quantized weight tensor of shape (M, N).
-        s (torch.Tensor): The scale tensor of shape (M, N).
-        block_size (int, optional): The block size to use for dequantization. Defaults to 128.
-
-    Returns:
-        torch.Tensor: The dequantized weight tensor of the same shape as `x`.
-
-    Raises:
-        AssertionError: If `x` or `s` are not contiguous or if their dimensions are not 2.
-    """
-    assert x.is_contiguous() and s.is_contiguous(), 'Input tensors must be contiguous'
-    assert x.dim() == 2 and s.dim() == 2, 'Input tensors must have 2 dimensions'
-    M, N = x.size()
-    y = torch.empty_like(x, dtype=torch.get_default_dtype())
-    grid = lambda meta: (triton.cdiv(M, meta['BLOCK_SIZE']), triton.cdiv(N, meta['BLOCK_SIZE']))
-    weight_dequant_kernel[grid](x, s, y, M, N, BLOCK_SIZE=block_size)
-    return y
-
-
-fp8_gemm_configs = [
-    Config({'BLOCK_SIZE_M': block_m, 'BLOCK_SIZE_N': block_n, 'BLOCK_SIZE_K': 128}, num_stages=num_stages, num_warps=8)
-    for block_m in [16, 32, 64] for block_n in [32, 64, 128] for num_stages in [3, 4, 5, 6]
-]
-
-
-@triton.autotune(configs=fp8_gemm_configs, key=['N', 'K'])
-@triton.jit
-def fp8_gemm_kernel(a_ptr, b_ptr, c_ptr,
-                    a_s_ptr, b_s_ptr,
-                    M, N: tl.constexpr, K: tl.constexpr,
-                    BLOCK_SIZE_M: tl.constexpr,
-                    BLOCK_SIZE_N: tl.constexpr,
-                    BLOCK_SIZE_K: tl.constexpr):
-    """
-    Performs a matrix multiplication operation on FP8 matrices with scaling factors.
-
-    Args:
-        a_ptr (tl.tensor): Pointer to the first input matrix A.
-        b_ptr (tl.tensor): Pointer to the second input matrix B.
-        c_ptr (tl.tensor): Pointer to the output matrix C.
-        a_s_ptr (tl.tensor): Pointer to the scaling factors for matrix A.
-        b_s_ptr (tl.tensor): Pointer to the scaling factors for matrix B.
-        M (int): Number of rows in matrix A and C.
-        N (tl.constexpr): Number of columns in matrix B and C.
-        K (tl.constexpr): Number of columns in matrix A and rows in matrix B.
-        BLOCK_SIZE_M (tl.constexpr): Block size for the M dimension.
-        BLOCK_SIZE_N (tl.constexpr): Block size for the N dimension.
-        BLOCK_SIZE_K (tl.constexpr): Block size for the K dimension.
-
-    Returns:
-        None
-    """
-    pid_m = tl.program_id(axis=0)
-    pid_n = tl.program_id(axis=1)
-    k = tl.cdiv(K, BLOCK_SIZE_K)
-    offs_m = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
-    offs_n = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
-    offs_k = tl.arange(0, BLOCK_SIZE_K)
-    a_ptrs = a_ptr + offs_m[:, None] * K + offs_k[None, :]
-    b_ptrs = b_ptr + offs_n[None, :] * K + offs_k[:, None]
-    a_s_ptrs = a_s_ptr + offs_m * k
-    b_s_ptrs = b_s_ptr + (offs_n // BLOCK_SIZE_K) * k
-
-    accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
-    for i in range(k):
-        a = tl.load(a_ptrs, mask=offs_k[None, :] < K - i * BLOCK_SIZE_K, other=0.0)
-        b = tl.load(b_ptrs, mask=offs_k[:, None] < K - i * BLOCK_SIZE_K, other=0.0)
-        a_s = tl.load(a_s_ptrs)
-        b_s = tl.load(b_s_ptrs)
-        accumulator += tl.dot(a, b) * a_s[:, None] * b_s[None, :]
-        a_ptrs += BLOCK_SIZE_K
-        b_ptrs += BLOCK_SIZE_K
-        a_s_ptrs += 1
-        b_s_ptrs += 1
-    c = accumulator.to(c_ptr.dtype.element_ty)
     offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
     offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
-    c_ptrs = c_ptr + offs_m[:, None] * N + offs_n[None, :]
-    mask = (offs_m[:, None] < M) & (offs_n[None, :] < N)
-    tl.store(c_ptrs, c, mask=mask)
+    mask_m = offs_m < M
+    mask_n = offs_n < N
+
+    q_ptrs = q_ptr + offs_m[:, None] * stride_qm + offs_n[None, :] * stride_qn
+    s_ptrs = s_ptr + offs_m[:, None] * stride_sm + offs_n[None, :] * stride_sn
+    out_ptrs = out_ptr + offs_m[:, None] * stride_om + offs_n[None, :] * stride_on
+
+    q = tl.load(q_ptrs, mask=mask_m[:, None] & mask_n[None, :], other=0)
+    s = tl.load(s_ptrs, mask=mask_m[:, None] & mask_n[None, :], other=1)
+
+    out = q.to(tl.float32) * s.to(tl.float32)
+    tl.store(out_ptrs, out, mask=mask_m[:, None] & mask_n[None, :])
+
+
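
Note: the replaced kernel read one scale per (BLOCK_SIZE x BLOCK_SIZE) tile (s_ptr + pid_m * n + pid_n), while this rewrite addresses scales elementwise through (stride_sm, stride_sn), so it expects a scale tensor with one entry per weight. A sketch of bridging a tile-granular scale tensor to that layout, with hypothetical shapes (weights (512, 512), tile scales (4, 4) for 128x128 tiles):

    import torch

    tile_scales = torch.rand(4, 4, device="cuda")
    per_elem = (tile_scales
                .repeat_interleave(128, dim=0)    # expand rows
                .repeat_interleave(128, dim=1))   # expand cols
    print(per_elem.shape)                         # torch.Size([512, 512])
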
-def fp8_gemm(a: torch.Tensor, a_s: torch.Tensor, b: torch.Tensor, b_s: torch.Tensor):
+@triton.jit
+def fp8_gemm_kernel(
+    a_ptr, b_ptr, c_ptr, M, N, K,
+    stride_am, stride_ak, stride_bk, stride_bn,
+    stride_cm, stride_cn,
+    BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr
+):
     """
-    Perform a matrix multiplication using FP8 precision.
+    Triton kernel for FP8 GEMM (General Matrix Multiply)
+    c = a @ b
+    """
+    pid = tl.program_id(axis=0)
+    num_blocks_n = tl.cdiv(N, BLOCK_SIZE_N)
+    pid_m = pid // num_blocks_n
+    pid_n = pid % num_blocks_n
+
+    offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
+    offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
+    mask_m = offs_m < M
+    mask_n = offs_n < N
+
+    c_ptrs = c_ptr + offs_m[:, None] * stride_cm + offs_n[None, :] * stride_cn
+    acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
+
+    for k in range(0, K, BLOCK_SIZE_K):
+        offs_k = k + tl.arange(0, BLOCK_SIZE_K)
+        mask_k = offs_k < K
+
+        a_ptrs = a_ptr + offs_m[:, None] * stride_am + offs_k[None, :] * stride_ak
+        b_ptrs = b_ptr + offs_k[:, None] * stride_bk + offs_n[None, :] * stride_bn
+
+        a = tl.load(a_ptrs, mask=mask_m[:, None] & mask_k[None, :], other=0)
+        b = tl.load(b_ptrs, mask=mask_k[:, None] & mask_n[None, :], other=0)
+
+        acc += tl.dot(a, b)
+
+    tl.store(c_ptrs, acc, mask=mask_m[:, None] & mask_n[None, :])
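
Note: unlike the deleted autotuned kernel, which fused the per-block scales into the accumulation (accumulator += tl.dot(a, b) * a_s[:, None] * b_s[None, :]), this rewrite multiplies plain matrices and leaves scaling to the dequantization step. Also, tl.dot rejects tiles smaller than 16 in any dimension, which the block_size=16 default below just satisfies; a guard one could add in the wrappers (hypothetical helper, not part of this commit):

    def _check_block_size(block_size: int) -> None:
        # Triton's tl.dot requires BLOCK_SIZE_M/N/K >= 16.
        if block_size < 16:
            raise ValueError(f"block_size must be >= 16 for tl.dot, got {block_size}")
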
+
+
+def dequantize_weights(q_weight: torch.Tensor, scale: torch.Tensor, block_size=16) -> torch.Tensor:
+    """
+    Dequantizes FP8 weights using provided scaling factors.
 
     Args:
-        a (torch.Tensor): The first input matrix, must be contiguous.
-        a_s (torch.Tensor): The scaling factor for the first input matrix, must be contiguous.
-        b (torch.Tensor): The second input matrix, must be contiguous.
-        b_s (torch.Tensor): The scaling factor for the second input matrix, must be contiguous.
+        q_weight (torch.Tensor): Quantized weight matrix (e.g. float8).
+        scale (torch.Tensor): Scaling factors.
+        block_size (int): Block size used in the kernel.
 
     Returns:
-        torch.Tensor: The result of the matrix multiplication.
+        torch.Tensor: Dequantized weight matrix (float32).
     """
-    assert a.is_contiguous() and b.is_contiguous(), 'Input tensors must be contiguous'
-    assert a_s.is_contiguous() and b_s.is_contiguous(), 'Scaling factor tensors must be contiguous'
-    K = a.size(-1)
-    M = a.numel() // K
-    N = b.size(0)
-    c = a.new_empty(*a.size()[:-1], N, dtype=torch.get_default_dtype())
-    grid = lambda META: (triton.cdiv(M, META['BLOCK_SIZE_M']), triton.cdiv(N, META['BLOCK_SIZE_N']))
-    fp8_gemm_kernel[grid](a, b, c, a_s, b_s, M, N, K)
-    return c
+    assert q_weight.shape == scale.shape, "Mismatched shapes between quantized weights and scales."
+
+    M, N = q_weight.shape
+    output = torch.empty_like(q_weight, dtype=torch.float32)
+
+    grid = (triton.cdiv(M, block_size) * triton.cdiv(N, block_size),)
+    weight_dequant_kernel[grid](
+        q_weight, scale, output,
+        M, N,
+        q_weight.stride(0), q_weight.stride(1),
+        scale.stride(0), scale.stride(1),
+        output.stride(0), output.stride(1),
+        block_size, block_size
+    )
+    return output
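
Note: a minimal usage-and-parity sketch for dequantize_weights, assuming a CUDA device, per-element scales, and a PyTorch build with float8_e4m3fn:

    import torch

    w_q = torch.randn(64, 64, device="cuda").to(torch.float8_e4m3fn)
    scale = torch.full((64, 64), 0.5, device="cuda", dtype=torch.float32)

    w = dequantize_weights(w_q, scale)       # Triton path, float32 output
    ref = w_q.to(torch.float32) * scale      # eager reference: out = q * s
    torch.testing.assert_close(w, ref)
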
+
+
+def fp8_gemm(a: torch.Tensor, b: torch.Tensor, block_size=16) -> torch.Tensor:
+    """
+    Performs GEMM on FP8 dequantized matrices using Triton.
+
+    Args:
+        a (torch.Tensor): Left matrix (float32).
+        b (torch.Tensor): Right matrix (float32).
+        block_size (int): Block size for tiling.
+
+    Returns:
+        torch.Tensor: Output matrix (float32).
+    """
+    assert a.shape[1] == b.shape[0], "Incompatible matrix dimensions."
+
+    M, K = a.shape
+    _, N = b.shape
+    output = torch.empty((M, N), device=a.device, dtype=torch.float32)  # allocate on the inputs' device
+
+    grid = (triton.cdiv(M, block_size) * triton.cdiv(N, block_size),)
+    fp8_gemm_kernel[grid](
+        a, b, output,
+        M, N, K,
+        a.stride(0), a.stride(1),
+        b.stride(0), b.stride(1),
+        output.stride(0), output.stride(1),
+        block_size, block_size, block_size
+    )
+    return output
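
Note: a quick parity check for the rewritten GEMM, assuming a CUDA device and float32 inputs as documented; tl.dot may use TF32 internally, so compare with a loose tolerance:

    import torch

    torch.manual_seed(0)
    a = torch.randn(128, 64, device="cuda", dtype=torch.float32)
    b = torch.randn(64, 96, device="cuda", dtype=torch.float32)

    out = fp8_gemm(a, b)                     # Triton path
    ref = a @ b                              # eager reference
    torch.testing.assert_close(out, ref, rtol=2e-2, atol=2e-2)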