fix: demo/fastapi_app.py with mps device.

yangmeng 2025-01-28 23:57:58 +08:00
parent 877c778c0e
commit 1b69d7f99b

demo/fastapi_app.py

@@ -12,9 +12,9 @@ app = FastAPI()
 # Device and dtype configuration
 def get_device_and_dtype():
     if torch.cuda.is_available():
-        return 'cuda', torch.bfloat16
+        return 'cuda', torch.float32
     elif torch.backends.mps.is_available():
-        return 'mps', torch.float16
+        return 'mps', torch.float32
     return 'cpu', torch.float32
 
 device, dtype = get_device_and_dtype()
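
Note: the helper's return values feed straight into model loading, roughly as in the sketch below (the model class and checkpoint path here are placeholders, not part of this commit):

    device, dtype = get_device_and_dtype()
    # Load weights, then move them to the selected device with the matching dtype.
    model = AutoModelForCausalLM.from_pretrained("path/to/checkpoint")  # placeholder path
    model = model.to(device=device, dtype=dtype).eval()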
@@ -35,11 +35,17 @@ tokenizer = vl_chat_processor.tokenizer
 @torch.inference_mode()
 def multimodal_understanding(image_data, question, seed, top_p, temperature):
-    torch.cuda.empty_cache() if device == 'cuda' else None
+    # Clear CUDA cache if using CUDA
+    if device == 'cuda':
+        torch.cuda.empty_cache()
+
+    # set seed
     torch.manual_seed(seed)
     np.random.seed(seed)
     if device == 'cuda':
         torch.cuda.manual_seed(seed)
+    elif device == 'mps':
+        torch.mps.manual_seed(seed)
 
     conversation = [
         {
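
The seeding ladder added here now repeats in both endpoints; it could be factored into a single helper along these lines (a sketch, reusing the module-level `device` chosen above):

    def set_seed(seed: int) -> None:
        # Seed the CPU RNG, NumPy, and whichever accelerator is active,
        # so identical requests reproduce identical outputs.
        torch.manual_seed(seed)
        np.random.seed(seed)
        if device == 'cuda':
            torch.cuda.manual_seed(seed)
        elif device == 'mps':
            torch.mps.manual_seed(seed)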
@@ -94,6 +100,7 @@ def generate(input_ids,
              cfg_weight: float = 5,
              image_token_num_per_image: int = 576,
              patch_size: int = 16):
+    try:
     torch.cuda.empty_cache() if device == 'cuda' else None
     tokens = torch.zeros((parallel_size * 2, len(input_ids)), dtype=torch.int).to(device)
     for i in range(parallel_size * 2):
@@ -124,6 +131,8 @@ def generate(input_ids,
         )
 
     return generated_tokens.to(dtype=torch.int), patches
+    except Exception as e:
+        raise Exception(f"Error in generate function: {str(e)}")
 
 def unpack(dec, width, height, parallel_size=5):
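
One caveat on the new wrapper: re-raising as a bare Exception drops the original traceback. If the wrapper stays, exception chaining keeps it attached (a sketch of the same pattern, not the commit's code):

    try:
        ...  # generation body
    except Exception as e:
        # 'from e' preserves the original traceback for debugging.
        raise RuntimeError(f"Error in generate function: {e}") from e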
@@ -138,11 +147,17 @@ def unpack(dec, width, height, parallel_size=5):
 @torch.inference_mode()
 def generate_image(prompt, seed, guidance):
-    torch.cuda.empty_cache() if device == 'cuda' else None
-    seed = seed if seed is not None else 12345
+    try:
+        # Clear CUDA cache if using CUDA
+        if device == 'cuda':
+            torch.cuda.empty_cache()
+
+        # Set the seed for reproducible results
+        if seed is not None:
     torch.manual_seed(seed)
     if device == 'cuda':
         torch.cuda.manual_seed(seed)
+    elif device == 'mps':
+        torch.mps.manual_seed(seed)
     np.random.seed(seed)
     width = 384
     height = 384
@@ -161,6 +176,8 @@ def generate_image(prompt, seed, guidance):
     images = unpack(patches, width // 16 * 16, height // 16 * 16)
 
     return [Image.fromarray(images[i]).resize((1024, 1024), Image.LANCZOS) for i in range(parallel_size)]
+    except Exception as e:
+        raise Exception(f"Error in generate_image function: {str(e)}")
 
 @app.post("/generate_images/")
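
The cache-clearing branch is likewise duplicated across both endpoints; a device-agnostic helper could cover MPS as well (a sketch; torch.mps.empty_cache() only exists in newer PyTorch releases, hence the hasattr guard):

    def empty_device_cache() -> None:
        # Release cached allocator blocks on whichever backend is active.
        if device == 'cuda':
            torch.cuda.empty_cache()
        elif device == 'mps' and hasattr(torch.mps, 'empty_cache'):
            torch.mps.empty_cache()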