diff --git a/demo/app.py b/demo/app.py
index 7dbc59f..b0918f1 100644
--- a/demo/app.py
+++ b/demo/app.py
@@ -19,7 +19,7 @@ vl_gpt = vl_gpt.to(torch.bfloat16).cuda()
 vl_chat_processor = VLChatProcessor.from_pretrained(model_path)
 tokenizer = vl_chat_processor.tokenizer
-cuda_device = 'cuda' if torch.cuda.is_available() else 'cpu'
+cuda_device = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu'
 
 # Multimodal Understanding function
 @torch.inference_mode()
 # Multimodal Understanding function
diff --git a/demo/app_janusflow.py b/demo/app_janusflow.py
index 4777196..01f3b70 100644
--- a/demo/app_janusflow.py
+++ b/demo/app_janusflow.py
@@ -5,7 +5,13 @@ from PIL import Image
 from diffusers.models import AutoencoderKL
 import numpy as np
 
-cuda_device = 'cuda' if torch.cuda.is_available() else 'cpu'
+cuda_device = 'cpu'
+if torch.cuda.is_available():
+    cuda_device = 'cuda'
+elif torch.backends.mps.is_available():
+    cuda_device = 'mps'
+else:
+    cuda_device = 'cpu'
 
 # Load model and processor
 model_path = "deepseek-ai/JanusFlow-1.3B"
diff --git a/demo/fastapi_app.py b/demo/fastapi_app.py
index c2e5710..a789b61 100644
--- a/demo/fastapi_app.py
+++ b/demo/fastapi_app.py
@@ -21,7 +21,7 @@ vl_gpt = vl_gpt.to(torch.bfloat16).cuda()
 vl_chat_processor = VLChatProcessor.from_pretrained(model_path)
 tokenizer = vl_chat_processor.tokenizer
 
-cuda_device = 'cuda' if torch.cuda.is_available() else 'cpu'
+cuda_device = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu'
 
 @torch.inference_mode()
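
All three hunks apply the same fallback order: prefer CUDA, else Apple's Metal Performance Shaders (MPS) backend, else CPU. The sketch below is a minimal standalone restatement of that pattern, not code taken from the patch; it assumes a PyTorch build that ships torch.backends.mps (1.12 or later), and get_device is a hypothetical helper name.

import torch

def get_device() -> str:
    # Hypothetical helper (not in the patch): the same fallback order the
    # three hunks above implement, CUDA first, then MPS, then CPU.
    if torch.cuda.is_available():
        return "cuda"
    if torch.backends.mps.is_available():
        return "mps"
    return "cpu"

cuda_device = get_device()  # e.g. 'mps' on an Apple-silicon Mac without CUDA

app.py and fastapi_app.py express this as a chained conditional expression, while app_janusflow.py spells it out with if/elif/else; both evaluate to the same string. Note that the hunk context still shows vl_gpt = vl_gpt.to(torch.bfloat16).cuda(), so actually running on MPS would presumably also require moving the model with .to(cuda_device) rather than .cuda().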