From dec287ca1d452eb2c168b69fdb097286ccb4788d Mon Sep 17 00:00:00 2001
From: Florian Beier
Date: Tue, 28 Jan 2025 15:36:45 +0100
Subject: [PATCH] Fix type mismatch in app_januspro.py for CPU mode

The input type is still BFloat16 when cuda_device = 'cpu'; fix it.
---
 demo/app_januspro.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/demo/app_januspro.py b/demo/app_januspro.py
index 702e58e..82cf6b0 100644
--- a/demo/app_januspro.py
+++ b/demo/app_januspro.py
@@ -22,7 +22,7 @@ vl_gpt = AutoModelForCausalLM.from_pretrained(model_path,
 if torch.cuda.is_available():
     vl_gpt = vl_gpt.to(torch.bfloat16).cuda()
 else:
-    vl_gpt = vl_gpt.to(torch.float16)
+    vl_gpt = vl_gpt.to('cpu')
 
 vl_chat_processor = VLChatProcessor.from_pretrained(model_path)
 tokenizer = vl_chat_processor.tokenizer
@@ -156,7 +156,7 @@ def generate_image(prompt,
                                                                    system_prompt='')
         text = text + vl_chat_processor.image_start_tag
 
-        input_ids = torch.LongTensor(tokenizer.encode(text))
+        input_ids = torch.LongTensor(tokenizer.encode(text)).to(cuda_device)
         output, patches = generate(input_ids,
                                    width // 16 * 16,
                                    height // 16 * 16,
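
Note (not part of the patch): a minimal sketch of the device handling this change relies on. The cuda_device string is assumed to be defined in app_januspro.py via the usual torch.cuda.is_available() check, as the commit message implies; the torch.nn.Linear module below is only an illustrative stand-in for vl_gpt.

import torch

# Device string used to place input tensors; assumed to mirror how the
# demo decides where the model goes.
cuda_device = 'cuda' if torch.cuda.is_available() else 'cpu'

model = torch.nn.Linear(8, 8)  # illustrative stand-in for vl_gpt

if torch.cuda.is_available():
    # GPU path: cast to bfloat16 and move to the GPU, as the demo does.
    model = model.to(torch.bfloat16).cuda()
else:
    # CPU path (the patched branch): only place the model on the CPU and
    # leave its dtype alone instead of casting to float16.
    model = model.to('cpu')

# Input ids must sit on the same device as the model before the forward pass,
# which is what the second hunk of the patch ensures.
input_ids = torch.LongTensor([1, 2, 3]).to(cuda_device)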