diff --git a/demo/app.py b/demo/app.py
index 7dbc59f..885bfc9 100644
--- a/demo/app.py
+++ b/demo/app.py
@@ -3,21 +3,23 @@ import torch
 from transformers import AutoConfig, AutoModelForCausalLM
 from janus.models import MultiModalityCausalLM, VLChatProcessor
 from PIL import Image
+import os
 import numpy as np
 
 
 # Load model and processor
-model_path = "deepseek-ai/Janus-1.3B"
-config = AutoConfig.from_pretrained(model_path)
+model_path = os.getenv('MODEL_PATH_APP')
+config = AutoConfig.from_pretrained(model_path, local_files_only=True)
 language_config = config.language_config
 language_config._attn_implementation = 'eager'
 vl_gpt = AutoModelForCausalLM.from_pretrained(model_path,
-                                             language_config=language_config,
-                                             trust_remote_code=True)
+                                             language_config=language_config,
+                                             trust_remote_code=True,
+                                             local_files_only=True)
 vl_gpt = vl_gpt.to(torch.bfloat16).cuda()
 
-vl_chat_processor = VLChatProcessor.from_pretrained(model_path)
+vl_chat_processor = VLChatProcessor.from_pretrained(model_path, local_files_only=True)
 tokenizer = vl_chat_processor.tokenizer
 cuda_device = 'cuda' if torch.cuda.is_available() else 'cpu'
 
 # Multimodal Understanding function
diff --git a/demo/app_januspro.py b/demo/app_januspro.py
index 702e58e..9aa9e8b 100644
--- a/demo/app_januspro.py
+++ b/demo/app_januspro.py
@@ -12,19 +12,20 @@ import time
 
 
 # Load model and processor
-model_path = "deepseek-ai/Janus-Pro-7B"
-config = AutoConfig.from_pretrained(model_path)
+model_path = os.getenv('MODEL_PATH_APP_JANUSPRO')
+config = AutoConfig.from_pretrained(model_path, local_files_only=True)
 language_config = config.language_config
 language_config._attn_implementation = 'eager'
 vl_gpt = AutoModelForCausalLM.from_pretrained(model_path,
-                                             language_config=language_config,
-                                             trust_remote_code=True)
+                                             language_config=language_config,
+                                             trust_remote_code=True,
+                                             local_files_only=True)
 if torch.cuda.is_available():
     vl_gpt = vl_gpt.to(torch.bfloat16).cuda()
 else:
     vl_gpt = vl_gpt.to(torch.float16)
 
-vl_chat_processor = VLChatProcessor.from_pretrained(model_path)
+vl_chat_processor = VLChatProcessor.from_pretrained(model_path, local_files_only=True)
 tokenizer = vl_chat_processor.tokenizer
 cuda_device = 'cuda' if torch.cuda.is_available() else 'cpu'
 
diff --git a/janus.dockerfile b/janus.dockerfile
new file mode 100644
index 0000000..e6ad9da
--- /dev/null
+++ b/janus.dockerfile
@@ -0,0 +1,18 @@
+FROM python:3.10-slim
+
+COPY . /Janus
+
+WORKDIR /Janus
+
+RUN pip install --no-cache-dir -r requirements.txt
+RUN pip install -e .
+RUN pip install --upgrade torch
+
+ENV GRADIO_SERVER_NAME="0.0.0.0"
+ENV MODEL_PATH_APP="models/models-deepseek-ai-Janus-Pro-1B"
+ENV MODEL_PATH_APP_JANUSPRO="models/models-deepseek-ai-Janus-Pro-7B"
+ENV HF_HOME=/Janus/models
+ENV TRANSFORMERS_CACHE=/Janus/models
+ENV HF_DATASETS_CACHE=/Janus/models
+
+CMD ["python", "demo/app.py"]
diff --git a/janus_pro_1b.json b/janus_pro_1b.json
new file mode 100644
index 0000000..6d291d8
--- /dev/null
+++ b/janus_pro_1b.json
@@ -0,0 +1,30 @@
+{
+  "version": "0.1",
+  "type": "container",
+  "meta": {
+    "trigger": "cli"
+  },
+  "ops": [
+    {
+      "type": "container/run",
+      "id": "janus_pro_1b",
+      "args": {
+        "cmd": [],
+        "image": "k1llahkeezy/janus-pro:0.0.3",
+        "gpu": true,
+        "expose": 7860,
+        "env": {
+          "MODEL_PATH_APP": "models/models-deepseek-ai-Janus-Pro-1B",
+          "GRADIO_SERVER_NAME": "0.0.0.0"
+        },
+        "resources": [
+          {
+            "type": "S3",
+            "url": "https://models.nosana.io/deepseek/janus/models-deepseek-ai-Janus-Pro-1B",
+            "target": "/Janus/models/models-deepseek-ai-Janus-Pro-1B"
+          }
+        ]
+      }
+    }
+  ]
+}
\ No newline at end of file
diff --git a/janus_pro_7b.json b/janus_pro_7b.json
new file mode 100644
index 0000000..8f9c22c
--- /dev/null
+++ b/janus_pro_7b.json
@@ -0,0 +1,30 @@
+{
+  "version": "0.1",
+  "type": "container",
+  "meta": {
+    "trigger": "cli"
+  },
+  "ops": [
+    {
+      "type": "container/run",
+      "id": "janus_pro_7b",
+      "args": {
+        "cmd": [],
+        "image": "k1llahkeezy/janus-pro:0.0.3",
+        "gpu": true,
+        "expose": 7860,
+        "env": {
+          "MODEL_PATH_APP_JANUSPRO": "models/models-deepseek-ai-Janus-Pro-7B",
+          "GRADIO_SERVER_NAME": "0.0.0.0"
+        },
+        "resources": [
+          {
+            "type": "S3",
+            "url": "https://models.nosana.io/hugging-face/deepseek/janus/models-deepseek-ai-Janus-Pro/7B",
+            "target": "/Janus/models/models-deepseek-ai-Janus-Pro-7B"
+          }
+        ]
+      }
+    }
+  ]
+}
\ No newline at end of file
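
The sketch below (not part of the diff itself) illustrates the env-driven offline loading pattern that both demo apps now follow: resolve the model directory from an environment variable and pass local_files_only=True so no from_pretrained call reaches out to the Hugging Face Hub. The fallback path and the fail-fast RuntimeError are illustrative assumptions, not code from this change; the snapshot is expected to already exist under MODEL_PATH_APP, e.g. placed there by the S3 resources in the Nosana job definitions above.

import os

import torch
from transformers import AutoConfig, AutoModelForCausalLM
from janus.models import VLChatProcessor

# Resolve the local model snapshot from the environment; the fallback value is
# only an example and mirrors the default baked into janus.dockerfile.
model_path = os.getenv("MODEL_PATH_APP", "models/models-deepseek-ai-Janus-Pro-1B")
if not model_path or not os.path.isdir(model_path):
    # Hypothetical guard, not in the diff: fail early with a clear message
    # instead of letting local_files_only raise a less obvious loading error.
    raise RuntimeError(f"MODEL_PATH_APP does not point to a local model directory: {model_path!r}")

# local_files_only=True means no network access: loading fails immediately if
# the snapshot is missing rather than silently falling back to the Hub.
config = AutoConfig.from_pretrained(model_path, local_files_only=True)
language_config = config.language_config
language_config._attn_implementation = "eager"

vl_gpt = AutoModelForCausalLM.from_pretrained(
    model_path,
    language_config=language_config,
    trust_remote_code=True,
    local_files_only=True,
)
# Prefer bfloat16 on GPU, fall back to float16 on CPU, as in app_januspro.py.
vl_gpt = (
    vl_gpt.to(torch.bfloat16).cuda()
    if torch.cuda.is_available()
    else vl_gpt.to(torch.float16)
)

vl_chat_processor = VLChatProcessor.from_pretrained(model_path, local_files_only=True)
tokenizer = vl_chat_processor.tokenizer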