Janus docker

KeesGeerligs 2025-01-28 19:12:47 +01:00
parent a74a59f8a9
commit 0e3da71c0d
5 changed files with 91 additions and 10 deletions


@@ -3,21 +3,23 @@ import torch
 from transformers import AutoConfig, AutoModelForCausalLM
 from janus.models import MultiModalityCausalLM, VLChatProcessor
 from PIL import Image
+import os
 import numpy as np
 # Load model and processor
-model_path = "deepseek-ai/Janus-1.3B"
-config = AutoConfig.from_pretrained(model_path)
+model_path = os.getenv('MODEL_PATH_APP')
+config = AutoConfig.from_pretrained(model_path, local_files_only=True)
 language_config = config.language_config
 language_config._attn_implementation = 'eager'
 vl_gpt = AutoModelForCausalLM.from_pretrained(model_path,
                                               language_config=language_config,
-                                              trust_remote_code=True)
+                                              trust_remote_code=True,
+                                              local_files_only=True)
 vl_gpt = vl_gpt.to(torch.bfloat16).cuda()
-vl_chat_processor = VLChatProcessor.from_pretrained(model_path)
+vl_chat_processor = VLChatProcessor.from_pretrained(model_path, local_files_only=True)
 tokenizer = vl_chat_processor.tokenizer
 cuda_device = 'cuda' if torch.cuda.is_available() else 'cpu'
 # Multimodal Understanding function
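A side effect of switching to os.getenv('MODEL_PATH_APP') with local_files_only=True is that the demo now hard-depends on the weights already being on disk. As an illustration only (not part of this commit; the guard and its message are hypothetical), a fail-fast check before loading could look like:

import os
import sys

# Hypothetical guard (not in this commit): verify that MODEL_PATH_APP points at an
# existing local model directory before transformers is asked to load it offline.
model_path = os.getenv("MODEL_PATH_APP")
if not model_path or not os.path.isdir(model_path):
    sys.exit(
        f"MODEL_PATH_APP={model_path!r} is not a local model directory; "
        "bake or mount the weights before starting the demo."
    )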


@@ -12,19 +12,20 @@ import time
 # Load model and processor
-model_path = "deepseek-ai/Janus-Pro-7B"
-config = AutoConfig.from_pretrained(model_path)
+model_path = os.getenv('MODEL_PATH_APP_JANUSPRO')
+config = AutoConfig.from_pretrained(model_path, local_files_only=True)
 language_config = config.language_config
 language_config._attn_implementation = 'eager'
 vl_gpt = AutoModelForCausalLM.from_pretrained(model_path,
                                               language_config=language_config,
-                                              trust_remote_code=True)
+                                              trust_remote_code=True,
+                                              local_files_only=True)
 if torch.cuda.is_available():
     vl_gpt = vl_gpt.to(torch.bfloat16).cuda()
 else:
     vl_gpt = vl_gpt.to(torch.float16)
-vl_chat_processor = VLChatProcessor.from_pretrained(model_path)
+vl_chat_processor = VLChatProcessor.from_pretrained(model_path, local_files_only=True)
 tokenizer = vl_chat_processor.tokenizer
 cuda_device = 'cuda' if torch.cuda.is_available() else 'cpu'
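Both hunks assume the weights already sit under the directories the Dockerfile bakes in. The job definitions below deliver them as an S3 resource; for a local build, one way to stage them (an assumption, not something this commit does) is huggingface_hub's snapshot_download:

from huggingface_hub import snapshot_download

# Stage the 7B weights into the directory layout the image expects
# (path taken from the Dockerfile's MODEL_PATH_APP_JANUSPRO value).
snapshot_download(
    repo_id="deepseek-ai/Janus-Pro-7B",
    local_dir="models/models-deepseek-ai-Janus-Pro-7B",
)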

janus.dockerfile Normal file

@@ -0,0 +1,18 @@
FROM python:3.10-slim
COPY . /Janus
WORKDIR /Janus
RUN pip install --no-cache-dir -r requirements.txt
RUN pip install -e .
RUN pip install --upgrade torch
ENV GRADIO_SERVER_NAME="0.0.0.0"
ENV MODEL_PATH_APP="models/models-deepseek-ai-Janus-Pro-1B"
ENV MODEL_PATH_APP_JANUSPRO="models/models-deepseek-ai-Janus-Pro-7B"
ENV HF_HOME=/Janus/models
ENV TRANSFORMERS_CACHE=/Janus/models
ENV HF_DATASETS_CACHE=/Janus/models
CMD ["python", "demo/app.py"]
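The Dockerfile routes every Hugging Face cache variable to /Janus/models and passes the per-app model path as a directory relative to the /Janus working directory. A quick sanity check run inside the container (illustrative only, not part of this commit) would be:

import os

# Print the cache routing set by the Dockerfile and confirm the selected model
# directory exists relative to the /Janus working directory.
for var in ("HF_HOME", "TRANSFORMERS_CACHE", "HF_DATASETS_CACHE", "MODEL_PATH_APP"):
    print(var, "=", os.getenv(var))

model_dir = os.getenv("MODEL_PATH_APP", "")
print("model directory present:", os.path.isdir(os.path.join("/Janus", model_dir)))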

janus_pro_1b.json Normal file

@@ -0,0 +1,30 @@
{
"version": "0.1",
"type": "container",
"meta": {
"trigger": "cli"
},
"ops": [
{
"type": "container/run",
"id": "janus_pro_1b",
"args": {
"cmd": [],
"image": "k1llahkeezy/janus-pro:0.0.3",
"gpu": true,
"expose": 7860,
"env": {
"MODEL_PATH_APP": "models/models-deepseek-ai-Janus-Pro-1B",
"GRADIO_SERVER_NAME": "0.0.0.0"
},
"resources": [
{
"type": "S3",
"url": "https://models.nosana.io/deepseek/janus/models-deepseek-ai-Janus-Pro-1B",
"target": "/Janus/models/models-deepseek-ai-Janus-Pro-1B"
}
]
}
}
]
}

janus_pro_7b.json Normal file

@@ -0,0 +1,30 @@
{
"version": "0.1",
"type": "container",
"meta": {
"trigger": "cli"
},
"ops": [
{
"type": "container/run",
"id": "janus_pro_7b",
"args": {
"cmd": [],
"image": "k1llahkeezy/janus-pro:0.0.3",
"gpu": true,
"expose": 7860,
"env": {
"MODEL_PATH_APP_JANUSPRO": "models/models-deepseek-ai-Janus-Pro-7B",
"GRADIO_SERVER_NAME": "0.0.0.0"
},
"resources": [
{
"type": "S3",
"url": "https://models.nosana.io/hugging-face/deepseek/janus/models-deepseek-ai-Janus-Pro/7B",
"target": "/Janus/models/models-deepseek-ai-Janus-Pro-7B"
}
]
}
}
]
}
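The two job specs differ only in the env variable name, the model directory, and the S3 URL; the resource target has to line up with the env path once the /Janus working directory is prefixed. A small consistency check (illustrative, assuming both JSON files are in the current directory) is:

import json

# For each job spec, confirm the S3 resource is mounted exactly where the
# container's MODEL_PATH_* env variable points, given WORKDIR /Janus.
for spec_file in ("janus_pro_1b.json", "janus_pro_7b.json"):
    with open(spec_file) as f:
        args = json.load(f)["ops"][0]["args"]
    env_path = next(v for k, v in args["env"].items() if k.startswith("MODEL_PATH"))
    target = args["resources"][0]["target"]
    print(spec_file, "consistent:", target == "/Janus/" + env_path)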