Mirror of https://github.com/deepseek-ai/DeepSeek-VL.git, synced 2025-04-19 18:19:03 -04:00
chore: make format

Commit: 48cc0deea6
Parent: 90a18501d7
Makefile (2 changed lines)

@@ -4,7 +4,7 @@ COPYRIGHT = "DeepSeek."
 PROJECT_PATH = deepseek_vl
 SHELL = /bin/bash
 SOURCE_FOLDERS = deepseek_vl
-PYTHON_FILES = $(shell find $(SOURCE_FOLDERS) -type f -name "*.py" -o -name "*.pyi")
+PYTHON_FILES = $(shell find $(SOURCE_FOLDERS) -type f -name "*.py" -o -name "*.pyi") cli_chat.py inference.py
 COMMIT_HASH = $(shell git log -1 --format=%h)
 PATH := $(HOME)/go/bin:$(PATH)
 PYTHON ?= $(shell command -v python3 || command -v python)
README.md

@@ -192,7 +192,7 @@ This code repository is licensed under [the MIT License](https://github.com/deep
 ```
 @misc{lu2024deepseekvl,
       title={DeepSeek-VL: Towards Real-World Vision-Language Understanding},
-      author={Haoyu Lu and Wen Liu and Bo Zhang and Bingxuan Wang and Kai Dong and Bo Liu and Jingxiang Sun and Tongzheng Ren and Zhuoshu Li and Yaofeng Sun and Chengqi Deng and Hanwei Xu and Zhenda Xie and Chong Ruan},
+      author={Haoyu Lu and Wen Liu and Bo Zhang and Bingxuan Wang and Kai Dong and Bo Liu and Jingxiang Sun and Tongzheng Ren and Zhuoshu Li and Hao Yang and Yaofeng Sun and Chengqi Deng and Hanwei Xu and Zhenda Xie and Chong Ruan},
       year={2024},
       eprint={2403.05525},
       archivePrefix={arXiv},
cli_chat.py (64 changed lines)

@@ -3,9 +3,10 @@
 import argparse
 import os
 import sys
-from PIL import Image
 from threading import Thread

 import torch
+from PIL import Image
 from transformers import TextIteratorStreamer

 from deepseek_vl.utils.io import load_pretrained_model
@@ -33,22 +34,19 @@ def get_help_message(image_token):


 @torch.inference_mode()
-def response(args, conv, pil_images, tokenizer, vl_chat_processor, vl_gpt, generation_config):
+def response(
+    args, conv, pil_images, tokenizer, vl_chat_processor, vl_gpt, generation_config
+):
     prompt = conv.get_prompt()
     prepare_inputs = vl_chat_processor.__call__(
-        prompt=prompt,
-        images=pil_images,
-        force_batchify=True
+        prompt=prompt, images=pil_images, force_batchify=True
     ).to(vl_gpt.device)

     # run image encoder to get the image embeddings
     inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)

     streamer = TextIteratorStreamer(
-        tokenizer=tokenizer,
-        skip_prompt=True,
-        skip_special_tokens=True
+        tokenizer=tokenizer, skip_prompt=True, skip_special_tokens=True
     )
     generation_config["inputs_embeds"] = inputs_embeds
     generation_config["attention_mask"] = prepare_inputs.attention_mask
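The reflowed response() above is the standard transformers streaming pattern: generate() runs on a worker thread while the caller iterates the streamer. A minimal self-contained sketch of that pattern (the small text-only model here is purely illustrative; the diff's code streams from the DeepSeek-VL model via inputs_embeds instead):

    from threading import Thread

    from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")

    inputs = tokenizer("Hello", return_tensors="pt")
    # skip_prompt / skip_special_tokens mirror the arguments in the hunk above
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

    # generate() blocks until finished, so it runs on a background thread;
    # the streamer then yields decoded text fragments as tokens arrive.
    Thread(
        target=model.generate,
        kwargs=dict(**inputs, streamer=streamer, max_new_tokens=32),
    ).start()
    for text in streamer:
        print(text, end="", flush=True)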
@@ -79,7 +77,6 @@ def chat(args, tokenizer, vl_chat_processor, vl_gpt, generation_config):
     help_msg = get_help_message(image_token)

     while True:
-
         print(help_msg)

         pil_images = []
@@ -87,9 +84,10 @@ def chat(args, tokenizer, vl_chat_processor, vl_gpt, generation_config):
         roles = conv.roles

         while True:
-
             # get user input
-            user_input = get_user_input(f"{roles[0]} [{image_token} indicates an image]: ")
+            user_input = get_user_input(
+                f"{roles[0]} [{image_token} indicates an image]: "
+            )

             if user_input == "exit":
                 print("Chat program exited.")
@@ -115,7 +113,9 @@ def chat(args, tokenizer, vl_chat_processor, vl_gpt, generation_config):

         while cur_img_idx < num_images:
             try:
-                image_file = input(f"({cur_img_idx + 1}/{num_images}) Input the image file path: ")
+                image_file = input(
+                    f"({cur_img_idx + 1}/{num_images}) Input the image file path: "
+                )

             except KeyboardInterrupt:
                 print()
@@ -134,11 +134,21 @@ def chat(args, tokenizer, vl_chat_processor, vl_gpt, generation_config):
                 sys.exit(0)

             else:
-                print(f"File error, `{image_file}` does not exist. Please input the correct file path.")
+                print(
+                    f"File error, `{image_file}` does not exist. Please input the correct file path."
+                )

         # get the answer by the model's prediction
         answer = ""
-        answer_iter = response(args, conv, pil_images, tokenizer, vl_chat_processor, vl_gpt, generation_config)
+        answer_iter = response(
+            args,
+            conv,
+            pil_images,
+            tokenizer,
+            vl_chat_processor,
+            vl_gpt,
+            generation_config,
+        )
         sys.stdout.write(f"{conv.roles[1]}: ")
         for char in answer_iter:
             answer += char
@@ -152,7 +162,6 @@ def chat(args, tokenizer, vl_chat_processor, vl_gpt, generation_config):


 def main(args):
-
     # setup
     tokenizer, vl_chat_processor, vl_gpt = load_pretrained_model(args.model_path)
     generation_config = dict(
@@ -163,12 +172,14 @@ def main(args):
         use_cache=True,
     )
     if args.temperature > 0:
-        generation_config.update({
-            "do_sample": True,
-            "top_p": args.top_p,
-            "temperature": args.temperature,
-            "repetition_penalty": args.repetition_penalty,
-        })
+        generation_config.update(
+            {
+                "do_sample": True,
+                "top_p": args.top_p,
+                "temperature": args.temperature,
+                "repetition_penalty": args.repetition_penalty,
+            }
+        )
     else:
         generation_config.update({"do_sample": False})

@@ -177,12 +188,15 @@ def main(args):

 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
-    parser.add_argument("--model_path", type=str, default="deepseek-ai/deepseek-vl-7b-chat",
-                        help="the huggingface model name or the local path of the downloaded huggingface model.")
+    parser.add_argument(
+        "--model_path",
+        type=str,
+        default="deepseek-ai/deepseek-vl-7b-chat",
+        help="the huggingface model name or the local path of the downloaded huggingface model.",
+    )
     parser.add_argument("--temperature", type=float, default=0.2)
     parser.add_argument("--top_p", type=float, default=0.95)
     parser.add_argument("--repetition_penalty", type=float, default=1.1)
     parser.add_argument("--max_gen_len", type=int, default=512)
     args = parser.parse_args()
     main(args)
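Putting the parser together: with these defaults the script can be launched as, e.g., python cli_chat.py --model_path deepseek-ai/deepseek-vl-7b-chat (every flag shown above is optional), and passing --temperature 0 takes the greedy branch of the do_sample logic in main() above.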
deepseek_vl/serve/app_deepseek.py

@@ -1,17 +1,46 @@
+# Copyright (c) 2023-2024 DeepSeek.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of
+# this software and associated documentation files (the "Software"), to deal in
+# the Software without restriction, including without limitation the rights to
+# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+# the Software, and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
 # -*- coding:utf-8 -*-

-import gradio as gr
-import torch
 import base64
 from io import BytesIO

-from app_modules.gradio_utils import (cancel_outputing, delete_last_conversation, reset_state,
-                                      reset_textbox, transfer_input, wrap_gen_fn)
+import gradio as gr
+import torch
+from app_modules.gradio_utils import (
+    cancel_outputing,
+    delete_last_conversation,
+    reset_state,
+    reset_textbox,
+    transfer_input,
+    wrap_gen_fn,
+)
 from app_modules.overwrites import reload_javascript
 from app_modules.presets import CONCURRENT_COUNT, description, description_top, title
-from app_modules.utils import (configure_logger, is_variable_assigned,
-                               strip_stop_words)
-from deepseek_vl.serve.inference import convert_conversation_to_prompts, deepseek_generate, load_model
+from app_modules.utils import configure_logger, is_variable_assigned, strip_stop_words
+
+from deepseek_vl.serve.inference import (
+    convert_conversation_to_prompts,
+    deepseek_generate,
+    load_model,
+)
 from deepseek_vl.utils.conversation import SeparatorStyle

@@ -31,7 +60,9 @@ models = load_models()
 MODELS = sorted(list(models.keys()))


-def generate_prompt_with_history(text, image, history, vl_chat_processor, tokenizer, max_length=2048):
+def generate_prompt_with_history(
+    text, image, history, vl_chat_processor, tokenizer, max_length=2048
+):
     """
     Generate a prompt with history for the deepseek application.

@@ -57,8 +88,10 @@ def generate_prompt_with_history(text, image, history, vl_chat_processor, tokeni
     conversation.messages = history

     if image is not None:
-        if '<image_placeholder>' not in text:
-            text = '<image_placeholder>' + '\n' + text  # append the <image_placeholder> in a new line after the text prompt
+        if "<image_placeholder>" not in text:
+            text = (
+                "<image_placeholder>" + "\n" + text
+            )  # append the <image_placeholder> in a new line after the text prompt
         text = (text, image)

     conversation.append_message(conversation.roles[user_role_ind], text)
@@ -73,7 +106,11 @@ def generate_prompt_with_history(text, image, history, vl_chat_processor, tokeni

     for _ in range(rounds):
         current_prompt = get_prompt(conversation)
-        current_prompt = current_prompt.replace("</s>", "") if sft_format == "deepseek" else current_prompt
+        current_prompt = (
+            current_prompt.replace("</s>", "")
+            if sft_format == "deepseek"
+            else current_prompt
+        )

         if torch.tensor(tokenizer.encode(current_prompt)).size(-1) <= max_length:
             return conversation_copy
@@ -101,11 +138,11 @@ def to_gradio_chatbot(conv):
         if type(msg) is tuple:
             msg, image = msg
             if isinstance(image, str):
-                with open(image, 'rb') as f:
+                with open(image, "rb") as f:
                     data = f.read()
                 img_b64_str = base64.b64encode(data).decode()
                 image_str = f'<video src="data:video/mp4;base64,{img_b64_str}" controls width="426" height="240"></video>'
-                msg = msg.replace('\n'.join(['<image_placeholder>'] * 4), image_str)
+                msg = msg.replace("\n".join(["<image_placeholder>"] * 4), image_str)
             else:
                 max_hw, min_hw = max(image.size), min(image.size)
                 aspect_ratio = max_hw / min_hw
@@ -122,7 +159,7 @@ def to_gradio_chatbot(conv):
                 image.save(buffered, format="JPEG")
                 img_b64_str = base64.b64encode(buffered.getvalue()).decode()
                 img_str = f'<img src="data:image/png;base64,{img_b64_str}" alt="user upload image" />'
-                msg = msg.replace('<image_placeholder>', img_str)
+                msg = msg.replace("<image_placeholder>", img_str)
             ret.append([msg, None])
         else:
             ret[-1][-1] = msg
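The quote normalization above does not touch the logic: chat-history images are inlined into the HTML as base64 data URIs. The core of that embedding, extracted as a standalone sketch (the thumbnail resizing done by the surrounding code is omitted):

    import base64
    from io import BytesIO

    from PIL import Image


    def pil_to_img_tag(image: Image.Image) -> str:
        # serialize the image into an in-memory buffer, then base64-encode it
        # into a self-contained <img> tag, as to_gradio_chatbot does above
        buffered = BytesIO()
        image.save(buffered, format="JPEG")
        img_b64_str = base64.b64encode(buffered.getvalue()).decode()
        # the app itself labels the payload image/png while saving JPEG;
        # browsers generally sniff the real format, so either label renders
        return f'<img src="data:image/jpeg;base64,{img_b64_str}" alt="user upload image" />'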
@@ -135,24 +172,24 @@ def to_gradio_history(conv):


 def get_prompt(conv) -> str:
     """Get the prompt for generation."""
     system_prompt = conv.system_template.format(system_message=conv.system_message)
     if conv.sep_style == SeparatorStyle.DeepSeek:
         seps = [conv.sep, conv.sep2]
         if system_prompt == "" or system_prompt is None:
             ret = ""
-        else:
-            ret = system_prompt + seps[0]
-        for i, (role, message) in enumerate(conv.messages):
-            if message:
-                if type(message) is tuple:  # multimodal message
-                    message, _ = message
-                ret += role + ": " + message + seps[i % 2]
-            else:
-                ret += role + ":"
-        return ret
         else:
+            ret = system_prompt + seps[0]
+        for i, (role, message) in enumerate(conv.messages):
+            if message:
+                if type(message) is tuple:  # multimodal message
+                    message, _ = message
+                ret += role + ": " + message + seps[i % 2]
+            else:
+                ret += role + ":"
+        return ret
+    else:
         return conv.get_prompt


 @wrap_gen_fn
@@ -197,7 +234,12 @@ def predict(
         return

     conversation = generate_prompt_with_history(
-        text, image, history, vl_chat_processor, tokenizer, max_length=max_context_length_tokens
+        text,
+        image,
+        history,
+        vl_chat_processor,
+        tokenizer,
+        max_length=max_context_length_tokens,
     )
     prompts = convert_conversation_to_prompts(conversation)

@@ -221,7 +263,9 @@ def predict(
         response = strip_stop_words(full_response, stop_words)
         conversation.update_last_message(response)
         gradio_chatbot_output[-1][1] = response
-        yield gradio_chatbot_output, to_gradio_history(conversation), "Generating..."
+        yield gradio_chatbot_output, to_gradio_history(
+            conversation
+        ), "Generating..."

     print("flushed result to gradio")
     torch.cuda.empty_cache()
@@ -272,9 +316,6 @@ def retry(


 def build_demo(MODELS):
-    with open("deepseek_vl/serve/assets/custom.css", "r", encoding="utf-8") as f:
-        customCSS = f.read()
-
     with gr.Blocks(theme=gr.themes.Soft()) as demo:
         history = gr.State([])
         input_text = gr.State()
@@ -297,7 +338,9 @@ def build_demo(MODELS):
             )
             with gr.Row():
                 with gr.Column(scale=4):
-                    text_box = gr.Textbox(show_label=False, placeholder="Enter text", container=False)
+                    text_box = gr.Textbox(
+                        show_label=False, placeholder="Enter text", container=False
+                    )
                 with gr.Column(
                     min_width=70,
                 ):
@@ -367,28 +410,28 @@ def build_demo(MODELS):

         examples_list = [
             [
-                'deepseek_vl/serve/examples/rap.jpeg',
-                'Can you write me a master rap song that rhymes very well based on this image?',
+                "deepseek_vl/serve/examples/rap.jpeg",
+                "Can you write me a master rap song that rhymes very well based on this image?",
             ],
             [
-                'deepseek_vl/serve/examples/app.png',
-                'What is this app about?',
+                "deepseek_vl/serve/examples/app.png",
+                "What is this app about?",
             ],
             [
-                'deepseek_vl/serve/examples/pipeline.png',
-                'Help me write a python code based on the image.',
+                "deepseek_vl/serve/examples/pipeline.png",
+                "Help me write a python code based on the image.",
             ],
             [
-                'deepseek_vl/serve/examples/chart.png',
-                'Could you help me to re-draw this picture with python codes?',
+                "deepseek_vl/serve/examples/chart.png",
+                "Could you help me to re-draw this picture with python codes?",
             ],
             [
-                'deepseek_vl/serve/examples/mirror.png',
-                'How many people are there in the image. Why?',
+                "deepseek_vl/serve/examples/mirror.png",
+                "How many people are there in the image. Why?",
             ],
             [
-                'deepseek_vl/serve/examples/puzzle.png',
-                'Can this 2 pieces combine together?',
+                "deepseek_vl/serve/examples/puzzle.png",
+                "Can this 2 pieces combine together?",
             ],
         ]
         gr.Examples(examples=examples_list, inputs=[image_box, text_box])
@@ -429,7 +472,9 @@ def build_demo(MODELS):
             show_progress=True,
         )

-        reset_args = dict(fn=reset_textbox, inputs=[], outputs=[text_box, status_display])
+        reset_args = dict(
+            fn=reset_textbox, inputs=[], outputs=[text_box, status_display]
+        )

         predict_events = [
             text_box.submit(**transfer_input_args).then(**predict_args),
deepseek_vl/serve/app_modules/gradio_utils.py

@@ -1,3 +1,22 @@
+# Copyright (c) 2023-2024 DeepSeek.
+# [... same 18-line MIT license header as in app_deepseek.py above ...]
+
 from functools import wraps

 import gradio as gr
@@ -11,7 +30,7 @@ def wrap_gen_fn(gen_fn):
         except gr.Error as g_err:
             raise g_err
         except Exception as e:
-            raise gr.Error(f'Failed to generate text: {e}') from e
+            raise gr.Error(f"Failed to generate text: {e}") from e

     return wrapped_gen_fn

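For context, wrap_gen_fn is a decorator around the Gradio generator callbacks; the reformatted line above is its error path. A sketch of the whole decorator consistent with the visible excerpt (the yield-from body is inferred rather than shown in this diff, so treat it as illustrative):

    from functools import wraps

    import gradio as gr


    def wrap_gen_fn(gen_fn):
        @wraps(gen_fn)
        def wrapped_gen_fn(*args, **kwargs):
            try:
                yield from gen_fn(*args, **kwargs)
            except gr.Error as g_err:
                raise g_err  # let Gradio errors propagate unchanged
            except Exception as e:
                raise gr.Error(f"Failed to generate text: {e}") from e

        return wrapped_gen_fn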
deepseek_vl/serve/app_modules/overwrites.py

@@ -1,3 +1,22 @@
+# Copyright (c) 2023-2024 DeepSeek.
+# [... same 18-line MIT license header as in app_deepseek.py above ...]
+
 from __future__ import annotations

 import logging
@@ -17,7 +36,9 @@ def compact_text_chunks(self, prompt, text_chunks: List[str]) -> List[str]:
     return text_splitter.split_text(combined_str)


-def postprocess(self, y: List[Tuple[str | None, str | None]]) -> List[Tuple[str | None, str | None]]:
+def postprocess(
+    self, y: List[Tuple[str | None, str | None]]
+) -> List[Tuple[str | None, str | None]]:
     """
     Parameters:
         y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format.
@@ -46,11 +67,11 @@ with open("deepseek_vl/serve/assets/custom.js", "r", encoding="utf-8") as f, ope

 def reload_javascript():
     print("Reloading javascript...")
-    js = f'<script>{customJS}</script><script>{kelpyCodos}</script>'
+    js = f"<script>{customJS}</script><script>{kelpyCodos}</script>"

     def template_response(*args, **kwargs):
         res = GradioTemplateResponseOriginal(*args, **kwargs)
-        res.body = res.body.replace(b'</html>', f'{js}</html>'.encode("utf8"))
+        res.body = res.body.replace(b"</html>", f"{js}</html>".encode("utf8"))
         res.init_headers()
         return res

deepseek_vl/serve/app_modules/presets.py

@@ -1,3 +1,22 @@
+# Copyright (c) 2023-2024 DeepSeek.
+# [... same 18-line MIT license header as in app_deepseek.py above ...]
+
 # -*- coding:utf-8 -*-
 import gradio as gr

deepseek_vl/serve/app_modules/utils.py

@@ -1,3 +1,22 @@
+# Copyright (c) 2023-2024 DeepSeek.
+# [... same 18-line MIT license header as in app_deepseek.py above ...]
+
 # -*- coding:utf-8 -*-
 from __future__ import annotations

@@ -7,25 +26,28 @@ import re
 import time

 import mdtex2html
+from app_modules.presets import ALREADY_CONVERTED_MARK
 from markdown import markdown
 from pygments import highlight
 from pygments.formatters import HtmlFormatter
 from pygments.lexers import ClassNotFound, get_lexer_by_name, guess_lexer

-from app_modules.presets import ALREADY_CONVERTED_MARK
-
-logger = logging.getLogger('gradio_logger')
+logger = logging.getLogger("gradio_logger")


 def configure_logger():
-    logger = logging.getLogger('gradio_logger')
+    logger = logging.getLogger("gradio_logger")
     logger.setLevel(logging.DEBUG)

     timestr = time.strftime("%Y%m%d-%H%M%S")
-    file_handler = logging.FileHandler(f'deepseek_vl/serve/logs/{timestr}_gradio_log.log')
+    file_handler = logging.FileHandler(
+        f"deepseek_vl/serve/logs/{timestr}_gradio_log.log"
+    )
     console_handler = logging.StreamHandler()

-    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+    formatter = logging.Formatter(
+        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+    )
     console_handler.setFormatter(formatter)
     file_handler.setFormatter(formatter)

@@ -85,7 +107,9 @@ def normalize_markdown(md_text: str) -> str:  # deprecated
             inside_list = True
             normalized_lines.append(line)
         elif inside_list and line.strip() == "":
-            if i < len(lines) - 1 and not re.match(r"^(\d+\.|-|\*|\+)\s", lines[i + 1].strip()):
+            if i < len(lines) - 1 and not re.match(
+                r"^(\d+\.|-|\*|\+)\s", lines[i + 1].strip()
+            ):
                 normalized_lines.append(line)
             continue
         else:
@@ -119,7 +143,7 @@ def convert_mdtext(md_text):


 def convert_asis(userinput):
-    return f'<p style=\"white-space:pre-wrap;\">{html.escape(userinput)}</p>{ALREADY_CONVERTED_MARK}'
+    return f'<p style="white-space:pre-wrap;">{html.escape(userinput)}</p>{ALREADY_CONVERTED_MARK}'


 def is_stop_word_or_prefix(s: str, stop_words: list) -> bool:
@@ -188,7 +212,9 @@ def add_language_tag(text):
         code_block = match.group(2)
         if match.group(2).startswith("\n"):
             language = detect_language(code_block)
-            return f"```{language}{code_block}```" if language else f"```\n{code_block}```"
+            return (
+                f"```{language}{code_block}```" if language else f"```\n{code_block}```"
+            )
         else:
             return match.group(1) + code_block + "```"

deepseek_vl/serve/assets/Kelpy-Codos.js

@@ -1,3 +1,24 @@
+/**
+ * Copyright (c) 2023-2024 DeepSeek.
+ * [... same MIT license text as above, as a JS block comment ...]
+ */
+
 // ==UserScript==
 // @name         Kelpy Codos
 // @namespace    https://github.com/Keldos-Li/Kelpy-Codos
deepseek_vl/serve/assets/custom.css

@@ -1,3 +1,24 @@
+/**
+ * Copyright (c) 2023-2024 DeepSeek.
+ * [... same MIT license text as above, as a CSS block comment ...]
+ */
+
 :root {
   --chatbot-color-light: #f3f3f3;
   --chatbot-color-dark: #121111;
deepseek_vl/serve/assets/custom.js

@@ -1 +1,22 @@
+/**
+ * Copyright (c) 2023-2024 DeepSeek.
+ * [... same MIT license text as above, as a JS block comment ...]
+ */
+
 // custom javascript here
deepseek_vl/serve/inference.py

@@ -1,19 +1,44 @@
+# Copyright (c) 2023-2024 DeepSeek.
+# [... same 18-line MIT license header as in app_deepseek.py above ...]
+
 from threading import Thread
 from typing import List

 import torch
 import transformers
-from transformers import (AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList,
-                          TextIteratorStreamer)
+from transformers import (
+    AutoModelForCausalLM,
+    StoppingCriteria,
+    StoppingCriteriaList,
+    TextIteratorStreamer,
+)

-from deepseek_vl.models import VLChatProcessor, MultiModalityCausalLM
+from deepseek_vl.models import MultiModalityCausalLM, VLChatProcessor
 from deepseek_vl.utils.conversation import Conversation


 def load_model(model_path):
     vl_chat_processor: VLChatProcessor = VLChatProcessor.from_pretrained(model_path)
     tokenizer = vl_chat_processor.tokenizer
-    vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
+    vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(
+        model_path, trust_remote_code=True
+    )
     vl_gpt = vl_gpt.to(torch.bfloat16).cuda().eval()
     return tokenizer, vl_gpt, vl_chat_processor

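Call-side usage of the loader is unchanged by the reflow; note the return order (a minimal sketch using the model identifier that appears elsewhere in this commit; any local path to a downloaded checkpoint works as well):

    tokenizer, vl_gpt, vl_chat_processor = load_model("deepseek-ai/deepseek-vl-7b-chat")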
@@ -25,7 +50,9 @@ def convert_conversation_to_prompts(conversation: Conversation):
     for i in range(0, len(messages), 2):
         prompt = {
             "role": messages[i][0],
-            "content": messages[i][1][0] if isinstance(messages[i][1], tuple) else messages[i][1],
+            "content": messages[i][1][0]
+            if isinstance(messages[i][1], tuple)
+            else messages[i][1],
             "images": [messages[i][1][1]] if isinstance(messages[i][1], tuple) else [],
         }
         response = {"role": messages[i + 1][0], "content": messages[i + 1][1]}
@@ -39,7 +66,9 @@ class StoppingCriteriaSub(StoppingCriteria):
         super().__init__()
         self.stops = [stop.to("cuda") for stop in stops]

-    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs):
+    def __call__(
+        self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs
+    ):
         for stop in self.stops:
             if input_ids.shape[-1] < len(stop):
                 continue
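The rewrapped __call__ above follows the usual custom-stopping-criteria recipe: return True once the generated ids end with any stop sequence. Filled out as a self-contained sketch (the tail-comparison line is inferred from the excerpt's shape, so treat it as an assumption rather than the repository's exact code):

    import torch
    from transformers import StoppingCriteria


    class StoppingCriteriaSub(StoppingCriteria):
        def __init__(self, stops=[]):
            super().__init__()
            self.stops = [stop.to("cuda") for stop in stops]

        def __call__(
            self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs
        ):
            for stop in self.stops:
                if input_ids.shape[-1] < len(stop):
                    continue  # not enough tokens generated yet to match this stop
                # assumed matching rule: stop once the tail equals the stop sequence
                if torch.equal(input_ids[0, -len(stop):], stop):
                    return True
            return False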
@@ -70,9 +99,7 @@ def deepseek_generate(
         pil_images.append(pil_img)

     prepare_inputs = vl_chat_processor(
-        conversations=prompts,
-        images=pil_images,
-        force_batchify=True
+        conversations=prompts, images=pil_images, force_batchify=True
     ).to(vl_gpt.device)

     return generate(
@@ -106,7 +133,9 @@ def generate(
     stop_words_ids = [
         torch.tensor(tokenizer.encode(stop_word)) for stop_word in stop_words
     ]
-    stopping_criteria = StoppingCriteriaList([StoppingCriteriaSub(stops=stop_words_ids)])
+    stopping_criteria = StoppingCriteriaList(
+        [StoppingCriteriaSub(stops=stop_words_ids)]
+    )

     generation_config = dict(
         inputs_embeds=inputs_embeds,
inference.py (20 changed lines)

@@ -1,37 +1,33 @@
 import torch
 from transformers import AutoModelForCausalLM

-from deepseek_vl.models import VLChatProcessor, MultiModalityCausalLM
+from deepseek_vl.models import MultiModalityCausalLM, VLChatProcessor
 from deepseek_vl.utils.io import load_pil_images


 # specify the path to the model
 model_path = "deepseek-ai/deepseek-vl-7b-chat"
 vl_chat_processor: VLChatProcessor = VLChatProcessor.from_pretrained(model_path)
 tokenizer = vl_chat_processor.tokenizer

-vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
+vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(
+    model_path, trust_remote_code=True
+)
 vl_gpt = vl_gpt.to(torch.bfloat16).cuda().eval()

 conversation = [
     {
         "role": "User",
         "content": "<image_placeholder>Describe each stage of this image.",
-        "images": ["./images/training_pipelines.jpg"]
+        "images": ["./images/training_pipelines.jpg"],
     },
-    {
-        "role": "Assistant",
-        "content": ""
-    }
+    {"role": "Assistant", "content": ""},
 ]


 # load images and prepare for inputs
 pil_images = load_pil_images(conversation)
 prepare_inputs = vl_chat_processor(
-    conversations=conversation,
-    images=pil_images,
-    force_batchify=True
+    conversations=conversation, images=pil_images, force_batchify=True
 ).to(vl_gpt.device)

 # run image encoder to get the image embeddings
@@ -46,7 +42,7 @@ outputs = vl_gpt.language_model.generate(
     eos_token_id=tokenizer.eos_token_id,
     max_new_tokens=512,
     do_sample=False,
-    use_cache=True
+    use_cache=True,
 )

 answer = tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=True)