chore: make format

Bo Liu 2024-03-13 14:39:46 +08:00
parent 90a18501d7
commit 48cc0deea6
13 changed files with 345 additions and 113 deletions

Makefile

@@ -4,7 +4,7 @@ COPYRIGHT = "DeepSeek."
PROJECT_PATH = deepseek_vl
SHELL = /bin/bash
SOURCE_FOLDERS = deepseek_vl
PYTHON_FILES = $(shell find $(SOURCE_FOLDERS) -type f -name "*.py" -o -name "*.pyi")
PYTHON_FILES = $(shell find $(SOURCE_FOLDERS) -type f -name "*.py" -o -name "*.pyi") cli_chat.py inference.py
COMMIT_HASH = $(shell git log -1 --format=%h)
PATH := $(HOME)/go/bin:$(PATH)
PYTHON ?= $(shell command -v python3 || command -v python)

README.md

@@ -192,7 +192,7 @@ This code repository is licensed under [the MIT License](https://github.com/deep
```
@misc{lu2024deepseekvl,
title={DeepSeek-VL: Towards Real-World Vision-Language Understanding},
author={Haoyu Lu and Wen Liu and Bo Zhang and Bingxuan Wang and Kai Dong and Bo Liu and Jingxiang Sun and Tongzheng Ren and Zhuoshu Li and Yaofeng Sun and Chengqi Deng and Hanwei Xu and Zhenda Xie and Chong Ruan},
author={Haoyu Lu and Wen Liu and Bo Zhang and Bingxuan Wang and Kai Dong and Bo Liu and Jingxiang Sun and Tongzheng Ren and Zhuoshu Li and Hao Yang and Yaofeng Sun and Chengqi Deng and Hanwei Xu and Zhenda Xie and Chong Ruan},
year={2024},
eprint={2403.05525},
archivePrefix={arXiv},

cli_chat.py

@@ -3,9 +3,10 @@
import argparse
import os
import sys
from PIL import Image
from threading import Thread
import torch
from PIL import Image
from transformers import TextIteratorStreamer
from deepseek_vl.utils.io import load_pretrained_model
@@ -33,22 +34,19 @@ def get_help_message(image_token):
@torch.inference_mode()
def response(args, conv, pil_images, tokenizer, vl_chat_processor, vl_gpt, generation_config):
def response(
args, conv, pil_images, tokenizer, vl_chat_processor, vl_gpt, generation_config
):
prompt = conv.get_prompt()
prepare_inputs = vl_chat_processor.__call__(
prompt=prompt,
images=pil_images,
force_batchify=True
prompt=prompt, images=pil_images, force_batchify=True
).to(vl_gpt.device)
# run image encoder to get the image embeddings
inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)
streamer = TextIteratorStreamer(
tokenizer=tokenizer,
skip_prompt=True,
skip_special_tokens=True
tokenizer=tokenizer, skip_prompt=True, skip_special_tokens=True
)
generation_config["inputs_embeds"] = inputs_embeds
generation_config["attention_mask"] = prepare_inputs.attention_mask
@@ -79,7 +77,6 @@ def chat(args, tokenizer, vl_chat_processor, vl_gpt, generation_config):
help_msg = get_help_message(image_token)
while True:
print(help_msg)
pil_images = []
@@ -87,9 +84,10 @@ def chat(args, tokenizer, vl_chat_processor, vl_gpt, generation_config):
roles = conv.roles
while True:
# get user input
user_input = get_user_input(f"{roles[0]} [{image_token} indicates an image]: ")
user_input = get_user_input(
f"{roles[0]} [{image_token} indicates an image]: "
)
if user_input == "exit":
print("Chat program exited.")
@@ -115,7 +113,9 @@ def chat(args, tokenizer, vl_chat_processor, vl_gpt, generation_config):
while cur_img_idx < num_images:
try:
image_file = input(f"({cur_img_idx + 1}/{num_images}) Input the image file path: ")
image_file = input(
f"({cur_img_idx + 1}/{num_images}) Input the image file path: "
)
except KeyboardInterrupt:
print()
@@ -134,11 +134,21 @@ def chat(args, tokenizer, vl_chat_processor, vl_gpt, generation_config):
sys.exit(0)
else:
print(f"File error, `{image_file}` does not exist. Please input the correct file path.")
print(
f"File error, `{image_file}` does not exist. Please input the correct file path."
)
# get the answer by the model's prediction
answer = ""
answer_iter = response(args, conv, pil_images, tokenizer, vl_chat_processor, vl_gpt, generation_config)
answer_iter = response(
args,
conv,
pil_images,
tokenizer,
vl_chat_processor,
vl_gpt,
generation_config,
)
sys.stdout.write(f"{conv.roles[1]}: ")
for char in answer_iter:
answer += char
@@ -152,7 +162,6 @@ def chat(args, tokenizer, vl_chat_processor, vl_gpt, generation_config):
def main(args):
# setup
tokenizer, vl_chat_processor, vl_gpt = load_pretrained_model(args.model_path)
generation_config = dict(
@@ -163,12 +172,14 @@ def main(args):
use_cache=True,
)
if args.temperature > 0:
generation_config.update({
"do_sample": True,
"top_p": args.top_p,
"temperature": args.temperature,
"repetition_penalty": args.repetition_penalty,
})
generation_config.update(
{
"do_sample": True,
"top_p": args.top_p,
"temperature": args.temperature,
"repetition_penalty": args.repetition_penalty,
}
)
else:
generation_config.update({"do_sample": False})
@@ -177,12 +188,15 @@ def main(args):
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model_path", type=str, default="deepseek-ai/deepseek-vl-7b-chat",
help="the huggingface model name or the local path of the downloaded huggingface model.")
parser.add_argument(
"--model_path",
type=str,
default="deepseek-ai/deepseek-vl-7b-chat",
help="the huggingface model name or the local path of the downloaded huggingface model.",
)
parser.add_argument("--temperature", type=float, default=0.2)
parser.add_argument("--top_p", type=float, default=0.95)
parser.add_argument("--repetition_penalty", type=float, default=1.1)
parser.add_argument("--max_gen_len", type=int, default=512)
args = parser.parse_args()
main(args)

deepseek_vl/serve/app_deepseek.py

@@ -1,17 +1,46 @@
# Copyright (c) 2023-2024 DeepSeek.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# -*- coding:utf-8 -*-
import gradio as gr
import torch
import base64
from io import BytesIO
from app_modules.gradio_utils import (cancel_outputing, delete_last_conversation, reset_state,
reset_textbox, transfer_input, wrap_gen_fn)
import gradio as gr
import torch
from app_modules.gradio_utils import (
cancel_outputing,
delete_last_conversation,
reset_state,
reset_textbox,
transfer_input,
wrap_gen_fn,
)
from app_modules.overwrites import reload_javascript
from app_modules.presets import CONCURRENT_COUNT, description, description_top, title
from app_modules.utils import (configure_logger, is_variable_assigned,
strip_stop_words)
from deepseek_vl.serve.inference import convert_conversation_to_prompts, deepseek_generate, load_model
from app_modules.utils import configure_logger, is_variable_assigned, strip_stop_words
from deepseek_vl.serve.inference import (
convert_conversation_to_prompts,
deepseek_generate,
load_model,
)
from deepseek_vl.utils.conversation import SeparatorStyle
@@ -31,7 +60,9 @@ models = load_models()
MODELS = sorted(list(models.keys()))
def generate_prompt_with_history(text, image, history, vl_chat_processor, tokenizer, max_length=2048):
def generate_prompt_with_history(
text, image, history, vl_chat_processor, tokenizer, max_length=2048
):
"""
Generate a prompt with history for the deepseek application.
@@ -57,8 +88,10 @@ def generate_prompt_with_history(text, image, history, vl_chat_processor, tokeni
conversation.messages = history
if image is not None:
if '<image_placeholder>' not in text:
text = '<image_placeholder>' + '\n' + text # append the <image_placeholder> in a new line after the text prompt
if "<image_placeholder>" not in text:
text = (
"<image_placeholder>" + "\n" + text
) # append the <image_placeholder> in a new line after the text prompt
text = (text, image)
conversation.append_message(conversation.roles[user_role_ind], text)
@@ -73,7 +106,11 @@ def generate_prompt_with_history(text, image, history, vl_chat_processor, tokeni
for _ in range(rounds):
current_prompt = get_prompt(conversation)
current_prompt = current_prompt.replace("</s>", "") if sft_format == "deepseek" else current_prompt
current_prompt = (
current_prompt.replace("</s>", "")
if sft_format == "deepseek"
else current_prompt
)
if torch.tensor(tokenizer.encode(current_prompt)).size(-1) <= max_length:
return conversation_copy
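
Only the length check is visible in this hunk; the step that actually shrinks the history lies outside it. A hypothetical reconstruction of the whole loop — the round-dropping detail is an assumption, and render_prompt stands in for this file's get_prompt:

```python
import copy


def truncate_history(conversation, tokenizer, render_prompt, max_length=2048):
    # Assumed logic: re-render the prompt each round and, while it exceeds
    # max_length tokens, drop the oldest user/assistant pair from the copy.
    conversation_copy = copy.deepcopy(conversation)
    rounds = len(conversation_copy.messages) // 2
    for _ in range(rounds):
        prompt = render_prompt(conversation_copy)
        if len(tokenizer.encode(prompt)) <= max_length:
            return conversation_copy
        conversation_copy.messages = conversation_copy.messages[2:]
    return None  # even a single round does not fit
```
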
@@ -101,11 +138,11 @@ def to_gradio_chatbot(conv):
if type(msg) is tuple:
msg, image = msg
if isinstance(image, str):
with open(image, 'rb') as f:
with open(image, "rb") as f:
data = f.read()
img_b64_str = base64.b64encode(data).decode()
image_str = f'<video src="data:video/mp4;base64,{img_b64_str}" controls width="426" height="240"></video>'
msg = msg.replace('\n'.join(['<image_placeholder>'] * 4), image_str)
msg = msg.replace("\n".join(["<image_placeholder>"] * 4), image_str)
else:
max_hw, min_hw = max(image.size), min(image.size)
aspect_ratio = max_hw / min_hw
@@ -122,7 +159,7 @@ def to_gradio_chatbot(conv):
image.save(buffered, format="JPEG")
img_b64_str = base64.b64encode(buffered.getvalue()).decode()
img_str = f'<img src="data:image/png;base64,{img_b64_str}" alt="user upload image" />'
msg = msg.replace('<image_placeholder>', img_str)
msg = msg.replace("<image_placeholder>", img_str)
ret.append([msg, None])
else:
ret[-1][-1] = msg
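
The quote changes above land inside the logic that inlines chat images as base64 HTML. Distilled into a standalone helper — a sketch, not code from this commit (the original declares a data:image/png URI while saving JPEG bytes; the sketch uses matching types):

```python
import base64
from io import BytesIO

from PIL import Image


def pil_to_img_tag(image: Image.Image) -> str:
    # Serialize the image to in-memory JPEG and inline it as an <img> tag.
    buffered = BytesIO()
    image.convert("RGB").save(buffered, format="JPEG")
    img_b64_str = base64.b64encode(buffered.getvalue()).decode()
    return f'<img src="data:image/jpeg;base64,{img_b64_str}" alt="user upload image" />'
```
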
@@ -135,24 +172,24 @@ def to_gradio_history(conv):
def get_prompt(conv) -> str:
"""Get the prompt for generation."""
system_prompt = conv.system_template.format(system_message=conv.system_message)
if conv.sep_style == SeparatorStyle.DeepSeek:
seps = [conv.sep, conv.sep2]
if system_prompt == "" or system_prompt is None:
ret = ""
else:
ret = system_prompt + seps[0]
for i, (role, message) in enumerate(conv.messages):
if message:
if type(message) is tuple: # multimodal message
message, _ = message
ret += role + ": " + message + seps[i % 2]
else:
ret += role + ":"
return ret
"""Get the prompt for generation."""
system_prompt = conv.system_template.format(system_message=conv.system_message)
if conv.sep_style == SeparatorStyle.DeepSeek:
seps = [conv.sep, conv.sep2]
if system_prompt == "" or system_prompt is None:
ret = ""
else:
return conv.get_prompt
ret = system_prompt + seps[0]
for i, (role, message) in enumerate(conv.messages):
if message:
if type(message) is tuple: # multimodal message
message, _ = message
ret += role + ": " + message + seps[i % 2]
else:
ret += role + ":"
return ret
else:
return conv.get_prompt
@wrap_gen_fn
@@ -197,7 +234,12 @@ def predict(
return
conversation = generate_prompt_with_history(
text, image, history, vl_chat_processor, tokenizer, max_length=max_context_length_tokens
text,
image,
history,
vl_chat_processor,
tokenizer,
max_length=max_context_length_tokens,
)
prompts = convert_conversation_to_prompts(conversation)
@@ -221,7 +263,9 @@ def predict(
response = strip_stop_words(full_response, stop_words)
conversation.update_last_message(response)
gradio_chatbot_output[-1][1] = response
yield gradio_chatbot_output, to_gradio_history(conversation), "Generating..."
yield gradio_chatbot_output, to_gradio_history(
conversation
), "Generating..."
print("flushed result to gradio")
torch.cuda.empty_cache()
@@ -272,9 +316,6 @@ def retry(
def build_demo(MODELS):
with open("deepseek_vl/serve/assets/custom.css", "r", encoding="utf-8") as f:
customCSS = f.read()
with gr.Blocks(theme=gr.themes.Soft()) as demo:
history = gr.State([])
input_text = gr.State()
@@ -297,7 +338,9 @@ def build_demo(MODELS):
)
with gr.Row():
with gr.Column(scale=4):
text_box = gr.Textbox(show_label=False, placeholder="Enter text", container=False)
text_box = gr.Textbox(
show_label=False, placeholder="Enter text", container=False
)
with gr.Column(
min_width=70,
):
@@ -367,28 +410,28 @@ def build_demo(MODELS):
examples_list = [
[
'deepseek_vl/serve/examples/rap.jpeg',
'Can you write me a master rap song that rhymes very well based on this image?',
"deepseek_vl/serve/examples/rap.jpeg",
"Can you write me a master rap song that rhymes very well based on this image?",
],
[
'deepseek_vl/serve/examples/app.png',
'What is this app about?',
"deepseek_vl/serve/examples/app.png",
"What is this app about?",
],
[
'deepseek_vl/serve/examples/pipeline.png',
'Help me write a python code based on the image.',
"deepseek_vl/serve/examples/pipeline.png",
"Help me write a python code based on the image.",
],
[
'deepseek_vl/serve/examples/chart.png',
'Could you help me to re-draw this picture with python codes?',
"deepseek_vl/serve/examples/chart.png",
"Could you help me to re-draw this picture with python codes?",
],
[
'deepseek_vl/serve/examples/mirror.png',
'How many people are there in the image. Why?',
"deepseek_vl/serve/examples/mirror.png",
"How many people are there in the image. Why?",
],
[
'deepseek_vl/serve/examples/puzzle.png',
'Can this 2 pieces combine together?',
"deepseek_vl/serve/examples/puzzle.png",
"Can this 2 pieces combine together?",
],
]
gr.Examples(examples=examples_list, inputs=[image_box, text_box])
@@ -429,7 +472,9 @@ def build_demo(MODELS):
show_progress=True,
)
reset_args = dict(fn=reset_textbox, inputs=[], outputs=[text_box, status_display])
reset_args = dict(
fn=reset_textbox, inputs=[], outputs=[text_box, status_display]
)
predict_events = [
text_box.submit(**transfer_input_args).then(**predict_args),

deepseek_vl/serve/app_modules/gradio_utils.py

@@ -1,3 +1,22 @@
# Copyright (c) 2023-2024 DeepSeek.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from functools import wraps
import gradio as gr
@@ -11,7 +30,7 @@ def wrap_gen_fn(gen_fn):
except gr.Error as g_err:
raise g_err
except Exception as e:
raise gr.Error(f'Failed to generate text: {e}') from e
raise gr.Error(f"Failed to generate text: {e}") from e
return wrapped_gen_fn
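
For context, the decorator enclosing this except clause follows the usual wrap-a-generator shape. A minimal reconstruction — the lines outside the hunk are assumed:

```python
from functools import wraps

import gradio as gr


def wrap_gen_fn(gen_fn):
    @wraps(gen_fn)
    def wrapped_gen_fn(prompt, *args, **kwargs):
        try:
            # Delegate to the wrapped generator; surface any failure as a
            # gr.Error so Gradio reports it in the UI instead of crashing.
            yield from gen_fn(prompt, *args, **kwargs)
        except gr.Error as g_err:
            raise g_err
        except Exception as e:
            raise gr.Error(f"Failed to generate text: {e}") from e

    return wrapped_gen_fn
```
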

deepseek_vl/serve/app_modules/overwrites.py

@@ -1,3 +1,22 @@
# Copyright (c) 2023-2024 DeepSeek.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import annotations
import logging
@@ -17,7 +36,9 @@ def compact_text_chunks(self, prompt, text_chunks: List[str]) -> List[str]:
return text_splitter.split_text(combined_str)
def postprocess(self, y: List[Tuple[str | None, str | None]]) -> List[Tuple[str | None, str | None]]:
def postprocess(
self, y: List[Tuple[str | None, str | None]]
) -> List[Tuple[str | None, str | None]]:
"""
Parameters:
y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format.
@@ -46,11 +67,11 @@ with open("deepseek_vl/serve/assets/custom.js", "r", encoding="utf-8") as f, ope
def reload_javascript():
print("Reloading javascript...")
js = f'<script>{customJS}</script><script>{kelpyCodos}</script>'
js = f"<script>{customJS}</script><script>{kelpyCodos}</script>"
def template_response(*args, **kwargs):
res = GradioTemplateResponseOriginal(*args, **kwargs)
res.body = res.body.replace(b'</html>', f'{js}</html>'.encode("utf8"))
res.body = res.body.replace(b"</html>", f"{js}</html>".encode("utf8"))
res.init_headers()
return res
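
reload_javascript() takes effect through a Gradio 3.x monkey-patch: the stock TemplateResponse is captured once as GradioTemplateResponseOriginal (used above) and later replaced with the patched callable. A hedged sketch of the full mechanism — only template_response itself appears in the hunk; the capture and the final assignment are assumptions:

```python
import gradio as gr

# Capture the original response class once, at import time.
GradioTemplateResponseOriginal = gr.routes.templates.TemplateResponse


def install_custom_js(js: str) -> None:
    def template_response(*args, **kwargs):
        # Render the page normally, splice the <script> tags in just before
        # </html>, then recompute the Content-Length header.
        res = GradioTemplateResponseOriginal(*args, **kwargs)
        res.body = res.body.replace(b"</html>", f"{js}</html>".encode("utf8"))
        res.init_headers()
        return res

    gr.routes.templates.TemplateResponse = template_response
```
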

deepseek_vl/serve/app_modules/presets.py

@@ -1,3 +1,22 @@
# Copyright (c) 2023-2024 DeepSeek.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# -*- coding:utf-8 -*-
import gradio as gr

deepseek_vl/serve/app_modules/utils.py

@@ -1,3 +1,22 @@
# Copyright (c) 2023-2024 DeepSeek.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# -*- coding:utf-8 -*-
from __future__ import annotations
@@ -7,25 +26,28 @@ import re
import time
import mdtex2html
from app_modules.presets import ALREADY_CONVERTED_MARK
from markdown import markdown
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import ClassNotFound, get_lexer_by_name, guess_lexer
from app_modules.presets import ALREADY_CONVERTED_MARK
logger = logging.getLogger('gradio_logger')
logger = logging.getLogger("gradio_logger")
def configure_logger():
logger = logging.getLogger('gradio_logger')
logger = logging.getLogger("gradio_logger")
logger.setLevel(logging.DEBUG)
timestr = time.strftime("%Y%m%d-%H%M%S")
file_handler = logging.FileHandler(f'deepseek_vl/serve/logs/{timestr}_gradio_log.log')
file_handler = logging.FileHandler(
f"deepseek_vl/serve/logs/{timestr}_gradio_log.log"
)
console_handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
console_handler.setFormatter(formatter)
file_handler.setFormatter(formatter)
@@ -85,7 +107,9 @@ def normalize_markdown(md_text: str) -> str: # deprecated
inside_list = True
normalized_lines.append(line)
elif inside_list and line.strip() == "":
if i < len(lines) - 1 and not re.match(r"^(\d+\.|-|\*|\+)\s", lines[i + 1].strip()):
if i < len(lines) - 1 and not re.match(
r"^(\d+\.|-|\*|\+)\s", lines[i + 1].strip()
):
normalized_lines.append(line)
continue
else:
@@ -119,7 +143,7 @@ def convert_mdtext(md_text):
def convert_asis(userinput):
return f'<p style=\"white-space:pre-wrap;\">{html.escape(userinput)}</p>{ALREADY_CONVERTED_MARK}'
return f'<p style="white-space:pre-wrap;">{html.escape(userinput)}</p>{ALREADY_CONVERTED_MARK}'
def is_stop_word_or_prefix(s: str, stop_words: list) -> bool:
@@ -188,7 +212,9 @@ def add_language_tag(text):
code_block = match.group(2)
if match.group(2).startswith("\n"):
language = detect_language(code_block)
return f"```{language}{code_block}```" if language else f"```\n{code_block}```"
return (
f"```{language}{code_block}```" if language else f"```\n{code_block}```"
)
else:
return match.group(1) + code_block + "```"
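
add_language_tag defers to a detect_language helper that lies outside this hunk. A plausible sketch built on the pygments imports added earlier in the file; the repository's actual heuristic may differ:

```python
from pygments.lexers import ClassNotFound, guess_lexer


def detect_language(code_block: str) -> str:
    # Let pygments guess a lexer and use its primary alias as the fence tag;
    # fall back to an untagged fence when nothing matches.
    try:
        lexer = guess_lexer(code_block)
        return lexer.aliases[0] if lexer.aliases else ""
    except ClassNotFound:
        return ""
```
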

deepseek_vl/serve/assets/Kelpy-Codos.js

@@ -1,3 +1,24 @@
/**
* Copyright (c) 2023-2024 DeepSeek.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
// ==UserScript==
// @name Kelpy Codos
// @namespace https://github.com/Keldos-Li/Kelpy-Codos

deepseek_vl/serve/assets/custom.css

@@ -1,3 +1,24 @@
/**
* Copyright (c) 2023-2024 DeepSeek.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
:root {
--chatbot-color-light: #f3f3f3;
--chatbot-color-dark: #121111;

deepseek_vl/serve/assets/custom.js

@@ -1 +1,22 @@
/**
* Copyright (c) 2023-2024 DeepSeek.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
// custom javascript here

deepseek_vl/serve/inference.py

@@ -1,19 +1,44 @@
# Copyright (c) 2023-2024 DeepSeek.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from threading import Thread
from typing import List
import torch
import transformers
from transformers import (AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList,
TextIteratorStreamer)
from transformers import (
AutoModelForCausalLM,
StoppingCriteria,
StoppingCriteriaList,
TextIteratorStreamer,
)
from deepseek_vl.models import VLChatProcessor, MultiModalityCausalLM
from deepseek_vl.models import MultiModalityCausalLM, VLChatProcessor
from deepseek_vl.utils.conversation import Conversation
def load_model(model_path):
vl_chat_processor: VLChatProcessor = VLChatProcessor.from_pretrained(model_path)
tokenizer = vl_chat_processor.tokenizer
vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(
model_path, trust_remote_code=True
)
vl_gpt = vl_gpt.to(torch.bfloat16).cuda().eval()
return tokenizer, vl_gpt, vl_chat_processor
@@ -25,7 +50,9 @@ def convert_conversation_to_prompts(conversation: Conversation):
for i in range(0, len(messages), 2):
prompt = {
"role": messages[i][0],
"content": messages[i][1][0] if isinstance(messages[i][1], tuple) else messages[i][1],
"content": messages[i][1][0]
if isinstance(messages[i][1], tuple)
else messages[i][1],
"images": [messages[i][1][1]] if isinstance(messages[i][1], tuple) else [],
}
response = {"role": messages[i + 1][0], "content": messages[i + 1][1]}
@@ -39,7 +66,9 @@ class StoppingCriteriaSub(StoppingCriteria):
super().__init__()
self.stops = [stop.to("cuda") for stop in stops]
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs):
def __call__(
self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs
):
for stop in self.stops:
if input_ids.shape[-1] < len(stop):
continue
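
The comparison that follows this continue sits outside the hunk. A self-contained sketch of the stop-word criterion it implements — the class and variable names here are illustrative:

```python
import torch
from transformers import StoppingCriteria


class StopOnSequences(StoppingCriteria):
    def __init__(self, stops):
        super().__init__()
        self.stops = stops  # each stop is a 1-D LongTensor of token ids

    def __call__(self, input_ids, scores, **kwargs) -> bool:
        # Stop once the tail of the generated ids matches any stop sequence.
        return any(
            input_ids.shape[-1] >= len(stop)
            and torch.equal(input_ids[0, -len(stop):], stop.to(input_ids.device))
            for stop in self.stops
        )
```
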
@@ -70,9 +99,7 @@ def deepseek_generate(
pil_images.append(pil_img)
prepare_inputs = vl_chat_processor(
conversations=prompts,
images=pil_images,
force_batchify=True
conversations=prompts, images=pil_images, force_batchify=True
).to(vl_gpt.device)
return generate(
@@ -106,7 +133,9 @@ def generate(
stop_words_ids = [
torch.tensor(tokenizer.encode(stop_word)) for stop_word in stop_words
]
stopping_criteria = StoppingCriteriaList([StoppingCriteriaSub(stops=stop_words_ids)])
stopping_criteria = StoppingCriteriaList(
[StoppingCriteriaSub(stops=stop_words_ids)]
)
generation_config = dict(
inputs_embeds=inputs_embeds,

inference.py

@@ -1,37 +1,33 @@
import torch
from transformers import AutoModelForCausalLM
from deepseek_vl.models import VLChatProcessor, MultiModalityCausalLM
from deepseek_vl.models import MultiModalityCausalLM, VLChatProcessor
from deepseek_vl.utils.io import load_pil_images
# specify the path to the model
model_path = "deepseek-ai/deepseek-vl-7b-chat"
vl_chat_processor: VLChatProcessor = VLChatProcessor.from_pretrained(model_path)
tokenizer = vl_chat_processor.tokenizer
vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(
model_path, trust_remote_code=True
)
vl_gpt = vl_gpt.to(torch.bfloat16).cuda().eval()
conversation = [
{
"role": "User",
"content": "<image_placeholder>Describe each stage of this image.",
"images": ["./images/training_pipelines.jpg"]
"images": ["./images/training_pipelines.jpg"],
},
{
"role": "Assistant",
"content": ""
}
{"role": "Assistant", "content": ""},
]
# load images and prepare for inputs
pil_images = load_pil_images(conversation)
prepare_inputs = vl_chat_processor(
conversations=conversation,
images=pil_images,
force_batchify=True
conversations=conversation, images=pil_images, force_batchify=True
).to(vl_gpt.device)
# run image encoder to get the image embeddings
@@ -46,7 +42,7 @@ outputs = vl_gpt.language_model.generate(
eos_token_id=tokenizer.eos_token_id,
max_new_tokens=512,
do_sample=False,
use_cache=True
use_cache=True,
)
answer = tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=True)