Initial commit

This commit is contained in:
Xingchao Liu 2024-12-13 20:38:59 +08:00
commit d527122a4d
53 changed files with 7531 additions and 0 deletions

42
.editorconfig Normal file

@@ -0,0 +1,42 @@
# https://editorconfig.org/
root = true
[*]
charset = utf-8
end_of_line = lf
indent_style = space
indent_size = 4
trim_trailing_whitespace = true
insert_final_newline = true
[*.py]
indent_size = 4
src_paths=evaluation
[*.{yaml,yml,json}]
indent_size = 2
[*.md]
indent_size = 2
x-soft-wrap-text = true
[*.rst]
indent_size = 4
x-soft-wrap-text = true
[*.{bib,tex}]
indent_size = 2
[Makefile]
indent_style = tab
[*.sh]
indent_style = tab
[*.bat]
end_of_line = crlf
indent_style = tab
[*.{cpp,h,cu,cuh}]
indent_size = 2

40
.flake8 Normal file

@@ -0,0 +1,40 @@
[flake8]
max-line-length = 120
max-doc-length = 100
select = B,C,E,F,W,Y,SIM
ignore =
    # E203: whitespace before ':'
    # W503: line break before binary operator
    # W504: line break after binary operator
    # format by black
    E203,W503,W504,
    # E501: line too long
    # W505: doc line too long
    # too long docstring due to long example blocks
    E501,W505,
per-file-ignores =
    # F401: module imported but unused
    # intentionally unused imports
    __init__.py: F401
    # F401: module imported but unused
    # F403: unable to detect undefined names
    # F405: member may be undefined, or defined from star imports
    # members populated from optree
    # E301: expected 1 blank line
    # E302: expected 2 blank lines
    # E305: expected 2 blank lines after class or function definition
    # E701: multiple statements on one line (colon)
    # E704: multiple statements on one line (def)
    # format by black
    *.pyi: E301,E302,E305,E701,E704
exclude =
    .git,
    .vscode,
    venv,
    third-party,
    __pycache__,
    docs/source/conf.py,
    build,
    dist,
    examples,
    tests

8
.gitattributes vendored Normal file

@@ -0,0 +1,8 @@
* text eol=lf
*.ipynb linguist-detectable=false
*.png binary
*.jpg binary
*.jpeg binary
*.gif binary
*.pdf binary

415
.gitignore vendored Normal file

@@ -0,0 +1,415 @@
##### Python.gitignore #####
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
wheelhouse/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
*.whl
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
docs/source/_build/
_autosummary/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# ruff
.ruff_cache/
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
.idea/
##### macOS.gitignore #####
# General
.DS_Store
.AppleDouble
.LSOverride
# Icon must end with two \r
Icon
# Thumbnails
._*
# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
##### Linux.gitignore #####
*~
# Temporary files which can be created if a process still has a handle open of a deleted file
.fuse_hidden*
# KDE directory preferences
.directory
# Linux trash folder which might appear on any partition or disk
.Trash-*
# .nfs files are created when an open file is removed but is still being accessed
.nfs*
##### Windows.gitignore #####
# Windows thumbnail cache files
Thumbs.db
Thumbs.db:encryptable
ehthumbs.db
ehthumbs_vista.db
# Dump file
*.stackdump
# Folder config file
[Dd]esktop.ini
# Recycle Bin used on file shares
$RECYCLE.BIN/
# Windows Installer files
*.cab
*.msi
*.msix
*.msm
*.msp
# Windows shortcuts
*.lnk
##### Archives.gitignore #####
# It's better to unpack these files and commit the raw source because
# git has its own built in compression methods.
*.7z
*.jar
*.rar
*.zip
*.gz
*.gzip
*.tgz
*.bzip
*.bzip2
*.bz2
*.xz
*.lzma
*.cab
*.xar
# Packing-only formats
*.iso
*.tar
# Package management formats
*.dmg
*.xpi
*.gem
*.egg
*.deb
*.rpm
*.msi
*.msm
*.msp
*.txz
##### Xcode.gitignore #####
# Xcode
#
# gitignore contributors: remember to update Global/Xcode.gitignore, Objective-C.gitignore & Swift.gitignore
## User settings
xcuserdata/
## Compatibility with Xcode 8 and earlier (ignoring not required starting Xcode 9)
*.xcscmblueprint
*.xccheckout
## Compatibility with Xcode 3 and earlier (ignoring not required starting Xcode 4)
build/
DerivedData/
*.moved-aside
*.pbxuser
!default.pbxuser
*.mode1v3
!default.mode1v3
*.mode2v3
!default.mode2v3
*.perspectivev3
!default.perspectivev3
## Gcc Patch
/*.gcno
##### JetBrains.gitignore #####
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
# User settings
.idea/*
# User-specific stuff
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/**/usage.statistics.xml
.idea/**/dictionaries
.idea/**/shelf
# Generated files
.idea/**/contentModel.xml
# Sensitive or high-churn files
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
.idea/**/dbnavigator.xml
# Gradle
.idea/**/gradle.xml
.idea/**/libraries
# Gradle and Maven with auto-import
# When using Gradle or Maven with auto-import, you should exclude module files,
# since they will be recreated, and may cause churn. Uncomment if using
# auto-import.
# .idea/artifacts
# .idea/compiler.xml
# .idea/jarRepositories.xml
# .idea/modules.xml
# .idea/*.iml
# .idea/modules
# *.iml
# *.ipr
# CMake
cmake-build-*/
# Mongo Explorer plugin
.idea/**/mongoSettings.xml
# File-based project format
*.iws
# IntelliJ
out/
# mpeltonen/sbt-idea plugin
.idea_modules/
# JIRA plugin
atlassian-ide-plugin.xml
# Cursive Clojure plugin
.idea/replstate.xml
# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
# Editor-based Rest Client
.idea/httpRequests
# Android studio 3.1+ serialized cache file
.idea/caches/build_file_checksums.ser
##### VisualStudioCode.gitignore #####
.vscode/*
# !.vscode/settings.json
# !.vscode/tasks.json
# !.vscode/launch.json
!.vscode/extensions.json
*.code-workspace
# Local History for Visual Studio Code
.history/
##### Vim.gitignore #####
# Swap
.*.s[a-v][a-z]
!*.svg # comment out if you don't need vector files
.*.sw[a-p]
.s[a-rt-v][a-z]
.ss[a-gi-z]
.sw[a-p]
# Session
Session.vim
Sessionx.vim
# Temporary
.netrwhist
*~
# Auto-generated tag files
tags
# Persistent undo
[._]*.un~

75
.pre-commit-config.yaml Normal file

@@ -0,0 +1,75 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
ci:
  skip: [pylint]
  autofix_prs: true
  autofix_commit_msg: "fix: [pre-commit.ci] auto fixes [...]"
  autoupdate_commit_msg: "chore(pre-commit): [pre-commit.ci] autoupdate"
  autoupdate_schedule: monthly
default_stages: [commit, push, manual]
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.5.0
    hooks:
      - id: check-symlinks
      - id: destroyed-symlinks
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: check-yaml
      - id: check-toml
      - id: check-ast
      - id: check-added-large-files
      - id: check-merge-conflict
      - id: check-executables-have-shebangs
      - id: check-shebang-scripts-are-executable
      - id: detect-private-key
      - id: debug-statements
      - id: double-quote-string-fixer
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.1.5
    hooks:
      - id: ruff
        args: [--fix, --exit-non-zero-on-fix]
  - repo: https://github.com/PyCQA/isort
    rev: 5.12.0
    hooks:
      - id: isort
  - repo: https://github.com/psf/black
    rev: 23.11.0
    hooks:
      - id: black-jupyter
  - repo: https://github.com/asottile/pyupgrade
    rev: v3.15.0
    hooks:
      - id: pyupgrade
        args: [--py38-plus] # sync with requires-python
        exclude: |
          (?x)(
            ^images/
          )
  - repo: https://github.com/pycqa/flake8
    rev: 6.1.0
    hooks:
      - id: flake8
        additional_dependencies:
          - flake8-bugbear
          - flake8-comprehensions
          - flake8-docstrings
          - flake8-pyi
          - flake8-simplify
        exclude: |
          (?x)(
            ^images/
          )
  - repo: local
    hooks:
      - id: pylint
        name: pylint
        entry: pylint
        language: system
        types: [python]
        require_serial: true
        exclude: |
          (?x)(
            ^images/
          )

629
.pylintrc Normal file

@@ -0,0 +1,629 @@
[MAIN]
# Analyse import fallback blocks. This can be used to support both Python 2 and
# 3 compatible code, which means that the block might have code that exists
# only in one or another interpreter, leading to false positives when analysed.
analyse-fallback-blocks=no
# Load and enable all available extensions. Use --list-extensions to see a list
# all available extensions.
#enable-all-extensions=
# In error mode, messages with a category besides ERROR or FATAL are
# suppressed, and no reports are done by default. Error mode is compatible with
# disabling specific errors.
#errors-only=
# Always return a 0 (non-error) status code, even if lint errors are found.
# This is primarily useful in continuous integration scripts.
#exit-zero=
# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loading into the active Python interpreter and may
# run arbitrary code.
extension-pkg-allow-list=
# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loading into the active Python interpreter and may
# run arbitrary code. (This is an alternative name to extension-pkg-allow-list
# for backward compatibility.)
extension-pkg-whitelist=
# Return non-zero exit code if any of these messages/categories are detected,
# even if score is above --fail-under value. Syntax same as enable. Messages
# specified are enabled, while categories only check already-enabled messages.
fail-on=
# Specify a score threshold under which the program will exit with error.
fail-under=10
# Interpret the stdin as a python script, whose filename needs to be passed as
# the module_or_package argument.
#from-stdin=
# Files or directories to be skipped. They should be base names, not paths.
ignore=CVS,.vscode,.history
# Add files or directories matching the regular expressions patterns to the
# ignore-list. The regex matches against paths and can be in Posix or Windows
# format. Because '\' represents the directory delimiter on Windows systems, it
# can't be used as an escape character.
ignore-paths=^images/$
# Files or directories matching the regular expression patterns are skipped.
# The regex matches against base names, not paths. The default value ignores
# Emacs file locks
ignore-patterns=^\.#
# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis). It
# supports qualified module names, as well as Unix pattern matching.
ignored-modules=
# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
#init-hook=
# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
# number of processors available to use, and will cap the count on Windows to
# avoid hangs.
jobs=0
# Control the amount of potential inferred values when inferring a single
# object. This can help the performance when dealing with large functions or
# complex, nested conditions.
limit-inference-results=100
# List of plugins (as comma separated values of python module names) to load,
# usually to register additional checkers.
load-plugins=
# Pickle collected data for later comparisons.
persistent=yes
# Minimum Python version to use for version dependent checks. Will default to
# the version used to run pylint.
py-version=3.8 # the lowest version we support (sync with requires-python in pyproject.toml)
# Discover python modules and packages in the file system subtree.
recursive=no
# When enabled, pylint would attempt to guess common misconfiguration and emit
# user-friendly hints instead of false-positive error messages.
suggestion-mode=yes
# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no
# In verbose mode, extra non-checker-related info will be displayed.
#verbose=
[BASIC]
# Naming style matching correct argument names.
argument-naming-style=snake_case
# Regular expression matching correct argument names. Overrides argument-
# naming-style. If left empty, argument names will be checked with the set
# naming style.
#argument-rgx=
# Naming style matching correct attribute names.
attr-naming-style=snake_case
# Regular expression matching correct attribute names. Overrides attr-naming-
# style. If left empty, attribute names will be checked with the set naming
# style.
#attr-rgx=
# Bad variable names which should always be refused, separated by a comma.
bad-names=foo,
bar,
baz,
toto,
tutu,
tata
# Bad variable names regexes, separated by a comma. If names match any regex,
# they will always be refused
bad-names-rgxs=
# Naming style matching correct class attribute names.
class-attribute-naming-style=any
# Regular expression matching correct class attribute names. Overrides class-
# attribute-naming-style. If left empty, class attribute names will be checked
# with the set naming style.
#class-attribute-rgx=
# Naming style matching correct class constant names.
class-const-naming-style=UPPER_CASE
# Regular expression matching correct class constant names. Overrides class-
# const-naming-style. If left empty, class constant names will be checked with
# the set naming style.
#class-const-rgx=
# Naming style matching correct class names.
class-naming-style=PascalCase
# Regular expression matching correct class names. Overrides class-naming-
# style. If left empty, class names will be checked with the set naming style.
#class-rgx=
# Naming style matching correct constant names.
const-naming-style=UPPER_CASE
# Regular expression matching correct constant names. Overrides const-naming-
# style. If left empty, constant names will be checked with the set naming
# style.
#const-rgx=
# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1
# Naming style matching correct function names.
function-naming-style=snake_case
# Regular expression matching correct function names. Overrides function-
# naming-style. If left empty, function names will be checked with the set
# naming style.
#function-rgx=
# Good variable names which should always be accepted, separated by a comma.
good-names=i,
j,
k,
ex,
Run,
_,
op,
fn,
f,
g,
p,
u,
t,
lr,
mu,
nu,
x,
y
# Good variable names regexes, separated by a comma. If names match any regex,
# they will always be accepted
good-names-rgxs=
# Include a hint for the correct naming format with invalid-name.
include-naming-hint=no
# Naming style matching correct inline iteration names.
inlinevar-naming-style=any
# Regular expression matching correct inline iteration names. Overrides
# inlinevar-naming-style. If left empty, inline iteration names will be checked
# with the set naming style.
#inlinevar-rgx=
# Naming style matching correct method names.
method-naming-style=snake_case
# Regular expression matching correct method names. Overrides method-naming-
# style. If left empty, method names will be checked with the set naming style.
#method-rgx=
# Naming style matching correct module names.
module-naming-style=snake_case
# Regular expression matching correct module names. Overrides module-naming-
# style. If left empty, module names will be checked with the set naming style.
#module-rgx=
# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=
# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=^_
# List of decorators that produce properties, such as abc.abstractproperty. Add
# to this list to register other decorators that produce valid properties.
# These decorators are taken in consideration only for invalid-name.
property-classes=abc.abstractproperty
# Regular expression matching correct type variable names. If left empty, type
# variable names will be checked with the set naming style.
#typevar-rgx=
# Naming style matching correct variable names.
variable-naming-style=snake_case
# Regular expression matching correct variable names. Overrides variable-
# naming-style. If left empty, variable names will be checked with the set
# naming style.
#variable-rgx=
[CLASSES]
# Warn about protected attribute access inside special methods
check-protected-access-in-special-methods=no
# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,
__new__,
setUp,
__post_init__
# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,
_fields,
_replace,
_source,
_make
# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls
# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=cls
[DESIGN]
# List of regular expressions of class ancestor names to ignore when counting
# public methods (see R0903)
exclude-too-few-public-methods=
# List of qualified class names to ignore when counting class parents (see
# R0901)
ignored-parents=
# Maximum number of arguments for function / method.
max-args=5
# Maximum number of attributes for a class (see R0902).
max-attributes=7
# Maximum number of boolean expressions in an if statement (see R0916).
max-bool-expr=5
# Maximum number of branch for function / method body.
max-branches=12
# Maximum number of locals for function / method body.
max-locals=15
# Maximum number of parents for a class (see R0901).
max-parents=7
# Maximum number of public methods for a class (see R0904).
max-public-methods=20
# Maximum number of return / yield for function / method body.
max-returns=6
# Maximum number of statements in function / method body.
max-statements=50
# Minimum number of public methods for a class (see R0903).
min-public-methods=2
[EXCEPTIONS]
# Exceptions that will emit a warning when caught.
overgeneral-exceptions=builtins.BaseException,
builtins.Exception
[FORMAT]
# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=
# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$
# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4
# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string=' '
# Maximum number of characters on a single line.
max-line-length=120
# Maximum number of lines in a module.
max-module-lines=1000
# Allow the body of a class to be on the same line as the declaration if body
# contains single statement.
single-line-class-stmt=no
# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no
[IMPORTS]
# List of modules that can be imported at any level, not just the top level
# one.
allow-any-import-level=
# Allow wildcard imports from modules that define __all__.
allow-wildcard-with-all=no
# Deprecated modules which should not be used, separated by a comma.
deprecated-modules=
# Output a graph (.gv or any supported image format) of external dependencies
# to the given file (report RP0402 must not be disabled).
ext-import-graph=
# Output a graph (.gv or any supported image format) of all (i.e. internal and
# external) dependencies to the given file (report RP0402 must not be
# disabled).
import-graph=
# Output a graph (.gv or any supported image format) of internal dependencies
# to the given file (report RP0402 must not be disabled).
int-import-graph=
# Force import order to recognize a module as part of the standard
# compatibility libraries.
known-standard-library=
# Force import order to recognize a module as part of a third party library.
known-third-party=enchant
# Couples of modules and preferred modules, separated by a comma.
preferred-modules=
[LOGGING]
# The type of string formatting that logging methods do. `old` means using %
# formatting, `new` is for `{}` formatting.
logging-format-style=old
# Logging modules to check that the string format arguments are in logging
# function parameter format.
logging-modules=logging
[MESSAGES CONTROL]
# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE,
# UNDEFINED.
confidence=HIGH,
CONTROL_FLOW,
INFERENCE,
INFERENCE_FAILURE,
UNDEFINED
# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once). You can also use "--disable=all" to
# disable everything first and then re-enable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use "--disable=all --enable=classes
# --disable=W".
disable=duplicate-code,
consider-using-from-import
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
# multiple time (only on the command line, not in the configuration file where
# it should appear only once). See also the "--disable" option for examples.
enable=c-extension-no-member
[METHOD_ARGS]
# List of qualified names (i.e., library.method) which require a timeout
# parameter e.g. 'requests.api.get,requests.api.post'
timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request
[MISCELLANEOUS]
# List of note tags to take in consideration, separated by a comma.
notes=FIXME,
XXX,
TODO
# Regular expression of note tags to take in consideration.
notes-rgx=
[REFACTORING]
# Maximum number of nested blocks for function / method body
max-nested-blocks=5
# Complete name of functions that never returns. When checking for
# inconsistent-return-statements if a never returning function is called then
# it will be considered as an explicit return statement and no message will be
# printed.
never-returning-functions=sys.exit,argparse.parse_error
[REPORTS]
# Python expression which should return a score less than or equal to 10. You
# have access to the variables 'fatal', 'error', 'warning', 'refactor',
# 'convention', and 'info' which contain the number of messages in each
# category, as well as 'statement' which is the total number of statements
# analyzed. This score is used by the global evaluation report (RP0004).
evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10))
# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details.
msg-template=
# Set the output format. Available formats are text, parseable, colorized, json
# and msvs (visual studio). You can also give a reporter class, e.g.
# mypackage.mymodule.MyReporterClass.
#output-format=
# Tells whether to display a full report or only the messages.
reports=no
# Activate the evaluation score.
score=yes
[SIMILARITIES]
# Comments are removed from the similarity computation
ignore-comments=yes
# Docstrings are removed from the similarity computation
ignore-docstrings=yes
# Imports are removed from the similarity computation
ignore-imports=yes
# Signatures are removed from the similarity computation
ignore-signatures=yes
# Minimum lines number of a similarity.
min-similarity-lines=4
[SPELLING]
# Limits count of emitted suggestions for spelling mistakes.
max-spelling-suggestions=4
# Spelling dictionary name. Available dictionaries: en_AU (hunspell), en_CA
# (hunspell), en_GB (hunspell), en_US (hunspell), en_ZA (hunspell).
spelling-dict=
# List of comma separated words that should be considered directives if they
# appear at the beginning of a comment and should not be checked.
spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy:
# List of comma separated words that should not be checked.
spelling-ignore-words=
# A path to a file that contains the private dictionary; one word per line.
spelling-private-dict-file=docs/source/spelling_wordlist.txt
# Tells whether to store unknown words to the private dictionary (see the
# --spelling-private-dict-file option) instead of raising a message.
spelling-store-unknown-words=no
[STRING]
# This flag controls whether inconsistent-quotes generates a warning when the
# character used as a quote delimiter is used inconsistently within a module.
check-quote-consistency=no
# This flag controls whether the implicit-str-concat should generate a warning
# on implicit string concatenation in sequences defined over several lines.
check-str-concat-over-line-jumps=no
[TYPECHECK]
# List of decorators that produce context managers, such as
# contextlib.contextmanager. Add to this list to register other decorators that
# produce valid context managers.
contextmanager-decorators=contextlib.contextmanager
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=numpy.*,
torch.*
# Tells whether missing members accessed in mixin class should be ignored. A
# class is considered mixin if its name matches the mixin-class-rgx option.
ignore-mixin-members=yes
# Tells whether to warn about missing members when the owner of the attribute
# is inferred to be None.
ignore-none=yes
# This flag controls whether pylint should warn about no-member and similar
# checks whenever an opaque object is returned when inferring. The inference
# can return multiple potential results while evaluating a Python object, but
# some branches might not be evaluated, which results in partial inference. In
# that case, it might be useful to still emit no-member and other checks for
# the rest of the inferred objects.
ignore-on-opaque-inference=yes
# List of symbolic message names to ignore for Mixin members.
ignored-checks-for-mixins=no-member,
not-async-context-manager,
not-context-manager,
attribute-defined-outside-init
# List of class names for which member attributes should not be checked (useful
# for classes with dynamically set attributes). This supports the use of
# qualified names.
ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace
# Show a hint with possible names when a member name was not found. The aspect
# of finding the hint is based on edit distance.
missing-member-hint=yes
# The minimum edit distance a name should have in order to be considered a
# similar match for a missing member name.
missing-member-hint-distance=1
# The total number of similar names that should be taken in consideration when
# showing a hint for a missing member.
missing-member-max-choices=1
# Regex pattern to define which classes are considered mixins.
mixin-class-rgx=.*[Mm]ixin
# List of decorators that change the signature of a decorated function.
signature-mutators=
[VARIABLES]
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid defining new builtins when possible.
additional-builtins=
# Tells whether unused global variables should be treated as a violation.
allow-global-unused-variables=yes
# List of names allowed to shadow builtins
allowed-redefined-builtins=
# List of strings which can identify a callback function by name. A callback
# name must start or end with one of those strings.
callbacks=cb_,
_cb
# A regular expression matching the name of dummy variables (i.e. expected to
# not be used).
dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
# Argument names that match this expression will be ignored.
ignored-argument-names=_.*|^ignored_|^unused_
# Tells whether we should check for unused import in __init__ files.
init-import=no
# List of qualified module names which can have objects that can redefine
# builtins.
redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io

BIN
DeepSeek_VL2_paper.pdf Normal file

Binary file not shown.

21
LICENSE-CODE Normal file

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2023 DeepSeek
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

91
LICENSE-MODEL Normal file

@@ -0,0 +1,91 @@
DEEPSEEK LICENSE AGREEMENT
Version 1.0, 23 October 2023
Copyright (c) 2023 DeepSeek
Section I: PREAMBLE
Large generative models are being widely adopted and used, and have the potential to transform the way individuals conceive and benefit from AI or ML technologies.
Notwithstanding the current and potential benefits that these artifacts can bring to society at large, there are also concerns about potential misuses of them, either due to their technical limitations or ethical considerations.
In short, this license strives for both the open and responsible downstream use of the accompanying model. When it comes to the open character, we took inspiration from open source permissive licenses regarding the grant of IP rights. Referring to the downstream responsible use, we added use-based restrictions not permitting the use of the model in very specific scenarios, in order for the licensor to be able to enforce the license in case potential misuses of the Model may occur. At the same time, we strive to promote open and responsible research on generative models for content generation.
Even though downstream derivative versions of the model could be released under different licensing terms, the latter will always have to include - at minimum - the same use-based restrictions as the ones in the original license (this license). We believe in the intersection between open and responsible AI development; thus, this agreement aims to strike a balance between both in order to enable responsible open-science in the field of AI.
This License governs the use of the model (and its derivatives) and is informed by the model card associated with the model.
NOW THEREFORE, You and DeepSeek agree as follows:
1. Definitions
"License" means the terms and conditions for use, reproduction, and Distribution as defined in this document.
"Data" means a collection of information and/or content extracted from the dataset used with the Model, including to train, pretrain, or otherwise evaluate the Model. The Data is not licensed under this License.
"Output" means the results of operating a Model as embodied in informational content resulting therefrom.
"Model" means any accompanying machine-learning based assemblies (including checkpoints), consisting of learnt weights, parameters (including optimizer states), corresponding to the model architecture as embodied in the Complementary Material, that have been trained or tuned, in whole or in part on the Data, using the Complementary Material.
"Derivatives of the Model" means all modifications to the Model, works based on the Model, or any other model which is created or initialized by transfer of patterns of the weights, parameters, activations or output of the Model, to the other model, in order to cause the other model to perform similarly to the Model, including - but not limited to - distillation methods entailing the use of intermediate data representations or methods based on the generation of synthetic data by the Model for training the other model.
"Complementary Material" means the accompanying source code and scripts used to define, run, load, benchmark or evaluate the Model, and used to prepare data for training or evaluation, if any. This includes any accompanying documentation, tutorials, examples, etc, if any.
"Distribution" means any transmission, reproduction, publication or other sharing of the Model or Derivatives of the Model to a third party, including providing the Model as a hosted service made available by electronic or other remote means - e.g. API-based or web access.
"DeepSeek" (or "we") means Beijing DeepSeek Artificial Intelligence Fundamental Technology Research Co., Ltd., Hangzhou DeepSeek Artificial Intelligence Fundamental Technology Research Co., Ltd. and/or any of their affiliates.
"You" (or "Your") means an individual or Legal Entity exercising permissions granted by this License and/or making use of the Model for whichever purpose and in any field of use, including usage of the Model in an end-use application - e.g. chatbot, translator, etc.
"Third Parties" means individuals or legal entities that are not under common control with DeepSeek or You.
Section II: INTELLECTUAL PROPERTY RIGHTS
Both copyright and patent grants apply to the Model, Derivatives of the Model and Complementary Material. The Model and Derivatives of the Model are subject to additional terms as described in Section III.
2. Grant of Copyright License. Subject to the terms and conditions of this License, DeepSeek hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare, publicly display, publicly perform, sublicense, and distribute the Complementary Material, the Model, and Derivatives of the Model.
3. Grant of Patent License. Subject to the terms and conditions of this License and where and as applicable, DeepSeek hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this paragraph) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Model and the Complementary Material, where such license applies only to those patent claims licensable by DeepSeek that are necessarily infringed by its contribution(s). If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Model and/or Complementary Material constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for the Model and/or works shall terminate as of the date such litigation is asserted or filed.
Section III: CONDITIONS OF USAGE, DISTRIBUTION AND REDISTRIBUTION
4. Distribution and Redistribution. You may host for Third Party remote access purposes (e.g. software-as-a-service), reproduce and distribute copies of the Model or Derivatives of the Model thereof in any medium, with or without modifications, provided that You meet the following conditions:
a. Use-based restrictions as referenced in paragraph 5 MUST be included as an enforceable provision by You in any type of legal agreement (e.g. a license) governing the use and/or distribution of the Model or Derivatives of the Model, and You shall give notice to subsequent users You Distribute to, that the Model or Derivatives of the Model are subject to paragraph 5. This provision does not apply to the use of Complementary Material.
b. You must give any Third Party recipients of the Model or Derivatives of the Model a copy of this License;
c. You must cause any modified files to carry prominent notices stating that You changed the files;
d. You must retain all copyright, patent, trademark, and attribution notices excluding those notices that do not pertain to any part of the Model, Derivatives of the Model.
e. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions - respecting paragraph 4.a. for use, reproduction, or Distribution of Your modifications, or for any such Derivatives of the Model as a whole, provided Your use, reproduction, and Distribution of the Model otherwise complies with the conditions stated in this License.
5. Use-based restrictions. The restrictions set forth in Attachment A are considered Use-based restrictions. Therefore You cannot use the Model and the Derivatives of the Model for the specified restricted uses. You may use the Model subject to this License, including only for lawful purposes and in accordance with the License. Use may include creating any content with, finetuning, updating, running, training, evaluating and/or reparametrizing the Model. You shall require all of Your users who use the Model or a Derivative of the Model to comply with the terms of this paragraph (paragraph 5).
6. The Output You Generate. Except as set forth herein, DeepSeek claims no rights in the Output You generate using the Model. You are accountable for the Output you generate and its subsequent uses. No use of the output can contravene any provision as stated in the License.
Section IV: OTHER PROVISIONS
7. Updates and Runtime Restrictions. To the maximum extent permitted by law, DeepSeek reserves the right to restrict (remotely or otherwise) usage of the Model in violation of this License.
8. Trademarks and related. Nothing in this License permits You to make use of DeepSeek trademarks, trade names, logos or to otherwise suggest endorsement or misrepresent the relationship between the parties; and any rights not expressly granted herein are reserved by DeepSeek.
9. Personal information, IP rights and related. This Model may contain personal information and works with IP rights. You commit to complying with applicable laws and regulations in the handling of personal information and the use of such works. Please note that DeepSeek's license granted to you to use the Model does not imply that you have obtained a legitimate basis for processing the related information or works. As an independent personal information processor and IP rights user, you need to ensure full compliance with relevant legal and regulatory requirements when handling personal information and works with IP rights that may be contained in the Model, and are willing to assume solely any risks and consequences that may arise from that.
10. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, DeepSeek provides the Model and the Complementary Material on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Model, Derivatives of the Model, and the Complementary Material and assume any risks associated with Your exercise of permissions under this License.
11. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall DeepSeek be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Model and the Complementary Material (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if DeepSeek has been advised of the possibility of such damages.
12. Accepting Warranty or Additional Liability. While redistributing the Model, Derivatives of the Model and the Complementary Material thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of DeepSeek, and only if You agree to indemnify, defend, and hold DeepSeek harmless for any liability incurred by, or claims asserted against, DeepSeek by reason of your accepting any such warranty or additional liability.
13. If any provision of this License is held to be invalid, illegal or unenforceable, the remaining provisions shall be unaffected thereby and remain valid as if such provision had not been set forth herein.
14. Governing Law and Jurisdiction. This agreement will be governed and construed under PRC laws without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this agreement. The courts located in the domicile of Hangzhou DeepSeek Artificial Intelligence Fundamental Technology Research Co., Ltd. shall have exclusive jurisdiction of any dispute arising out of this agreement.
END OF TERMS AND CONDITIONS
Attachment A
Use Restrictions
You agree not to use the Model or Derivatives of the Model:
- In any way that violates any applicable national or international law or regulation or infringes upon the lawful rights and interests of any third party;
- For military use in any way;
- For the purpose of exploiting, harming or attempting to exploit or harm minors in any way;
- To generate or disseminate verifiably false information and/or content with the purpose of harming others;
- To generate or disseminate inappropriate content subject to applicable regulatory requirements;
- To generate or disseminate personal identifiable information without due authorization or for unreasonable use;
- To defame, disparage or otherwise harass others;
- For fully automated decision making that adversely impacts an individuals legal rights or otherwise creates or modifies a binding, enforceable obligation;
- For any use intended to or which has the effect of discriminating against or harming individuals or groups based on online or offline social behavior or known or predicted personal or personality characteristics;
- To exploit any of the vulnerabilities of a specific group of persons based on their age, social, physical or mental characteristics, in order to materially distort the behavior of a person pertaining to that group in a manner that causes or is likely to cause that person or another person physical or psychological harm;
- For any use intended to or which has the effect of discriminating against individuals or groups based on legally protected characteristics or categories.

97
Makefile Normal file

@@ -0,0 +1,97 @@
print-% : ; @echo $* = $($*)
PROJECT_NAME = DeepSeek-VL
COPYRIGHT = "DeepSeek."
PROJECT_PATH = deepseek_vl
SHELL = /bin/bash
SOURCE_FOLDERS = deepseek_vl
PYTHON_FILES = $(shell find $(SOURCE_FOLDERS) -type f -name "*.py" -o -name "*.pyi") cli_chat.py inference.py
COMMIT_HASH = $(shell git log -1 --format=%h)
PATH := $(HOME)/go/bin:$(PATH)
PYTHON ?= $(shell command -v python3 || command -v python)
PYTESTOPTS ?=
.PHONY: default
default: install
# Tools Installation
check_pip_install = $(PYTHON) -m pip show $(1) &>/dev/null || (cd && $(PYTHON) -m pip install $(1) --upgrade)
check_pip_install_extra = $(PYTHON) -m pip show $(1) &>/dev/null || (cd && $(PYTHON) -m pip install $(2) --upgrade)
pylint-install:
	$(call check_pip_install_extra,pylint,pylint[spelling])
	$(call check_pip_install,pyenchant)

flake8-install:
	$(call check_pip_install,flake8)
	$(call check_pip_install,flake8-bugbear)
	$(call check_pip_install,flake8-comprehensions)
	$(call check_pip_install,flake8-docstrings)
	$(call check_pip_install,flake8-pyi)
	$(call check_pip_install,flake8-simplify)

py-format-install:
	$(call check_pip_install,isort)
	$(call check_pip_install_extra,black,black[jupyter])

ruff-install:
	$(call check_pip_install,ruff)

mypy-install:
	$(call check_pip_install,mypy)

pre-commit-install:
	$(call check_pip_install,pre-commit)
	$(PYTHON) -m pre_commit install --install-hooks

go-install:
	# requires go >= 1.16
	command -v go || (sudo apt-get install -y golang && sudo ln -sf /usr/lib/go/bin/go /usr/bin/go)

addlicense-install: go-install
	command -v addlicense || go install github.com/google/addlicense@latest

addlicense: addlicense-install
	addlicense -c $(COPYRIGHT) -ignore tests/coverage.xml -l mit -y 2023-$(shell date +"%Y") -check $(SOURCE_FOLDERS)

# Python linters

pylint: pylint-install
	$(PYTHON) -m pylint $(PROJECT_PATH)

flake8: flake8-install
	$(PYTHON) -m flake8 --count --show-source --statistics

py-format: py-format-install
	$(PYTHON) -m isort --project $(PROJECT_PATH) --check $(PYTHON_FILES) && \
	$(PYTHON) -m black --check $(PYTHON_FILES)

ruff: ruff-install
	$(PYTHON) -m ruff check .

ruff-fix: ruff-install
	$(PYTHON) -m ruff check . --fix --exit-non-zero-on-fix

mypy: mypy-install
	$(PYTHON) -m mypy $(PROJECT_PATH) --install-types --non-interactive

pre-commit: pre-commit-install
	$(PYTHON) -m pre_commit run --all-files

# Utility functions

lint: ruff flake8 py-format mypy pylint addlicense

format: py-format-install ruff-install addlicense-install
	$(PYTHON) -m isort --project $(PROJECT_PATH) $(PYTHON_FILES)
	$(PYTHON) -m black $(PYTHON_FILES)
	$(PYTHON) -m ruff check . --fix --exit-zero
	addlicense -c $(COPYRIGHT) -ignore tests/coverage.xml -l mit -y 2023-$(shell date +"%Y") $(SOURCE_FOLDERS) cli_chat.py inference.py

clean-py:
	find . -type f -name '*.py[co]' -delete
	find . -depth -type d -name "__pycache__" -exec rm -r "{}" +
	find . -depth -type d -name ".ruff_cache" -exec rm -r "{}" +
	find . -depth -type d -name ".mypy_cache" -exec rm -r "{}" +
clean: clean-py

196
README.md Normal file

@@ -0,0 +1,196 @@
<!-- markdownlint-disable first-line-h1 -->
<!-- markdownlint-disable html -->
<!-- markdownlint-disable no-duplicate-header -->
<div align="center">
<img src="images/logo.svg" width="60%" alt="DeepSeek LLM" />
</div>
<hr>
<div align="center">
<a href="https://www.deepseek.com/" target="_blank">
<img alt="Homepage" src="images/badge.svg" />
</a>
<a href="" target="_blank">
<img alt="Chat" src="https://img.shields.io/badge/🤖%20Chat-DeepSeek%20VL-536af5?color=536af5&logoColor=white" />
</a>
<a href="https://huggingface.co/deepseek-ai" target="_blank">
<img alt="Hugging Face" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-DeepSeek%20AI-ffc107?color=ffc107&logoColor=white" />
</a>
</div>
<div align="center">
<a href="https://discord.gg/Tc7c45Zzu5" target="_blank">
<img alt="Discord" src="https://img.shields.io/badge/Discord-DeepSeek%20AI-7289da?logo=discord&logoColor=white&color=7289da" />
</a>
<a href="images/qr.jpeg" target="_blank">
<img alt="Wechat" src="https://img.shields.io/badge/WeChat-DeepSeek%20AI-brightgreen?logo=wechat&logoColor=white" />
</a>
<a href="https://twitter.com/deepseek_ai" target="_blank">
<img alt="Twitter Follow" src="https://img.shields.io/badge/Twitter-deepseek_ai-white?logo=x&logoColor=white" />
</a>
</div>
<div align="center">
<a href="LICENSE-CODE">
<img alt="Code License" src="https://img.shields.io/badge/Code_License-MIT-f5de53?&color=f5de53">
</a>
<a href="LICENSE-MODEL">
<img alt="Model License" src="https://img.shields.io/badge/Model_License-Model_Agreement-f5de53?&color=f5de53">
</a>
</div>
<p align="center">
<a href="https://github.com/deepseek-ai/DeepSeek-VL2/tree/main?tab=readme-ov-file#3-model-download"><b>📥 Model Download</b></a> |
<a href="https://github.com/deepseek-ai/DeepSeek-VL2/tree/main?tab=readme-ov-file#4-quick-start"><b>⚡ Quick Start</b></a> |
<a href="https://github.com/deepseek-ai/DeepSeek-VL2/tree/main?tab=readme-ov-file#5-license"><b>📜 License</b></a> |
<a href="https://github.com/deepseek-ai/DeepSeek-VL2/tree/main?tab=readme-ov-file#6-citation"><b>📖 Citation</b></a> <br>
<a href="./DeepSeek_VL2_paper.pdf"><b>📄 Paper Link</b></a> |
<a href=""><b>👁️ Demo</b></a>
</p>
## 1. Introduction
Introducing DeepSeek-VL2, an advanced series of large Mixture-of-Experts (MoE) Vision-Language Models that significantly improves upon its predecessor, DeepSeek-VL. DeepSeek-VL2 demonstrates superior capabilities across various tasks, including but not limited to visual question answering, optical character recognition, document/table/chart understanding, and visual grounding. Our model series is composed of three variants: DeepSeek-VL2-Tiny, DeepSeek-VL2-Small and DeepSeek-VL2, with 1.0B, 2.8B and 4.5B activated parameters respectively.
DeepSeek-VL2 achieves competitive or state-of-the-art performance with similar or fewer activated parameters compared to existing open-source dense and MoE-based models.
[DeepSeek-VL2: Mixture-of-Experts Vision-Language Models for Advanced Multimodal Understanding]()
Zhiyu Wu*, Xiaokang Chen*, Zizheng Pan*, Xingchao Liu*, Wen Liu**, Damai Dai, Huazuo Gao, Yiyang Ma, Chengyue Wu, Bingxuan Wang, Zhenda Xie, Yu Wu, Kai Hu, Jiawei Wang, Yaofeng Sun, Yukun Li, Yishi Piao, Kang Guan, Aixin Liu, Xin Xie, Yuxiang You, Kai Dong, Xingkai Yu, Haowei Zhang, Liang Zhao, Yisong Wang, Chong Ruan*** (* Equal Contribution, ** Project Lead, *** Corresponding author)
![](./images/vl2_teaser.jpeg)
## 2. Release
<b>2024-12-13</b>: DeepSeek-VL2 family released, including <code>DeepSeek-VL2-tiny</code>, <code>DeepSeek-VL2-small</code>, <code>DeepSeek-VL2</code>.
## 3. Model Download
We release the DeepSeek-VL2 family, including <code>DeepSeek-VL2-tiny</code>, <code>DeepSeek-VL2-small</code>, and <code>DeepSeek-VL2</code>, to support a broader and more diverse range of research within both academic and commercial communities.
Please note that the use of this model is subject to the terms outlined in the [License section](#5-license).
### Huggingface
| Model | Sequence Length | Download |
|--------------|-----------------|-----------------------------------------------------------------------------|
| DeepSeek-VL2-tiny | 4096 | [🤗 Hugging Face](https://huggingface.co/deepseek-ai/deepseek-vl2-tiny) |
| DeepSeek-VL2-small | 4096 | [🤗 Hugging Face](https://huggingface.co/deepseek-ai/deepseek-vl2-small) |
| DeepSeek-VL2 | 4096 | [🤗 Hugging Face](https://huggingface.co/deepseek-ai/deepseek-vl2) |
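
To fetch a checkpoint ahead of time (e.g. for offline machines), one option is `huggingface_hub`. This is a minimal sketch rather than part of the official instructions, and the `local_dir` path is only an illustrative choice:

```python
from huggingface_hub import snapshot_download

# Download the weights once; from_pretrained can then be pointed at this folder.
snapshot_download(
    repo_id="deepseek-ai/deepseek-vl2-small",
    local_dir="./checkpoints/deepseek-vl2-small",
)
```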
## 4. Quick Start
### Installation
With a `Python >= 3.8` environment, install the necessary dependencies by running the following command:
```shell
pip install -e .
```
### Simple Inference Example
```python
import torch
from transformers import AutoModelForCausalLM
from deepseek_vl.models import DeepseekVLV2Processor, DeepseekVLV2ForCausalLM
from deepseek_vl.utils.io import load_pil_images
# specify the path to the model
model_path = "deepseek-ai/deepseek-vl2-small"
vl_chat_processor: DeepseekVLV2Processor = DeepseekVLV2Processor.from_pretrained(model_path)
tokenizer = vl_chat_processor.tokenizer
vl_gpt: DeepseekVLV2ForCausalLM = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
vl_gpt = vl_gpt.to(torch.bfloat16).cuda().eval()
## single image conversation example
conversation = [
    {
        "role": "<|User|>",
        "content": "<image>\n<|ref|>The giraffe at the back.<|/ref|>.",
        "images": ["./images/visual_grounding.jpeg"],
    },
    {"role": "<|Assistant|>", "content": ""},
]

## multiple images (or in-context learning) conversation example
# conversation = [
#     {
#         "role": "User",
#         "content": "<image_placeholder>A dog wearing nothing in the foreground, "
#                    "<image_placeholder>a dog wearing a santa hat, "
#                    "<image_placeholder>a dog wearing a wizard outfit, and "
#                    "<image_placeholder>what's the dog wearing?",
#         "images": [
#             "images/dog_a.png",
#             "images/dog_b.png",
#             "images/dog_c.png",
#             "images/dog_d.png",
#         ],
#     },
#     {"role": "Assistant", "content": ""}
# ]

# load images and prepare for inputs
pil_images = load_pil_images(conversation)
prepare_inputs = vl_chat_processor(
    conversations=conversation,
    images=pil_images,
    force_batchify=True,
    system_prompt=""
).to(vl_gpt.device)

# run image encoder to get the image embeddings
inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)

# run the model to get the response
outputs = vl_gpt.language_model.generate(
    inputs_embeds=inputs_embeds,
    attention_mask=prepare_inputs.attention_mask,
    pad_token_id=tokenizer.eos_token_id,
    bos_token_id=tokenizer.bos_token_id,
    eos_token_id=tokenizer.eos_token_id,
    max_new_tokens=512,
    do_sample=False,
    use_cache=True
)
answer = tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=True)
print(f"{prepare_inputs['sft_format'][0]}", answer)
```
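
To stream tokens to stdout as they are generated (e.g. for an interactive CLI), one option is `transformers.TextStreamer`. The sketch below reuses the variables from the example above and is not part of the original snippet:

```python
from transformers import TextStreamer

# Print decoded tokens as soon as they are produced, skipping the prompt itself.
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

outputs = vl_gpt.language_model.generate(
    inputs_embeds=inputs_embeds,
    attention_mask=prepare_inputs.attention_mask,
    pad_token_id=tokenizer.eos_token_id,
    max_new_tokens=512,
    do_sample=False,
    use_cache=True,
    streamer=streamer,
)
```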
### Gradio Demo (TODO)
### Demo
The figure below presents some examples of DeepSeek-VL2.
![](./images/github_demo.png)
## 5. License
This code repository is licensed under the [MIT License](./LICENSE-CODE). The use of the DeepSeek-VL2 models is subject to the [DeepSeek Model License](./LICENSE-MODEL). The DeepSeek-VL2 series supports commercial use.
## 6. Citation
```
@misc{wu2024deepseekvl2,
title={DeepSeek-VL2: Mixture-of-Experts Vision-Language Models for Advanced Multimodal Understanding},
author={Wu, Zhiyu and Chen, Xiaokang and Pan, Zizheng and Liu, Xingchao and Liu, Wen and Dai, Damai and Gao, Huazuo and Ma, Yiyang and Wu, Chengyue and Wang, Bingxuan and Xie, Zhenda and Wu, Yu and Hu, Kai and Wang, Jiawei and Sun, Yaofeng and Li, Yukun and Piao, Yishi and Guan, Kang and Liu, Aixin and Xie, Xin and You, Yuxiang and Dong, Kai and Yu, Xingkai and Zhang, Haowei and Zhao, Liang and Wang, Yisong and Ruan, Chong},
year={2024},
}
```
## 7. Contact
If you have any questions, please raise an issue or contact us at [service@deepseek.com](mailto:service@deepseek.com).

31
deepseek_vl/__init__.py Normal file

@@ -0,0 +1,31 @@
# Copyright (c) 2023-2024 DeepSeek.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# check whether the Python version is 3.10 or above
import sys

if sys.version_info >= (3, 10):
    print("Python version is 3.10 or above, patching the collections module.")
    # Monkey patch collections: re-expose the ABCs that were removed from the top-level
    # collections namespace in Python 3.10 (they now live only in collections.abc).
    import collections
    import collections.abc

    for type_name in collections.abc.__all__:
        setattr(collections, type_name, getattr(collections.abc, type_name))
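# Hypothetical usage note: with the patch above in place, legacy third-party code that still
# relies on the pre-3.10 aliases keeps working on newer interpreters, e.g.:
#
#   import collections
#   isinstance({}, collections.Mapping)  # raises AttributeError on Python >= 3.10 without the patch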

deepseek_vl/models/__init__.py Normal file

@ -0,0 +1,26 @@
# Copyright (c) 2023-2024 DeepSeek.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from .processing_deepseek_vl_v2 import DeepseekVLV2Processor
from .modeling_deepseek_vl_v2 import DeepseekVLV2ForCausalLM
__all__ = [
"DeepseekVLV2Processor",
"DeepseekVLV2ForCausalLM",
]

deepseek_vl/models/configuration_deepseek.py Normal file

@ -0,0 +1,210 @@
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging
logger = logging.get_logger(__name__)
DEEPSEEK_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class DeepseekV2Config(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`DeepseekV2Model`]. It is used to instantiate an DeepSeek
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the DeepSeek-V2 with multi-latent attention.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 102400):
Vocabulary size of the DeepSeek model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`DeepseekV2Model`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 11008):
Dimension of the MLP representations.
moe_intermediate_size (`int`, *optional*, defaults to 1407):
Dimension of the MoE representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
n_shared_experts (`int`, *optional*, defaults to None):
Number of shared experts, None means dense model.
n_routed_experts (`int`, *optional*, defaults to None):
Number of routed experts, None means dense model.
routed_scaling_factor (`float`, *optional*, defaults to 1.0):
Scaling factor for routed experts.
topk_method (`str`, *optional*, defaults to `gready`):
Topk method used in routed gate.
n_group (`int`, *optional*, defaults to None):
Number of groups for routed experts.
topk_group (`int`, *optional*, defaults to None):
Number of selected groups for each token (ensuring the selected experts are only within `topk_group` groups).
num_experts_per_tok (`int`, *optional*, defaults to None):
Number of selected experts, None means dense model.
moe_layer_freq (`int`, *optional*, defaults to 1):
The frequency of the MoE layer: one expert layer for every `moe_layer_freq - 1` dense layers.
first_k_dense_replace (`int`, *optional*, defaults to 0):
Number of dense layers in the shallow layers, i.e. the first k layers after the embedding are dense before the MoE layers start (embed -> dense x k -> moe -> ... -> moe -> lm_head).
norm_topk_prob (`bool`, *optional*, defaults to False):
Whether to normalize the weights of the routed experts.
scoring_func (`str`, *optional*, defaults to 'softmax'):
Method of computing expert weights.
aux_loss_alpha (`float`, *optional*, defaults to 0.001):
Auxiliary loss weight coefficient.
seq_aux (`bool`, *optional*, defaults to True):
Whether to compute the auxiliary loss for each individual sample.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details checkout [this
paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
pretraining_tp (`int`, *optional*, defaults to 1):
Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
issue](https://github.com/pytorch/pytorch/issues/76232).
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
`{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
`max_position_embeddings` to the expected new maximum.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
use_mla (`bool`, *optional*, defaults to `True`): Use multi-latent attention or multi-head attention. If True,
the model will use multi-latent attention, otherwise, it will use multi-head attention.
```python
>>> from transformers import DeepseekV2Model, DeepseekV2Config
>>> # Initializing a Deepseek-V2 style configuration
>>> configuration = DeepseekV2Config()
>>> # Initializing a model from the Deepseek-V2 style configuration
>>> model = DeepseekV2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "deepseek_v2"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
vocab_size=102400,
hidden_size=4096,
intermediate_size=11008,
moe_intermediate_size = 1407,
num_hidden_layers=30,
num_attention_heads=32,
num_key_value_heads=32,
n_shared_experts = None,
n_routed_experts = None,
ep_size = 1,
routed_scaling_factor = 1.0,
kv_lora_rank = 512,
q_lora_rank = 1536,
qk_rope_head_dim = 64,
v_head_dim = 128,
qk_nope_head_dim = 128,
topk_method = 'gready',
n_group = None,
topk_group = None,
num_experts_per_tok = None,
moe_layer_freq = 1,
first_k_dense_replace = 0,
norm_topk_prob = False,
scoring_func = 'softmax',
aux_loss_alpha = 0.001,
seq_aux = True,
hidden_act="silu",
max_position_embeddings=2048,
initializer_range=0.02,
rms_norm_eps=1e-6,
use_cache=True,
pad_token_id=None,
bos_token_id=100000,
eos_token_id=100001,
pretraining_tp=1,
tie_word_embeddings=False,
rope_theta=10000.0,
rope_scaling=None,
attention_bias=False,
attention_dropout=0.0,
use_mla=True,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.moe_intermediate_size = moe_intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.n_shared_experts = n_shared_experts
self.n_routed_experts = n_routed_experts
self.ep_size = ep_size
self.routed_scaling_factor = routed_scaling_factor
self.kv_lora_rank = kv_lora_rank
self.q_lora_rank = q_lora_rank
self.qk_rope_head_dim = qk_rope_head_dim
self.v_head_dim = v_head_dim
self.qk_nope_head_dim = qk_nope_head_dim
self.topk_method = topk_method
self.n_group = n_group
self.topk_group = topk_group
self.num_experts_per_tok = num_experts_per_tok
self.moe_layer_freq = moe_layer_freq
self.first_k_dense_replace = first_k_dense_replace
self.norm_topk_prob = norm_topk_prob
self.scoring_func = scoring_func
self.aux_loss_alpha = aux_loss_alpha
self.seq_aux = seq_aux
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = float(rms_norm_eps)
self.pretraining_tp = pretraining_tp
self.use_cache = use_cache
self.rope_theta = rope_theta
self.rope_scaling = rope_scaling
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.use_mla = use_mla
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
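# A minimal instantiation sketch (the hyper-parameter values below are hypothetical and chosen
# only to illustrate the MoE-related arguments):
#
#   tiny_moe_config = DeepseekV2Config(
#       hidden_size=1024,
#       intermediate_size=2816,
#       num_hidden_layers=4,
#       num_attention_heads=8,
#       n_shared_experts=1,
#       n_routed_experts=8,
#       num_experts_per_tok=2,
#       first_k_dense_replace=1,
#       use_mla=True,
#   )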

deepseek_vl/models/conversation.py Normal file

@ -0,0 +1,310 @@
"""
From https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py
"""
import dataclasses
from enum import IntEnum, auto
from typing import Any, Dict, List
class SeparatorStyle(IntEnum):
"""Separator styles."""
DeepSeek = auto()
DeepSeekV2 = auto()
PLAIN = auto()
ALIGNMENT = auto()
@dataclasses.dataclass
class Conversation:
"""A class that manages prompt templates and keeps all conversation history."""
# The name of this template
name: str
# The template of the system prompt
system_template: str = "{system_message}"
# The system message
system_message: str = ""
# The names of two roles
roles: List[str] = (("USER", "ASSISTANT"),)
# All messages. Each item is (role, message).
messages: List[List[str]] = ()
# The number of few shot examples
offset: int = 0
# The separator style and configurations
sep_style: SeparatorStyle = SeparatorStyle.DeepSeek
sep: str = "\n"
sep2: str = None
# Stop criteria (the default one is EOS token)
stop_str: str = None
# Stops generation if meeting any token in this list
stop_token_ids: List[int] = None
def get_prompt(self) -> str:
"""Get the prompt for generation."""
system_prompt = self.system_template.format(system_message=self.system_message)
if self.sep_style == SeparatorStyle.DeepSeek:
seps = [self.sep, self.sep2]
if system_prompt == "" or system_prompt is None:
ret = ""
else:
ret = system_prompt + seps[0]
for i, (role, message) in enumerate(self.messages):
if message:
ret += role + ": " + message + seps[i % 2]
else:
ret += role + ":"
return ret
elif self.sep_style == SeparatorStyle.DeepSeekV2:
seps = [self.sep, self.sep2]
if system_prompt == "" or system_prompt is None:
ret = ""
else:
ret = system_prompt + seps[0]
for i, (role, message) in enumerate(self.messages):
if message:
if role == "User":
ret += "<sft▁begin>\n" + message + self.sep #<sft▁begin>User Input<sft▁end>\nResponse<end▁of▁sentence>
else:
ret += message + self.sep2
else:
ret = ret
return ret
elif self.sep_style == SeparatorStyle.PLAIN:
seps = [self.sep, self.sep2]
ret = ""
for i, (role, message) in enumerate(self.messages):
if message:
if type(message) is tuple:
message, _, _ = message
if i % 2 == 0:
ret += message + seps[i % 2]
else:
ret += message + seps[i % 2]
else:
ret += ""
return ret
elif self.sep_style == SeparatorStyle.ALIGNMENT:
seps = [self.sep, self.sep2]
ret = ""
for i, (role, message) in enumerate(self.messages):
if message:
if type(message) is tuple:
message, _, _ = message
if i % 2 == 0:
ret += '<image>\n' + seps[i % 2]
else:
ret += message + seps[i % 2]
else:
ret += ""
return ret
else:
raise ValueError(f"Invalid style: {self.sep_style}")
def set_system_message(self, system_message: str):
"""Set the system message."""
self.system_message = system_message
def append_message(self, role: str, message: str):
"""Append a new message."""
self.messages.append([role, message])
def update_last_message(self, message: str):
"""Update the last output.
The last message is typically set to be None when constructing the prompt,
so we need to update it in-place after getting the response from a model.
"""
self.messages[-1][1] = message
def reset_message(self):
"""Reset a new message."""
self.messages = []
def to_gradio_chatbot(self):
"""Convert the conversation to gradio chatbot format."""
ret = []
for i, (role, msg) in enumerate(self.messages[self.offset :]):
if i % 2 == 0:
ret.append([msg, None])
else:
ret[-1][-1] = msg
return ret
def to_openai_api_messages(self):
"""Convert the conversation to OpenAI chat completion format."""
system_prompt = self.system_template.format(system_message=self.system_message)
ret = [{"role": "system", "content": system_prompt}]
for i, (_, msg) in enumerate(self.messages[self.offset :]):
if i % 2 == 0:
ret.append({"role": "user", "content": msg})
else:
if msg is not None:
ret.append({"role": "assistant", "content": msg})
return ret
def copy(self):
return Conversation(
name=self.name,
system_template=self.system_template,
system_message=self.system_message,
roles=self.roles,
messages=[[x, y] for x, y in self.messages],
offset=self.offset,
sep_style=self.sep_style,
sep=self.sep,
sep2=self.sep2,
stop_str=self.stop_str,
stop_token_ids=self.stop_token_ids,
)
def dict(self):
return {
"template_name": self.name,
"system_message": self.system_message,
"roles": self.roles,
"messages": self.messages,
"offset": self.offset,
}
# A global registry for all conversation templates
conv_templates: Dict[str, Conversation] = {}
def register_conv_template(template: Conversation, override: bool = False):
"""Register a new conversation template."""
if not override:
assert template.name not in conv_templates, f"{template.name} has been registered."
conv_templates[template.name] = template
def get_conv_template(name: str) -> Conversation:
"""Get a conversation template."""
return conv_templates[name].copy()
# register_conv_template(
# Conversation(
# name="deepseek",
# system_template="{system_message}",
# # system_message="You are a helpful assistant. Please answer truthfully and write out your "
# # "thinking step by step to be sure you get the right answer.",
# system_message="",
# roles=("User", "Assistant"),
# messages=(),
# offset=0,
# sep_style=SeparatorStyle.DeepSeek,
# sep="\n\n",
# sep2="<end▁of▁sentence>",
# stop_token_ids=[100001],
# stop_str=["User:", "<end▁of▁sentence>"]
# )
# )
register_conv_template(
Conversation(
name="deepseek",
system_template="{system_message}",
# system_message="You are a helpful assistant. Please answer truthfully and write out your "
# "thinking step by step to be sure you get the right answer.",
system_message="",
roles=("<|User|>", "<|Assistant|>"),
messages=(),
offset=0,
sep_style=SeparatorStyle.DeepSeek,
sep="\n\n",
sep2="<end▁of▁sentence>",
stop_token_ids=[100001],
stop_str=["User:", "<end▁of▁sentence>"]
)
)
# register_conv_template(
# Conversation(
# name="deepseekv2",
# system_template="{system_message}",
# system_message="",
# roles=("User", "Assistant"),
# messages=(),
# offset=0,
# sep_style=SeparatorStyle.DeepSeekV2,
# sep="\n<sft▁end>",
# sep2="<end▁of▁sentence>",
# stop_token_ids=[100001],
# stop_str=["User:", "<end▁of▁sentence>"]
# )
# )
register_conv_template(
Conversation(
name="deepseekv2",
system_template="{system_message}",
system_message="",
roles=("|<User>|", "|<Assistant>|"),
messages=(),
offset=0,
sep_style=SeparatorStyle.DeepSeekV2,
sep="\n<sft▁end>",
sep2="<end▁of▁sentence>",
stop_token_ids=[100001],
stop_str=["User:", "<end▁of▁sentence>"]
)
)
register_conv_template(
Conversation(
name="plain",
system_template="",
system_message="",
roles=("", ""),
messages=(),
offset=0,
sep_style=SeparatorStyle.PLAIN,
sep="",
sep2="",
stop_token_ids=[100001],
stop_str=['</s>'],
)
)
register_conv_template(
Conversation(
name="alignment",
system_template="",
system_message="",
roles=("", ""),
messages=(),
offset=0,
sep_style=SeparatorStyle.ALIGNMENT,
sep="",
sep2="",
stop_token_ids=[100001],
stop_str=['</s>'],
)
)
if __name__ == "__main__":
print("deepseek template:")
conv = get_conv_template("deepseek")
conv.append_message(conv.roles[0], "Hello!")
conv.append_message(conv.roles[1], "Hi! This is Tony.")
conv.append_message(conv.roles[0], "Who are you?")
conv.append_message(conv.roles[1], "I am a helpful assistant.")
conv.append_message(conv.roles[0], "How are you?")
conv.append_message(conv.roles[1], None)
print(conv.get_prompt())
print("deepseekv2 template:")
conv = get_conv_template("deepseekv2")
conv.append_message(conv.roles[0], "Hello!")
conv.append_message(conv.roles[1], "Hi! This is Tony.")
conv.append_message(conv.roles[0], "Who are you?")
conv.append_message(conv.roles[1], "I am a helpful assistant.")
conv.append_message(conv.roles[0], "How are you?")
conv.append_message(conv.roles[1], None)
print(conv.get_prompt())
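# A hypothetical sketch of registering an additional template through the same API
# (the template name below is invented; the separators mirror the "deepseek" template above):
#
#   register_conv_template(
#       Conversation(
#           name="my_custom_template",
#           system_template="{system_message}",
#           system_message="",
#           roles=("<|User|>", "<|Assistant|>"),
#           messages=(),
#           offset=0,
#           sep_style=SeparatorStyle.DeepSeek,
#           sep="\n\n",
#           sep2="<end▁of▁sentence>",
#       )
#   )
#   conv = get_conv_template("my_custom_template")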

deepseek_vl/models/modeling_deepseek.py (file diff suppressed because it is too large)

deepseek_vl/models/modeling_deepseek_vl_v2.py Normal file

@ -0,0 +1,472 @@
from attrdict import AttrDict
from einops import rearrange, repeat
from typing import Optional, List, Tuple, Callable, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers.configuration_utils import PretrainedConfig
from transformers import (
AutoConfig,
AutoModelForCausalLM,
PreTrainedModel, GenerationConfig, LogitsProcessorList, StoppingCriteriaList,
)
from transformers.generation.utils import GenerateOutput
from .siglip_vit import VisionTransformer
from .configuration_deepseek import DeepseekV2Config
from .modeling_deepseek import DeepseekV2ForCausalLM
class MlpProjector(nn.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
if cfg.projector_type == "identity":
modules = nn.Identity()
elif cfg.projector_type == "linear":
modules = nn.Linear(cfg.input_dim, cfg.n_embed)
elif cfg.projector_type == "mlp_gelu":
mlp_depth = cfg.depth
modules = [nn.Linear(cfg.input_dim, cfg.n_embed)]
for _ in range(1, mlp_depth):
modules.append(nn.GELU())
modules.append(nn.Linear(cfg.n_embed, cfg.n_embed))
modules = nn.Sequential(*modules)
elif cfg.projector_type == "downsample_mlp_gelu":
mlp_depth = cfg.depth
mlp_ratio = cfg.mlp_ratio
modules = [nn.Linear(cfg.input_dim * cfg.downsample_ratio * cfg.downsample_ratio, cfg.n_embed * mlp_ratio)]
for _ in range(1, mlp_depth - 1):
modules.append(nn.GELU())
modules.append(nn.Linear(cfg.n_embed * mlp_ratio, cfg.n_embed * mlp_ratio))
modules.append(nn.GELU())
modules.append(nn.Linear(cfg.n_embed * mlp_ratio, cfg.n_embed))
modules = nn.Sequential(*modules)
else:
raise ValueError(f"Unknown projector type: {cfg.projector_type}")
if cfg.token_pooling:
self.token_pooling_layer = nn.Linear(cfg.input_dim * 4, cfg.input_dim)
self.layers = modules
def forward(self, x):
if self.cfg.token_pooling:
batch_size, wxh, channels = x.shape
w = h = int(wxh ** 0.5)
x = x.view(batch_size, w, h, channels)
x = x.permute(0, 3, 1, 2)
# import ipdb; ipdb.set_trace()
patches = x.unfold(2, 2, 2).unfold(3, 2, 2)
batch_size, channels, h_patches, w_patches, _, _ = patches.size()
# concatenate along the channel dimension
patches = patches.contiguous().view(batch_size, channels, h_patches * w_patches, -1)
# pass through the linear layer
patches = patches.permute(0, 2, 1, 3).contiguous()
patches = patches.view(batch_size, h_patches * w_patches, channels * 4)
x = self.token_pooling_layer(patches)
elif self.cfg.projector_type == 'downsample_mlp_gelu':
bs, hw, input_dim = x.shape
h = w = int((hw) ** 0.5)
"""compute padding"""
if h % self.cfg.downsample_ratio:
pad = self.cfg.downsample_ratio - h % self.cfg.downsample_ratio
else:
pad = 0
x = x.reshape(bs, h, w, input_dim)
if pad > 0:
x = F.pad(x, (0, 0, 0, pad, 0, pad), "constant", 0)
"""4 to 1 concat"""
x = x.permute(0, 3, 1, 2) # B, C, H, W
x = F.unfold(x, kernel_size=self.cfg.downsample_ratio, stride=self.cfg.downsample_ratio,
padding=0) # B, C*4, HW // 4
x = x.permute(0, 2, 1)
return self.layers(x)
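# Shape walk-through for the default "downsample_mlp_gelu" projector (batch size B is hypothetical;
# the other numbers follow the defaults input_dim=1152, downsample_ratio=2, mlp_ratio=1, n_embed=2048):
#   input x          : [B, 576, 1152]  (24 x 24 vision tokens per 384 x 384 tile)
#   unfold + permute : [B, 144, 4608]  (each 2 x 2 token neighbourhood concatenated channel-wise)
#   self.layers(x)   : [B, 144, 2048]  (projected into the language model's embedding space)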
class VisionEncoderConfig(PretrainedConfig):
model_type: str = "vision"
model_name: str = "siglip_large_patch16_384"
image_size: int = 384
patch_size: int = 16
width: int = 1024
layers: int = 24
heads: int = 16
mlp_ratio: int = 4
global_pool: str = "map"
ignore_head: bool = True
class_token: bool = False
num_classes: int = 0
use_checkpoint: bool = False
weight_init: str = "skip"
deterministic: bool = False
num_recomputing_layers: int = 0
def __init__(
self,
model_name: str = "siglip_large_patch16_384",
image_size: int = 384,
patch_size: int = 16,
width: int = 1024,
layers: int = 24,
heads: int = 16,
mlp_ratio: int = 4,
global_pool: str = "map",
ignore_head: bool = True,
class_token: bool = False,
num_classes: int = 0,
use_checkpoint: bool = False,
**kwargs
):
self.model_name = model_name
self.image_size = image_size
self.patch_size = patch_size
self.width = width
self.layers = layers
self.heads = heads
self.mlp_ratio = mlp_ratio
self.global_pool = global_pool
self.ignore_head = ignore_head
self.class_token = class_token
self.num_classes = num_classes
self.use_checkpoint = use_checkpoint
super().__init__(**kwargs)
class MlpProjectorConfig(PretrainedConfig):
model_type = "mlp_projector"
projector_type: str = "downsample_mlp_gelu"
input_dim: int = 1152
n_embed: int = 2048
depth: int = 2
mlp_ratio: int = 1
downsample_ratio: int = 2
token_pooling: bool = False
def __init__(
self,
projector_type: str = "downsample_mlp_gelu",
input_dim: int = 1152,
n_embed: int = 2048,
depth: int = 2,
mlp_ratio: int = 1,
downsample_ratio: int = 2,
**kwargs
):
self.projector_type = projector_type
self.input_dim = input_dim
self.n_embed = n_embed
self.depth = depth
self.mlp_ratio = mlp_ratio
self.downsample_ratio = downsample_ratio
super().__init__(**kwargs)
class DeepseekVLV2Config(PretrainedConfig):
model_type = "deepseek_vl_v2"
vision_config: VisionEncoderConfig
projector_config: MlpProjectorConfig
language_config: DeepseekV2Config
tile_tag: str = "2D"
global_view_pos: str = "head"
candidate_resolutions: Tuple[Tuple[int, int]] = ((384, 384),)
def __init__(
self,
tile_tag: str = "tile_tag",
global_view_pos: str = "head",
candidate_resolutions: Tuple[Tuple[int, int]] = ((384, 384),),
**kwargs
):
super().__init__(**kwargs)
vision_config = kwargs.get("vision_config", {})
self.vision_config = VisionEncoderConfig(**vision_config)
projector_config = kwargs.get("projector_config", {})
self.projector_config = MlpProjectorConfig(**projector_config)
language_config = kwargs.get("language_config", {})
if isinstance(language_config, DeepseekV2Config):
self.language_config = language_config
else:
self.language_config = DeepseekV2Config(**language_config)
self.tile_tag = tile_tag
self.global_view_pos = global_view_pos
self.candidate_resolutions = candidate_resolutions
class DeepseekVLV2PreTrainedModel(PreTrainedModel):
config_class = DeepseekVLV2Config
base_model_prefix = "deepseek_vl_v2"
_no_split_modules = []
_skip_keys_device_placement = "past_key_values"
class DeepseekVLV2ForCausalLM(DeepseekVLV2PreTrainedModel):
def __init__(self, config: DeepseekVLV2Config):
super().__init__(config)
# ----------- vision encoder ------------
vision_config = config.vision_config
self.vision = VisionTransformer(
img_size=vision_config.image_size,
patch_size=vision_config.patch_size,
embed_dim=vision_config.width,
depth=vision_config.layers,
num_heads=vision_config.heads,
mlp_ratio=vision_config.mlp_ratio,
class_token=vision_config.class_token,
global_pool=vision_config.global_pool,
ignore_head=vision_config.ignore_head,
weight_init=vision_config.weight_init,
num_classes=0,
deterministic=vision_config.deterministic,
num_recomputing_layers=vision_config.num_recomputing_layers
)
# ----------- vl projector ------------
projector_config = config.projector_config
self.projector = MlpProjector(projector_config)
# image token format
# FIXME: the current default values of tile_tag & global_view_pos come from earlier experiments;
# the defaults should be removed later so that a missing value raises an error
self.tile_tag = config.tile_tag
self.global_view_pos = config.global_view_pos
# special tokens used to format the image token sequence
embed_std = 1 / torch.sqrt(torch.tensor(projector_config.n_embed, dtype=torch.float32))
if self.tile_tag == "2D":
# <|view_separator|>, <|\n|>
self.image_newline = nn.Parameter(torch.randn(projector_config.n_embed) * embed_std)
# fix the typo: view_seperater
self.view_seperator = nn.Parameter(torch.randn(projector_config.n_embed) * embed_std)
elif self.tile_tag == "1D":
# <|tile_x|>, <|tile_global|>
candidate_resolutions = config.candidate_resolutions
if len(candidate_resolutions) == 0:
raise ValueError(
f"len(candidate_resolutions) should be larger than 0, but got {len(candidate_resolutions)}")
tile_variants_num = len(candidate_resolutions)
self.tile_indicators = nn.Parameter(
torch.randn(size=(tile_variants_num + 1, config.aligner.params.n_embed)) * embed_std
)
else:
raise ValueError(f"tile tag should be either 1D or 2D, but got {self.tile_tag}")
# ----------- language model ------------
language_config = config.language_config
self.language = DeepseekV2ForCausalLM(language_config)
def prepare_inputs_embeds(
self,
input_ids: torch.LongTensor,
images: torch.FloatTensor,
images_seq_mask: torch.LongTensor,
images_spatial_crop: Optional[torch.LongTensor] = None,
**ignore_kwargs
):
"""
Args:
input_ids (torch.LongTensor): [b, T]
images (torch.FloatTensor): [b, max_n_images, 3, height, width]
images_seq_mask (torch.BoolTensor): [b, T]
images_spatial_crop (torch.LongTensor): [b, max_n_images, 2]
Returns:
input_embeds (torch.Tensor): [b, T, D]
"""
if images is None or images_spatial_crop.sum() == 0:
return self.language.get_input_embeddings()(input_ids)
bs, max_n_images, _ = images_spatial_crop.shape
batch_num_tiles = [0 for _ in range(bs)]
total_tiles = []
for idx in range(bs):
for jdx in range(max_n_images):
num_width_tiles, num_height_tiles = images_spatial_crop[idx, jdx]
if num_width_tiles == 0 or num_height_tiles == 0:
break
batch_num_tiles[idx] += (1 + num_width_tiles * num_height_tiles)
total_tiles.append(images[idx, :batch_num_tiles[idx]])
# [batch_all_tiles, 3, height, width]
total_tiles = torch.cat(total_tiles, dim=0)
assert total_tiles.shape[0] == sum(batch_num_tiles)
if total_tiles.shape[0] == 0:
return self.language.get_input_embeddings()(input_ids)
# [batch_all_tiles, vit_seq_len, c]
images_feature = self.vision(total_tiles)
# [batch_all_tiles, hw, D]
images_embeds = self.projector(images_feature)
_, hw, n_dim = images_embeds.shape
h = w = int(hw ** 0.5)
# put image tokens into the input_embeds, [b, T, D]
input_embeds = self.language.get_input_embeddings()(input_ids)
# fill in the image token sequence according to self.tile_tag & self.global_view_pos
tile_index = 0
for idx in range(images_spatial_crop.shape[0]):
images_in_this_batch = []
for jdx in range(images_spatial_crop.shape[1]):
# extract global & local features
num_width_tiles, num_height_tiles = images_spatial_crop[idx, jdx]
if num_width_tiles == 0 or num_height_tiles == 0:
break
num_tiles_in_image = num_width_tiles * num_height_tiles
# [hw, D]
global_features = images_embeds[tile_index]
# [num_height_tiles * num_width_tiles, hw, D]
local_features = images_embeds[tile_index + 1: tile_index + 1 + num_tiles_in_image]
tile_index += num_tiles_in_image + 1
# format global and local features
if self.tile_tag == "2D":
# ----------------- global view add newline -----------------
# [hw, D] -> [h, w, D]
global_features = global_features.view(h, w, n_dim)
# [D] -> [h, 1, D]
new_lines_in_global = repeat(self.image_newline, "d -> h 1 d", h=h)
# cat([h, w, D], [h, 1, D], dim=1) -> [h, w + 1, D]
global_features = torch.cat([global_features, new_lines_in_global], dim=1)
# [h, w + 1, D] -> [h * (w + 1), D]
global_features = global_features.view(-1, n_dim)
# ----------------- local view add newline -----------------
# [num_height_tiles * num_width_tiles, h * w, D] -> [num_height_tiles * h, num_width_tiles * w, D]
local_features = rearrange(
local_features,
"(th tw) (h w) d -> (th h) (tw w) d",
th=num_height_tiles,
tw=num_width_tiles,
h=h,
w=w
)
# [D] -> [num_height_tiles * h, 1, D]
new_lines_in_local = repeat(
self.image_newline,
"d -> (th h) 1 d",
th=num_height_tiles,
h=h
)
# [num_height_tiles * h, num_width_tiles * w + 1, D]
local_features = torch.cat([local_features, new_lines_in_local], dim=1)
# [num_height_tiles * h, num_width_tiles * w + 1, D]
# --> [(num_height_tiles * h) * (num_width_tiles * w + 1), D]
local_features = local_features.view(-1, n_dim)
# ----------------- merge global and local tiles -----------------
if self.global_view_pos == "head":
global_local_features = torch.cat(
[global_features, self.view_seperator[None, :], local_features], dim=0)
else:
global_local_features = torch.cat(
[local_features, self.view_seperator[None, :], global_features], dim=0)
else:
# abandoned: in practice this branch is never taken
global_features = torch.cat(
[self.tile_indicators[0:1], global_features], dim=0
)
local_features = torch.cat(
[self.tile_indicators[1:num_tiles_in_image + 1].unsqueeze(1), local_features], dim=1
)
local_features = rearrange(local_features, 'crop_num hw d -> (crop_num hw) d')
if self.global_view_pos == "head":
global_local_features = torch.cat([global_features, local_features], dim=0)
else:
global_local_features = torch.cat([local_features, global_features], dim=0)
images_in_this_batch.append(global_local_features)
if len(images_in_this_batch) > 0:
images_in_this_batch = torch.cat(images_in_this_batch, dim=0)
input_embeds[idx].masked_scatter_(images_seq_mask[idx].unsqueeze(-1), images_in_this_batch)
return input_embeds
def generate(
self,
inputs: Optional[torch.Tensor] = None,
generation_config: Optional[GenerationConfig] = None,
logits_processor: Optional[LogitsProcessorList] = None,
stopping_criteria: Optional[StoppingCriteriaList] = None,
prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
synced_gpus: Optional[bool] = None,
assistant_model: Optional["PreTrainedModel"] = None,
streamer: Optional["BaseStreamer"] = None,
negative_prompt_ids: Optional[torch.Tensor] = None,
negative_prompt_attention_mask: Optional[torch.Tensor] = None,
**kwargs,
) -> Union[GenerateOutput, torch.LongTensor]:
r"""
Generates sequences for models with a language modeling head. The method currently supports greedy decoding,
beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling. Beam-search decoding
is controlled by the `num_beams` parameter and the `num_return_sequences` parameter.
Parameters:
- `inputs` (optional) -- `torch.LongTensor` of shape `(batch, sequence_length)`:
The sequence used as a prompt for the generation. If `None`, generate for the model's prompt.
- `generation_config` (optional) -- `GenerationConfig`:
The generation config of the model.
- `logits_processor` (optional) -- `LogitsProcessorList`:
A list of instances of :class:`~transformers.LogitsProcessor`.
"""
return self.language.generate(
inputs=inputs,
generation_config=generation_config,
logits_processor=logits_processor,
stopping_criteria=stopping_criteria,
prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
synced_gpus=synced_gpus,
assistant_model=assistant_model,
streamer=streamer,
negative_prompt_ids=negative_prompt_ids,
negative_prompt_attention_mask=negative_prompt_attention_mask,
**kwargs,
)
AutoConfig.register("vision", VisionEncoderConfig)
AutoConfig.register("mlp_projector", MlpProjectorConfig)
AutoConfig.register("deepseek_vl_v2", DeepseekVLV2Config)
AutoModelForCausalLM.register(DeepseekVLV2Config, DeepseekVLV2ForCausalLM)

deepseek_vl/models/processing_deepseek_vl_v2.py Normal file

@ -0,0 +1,675 @@
# Copyright (c) 2023-2024 DeepSeek.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from dataclasses import dataclass
from typing import Dict, Tuple, List, Literal, Optional
import math
import torch
from torch.nn.utils.rnn import pad_sequence
import torchvision.transforms as T
from transformers import LlamaTokenizerFast
from transformers.processing_utils import ProcessorMixin
from PIL import Image, ImageOps
from .conversation import get_conv_template
def select_best_resolution(image_size, candidate_resolutions):
# used for cropping
original_width, original_height = image_size
best_fit = None
max_effective_resolution = 0
min_wasted_resolution = float("inf")
for width, height in candidate_resolutions:
scale = min(width / original_width, height / original_height)
downscaled_width, downscaled_height = int(original_width * scale), int(original_height * scale)
effective_resolution = min(downscaled_width * downscaled_height, original_width * original_height)
wasted_resolution = (width * height) - effective_resolution
if effective_resolution > max_effective_resolution or (effective_resolution == max_effective_resolution and wasted_resolution < min_wasted_resolution):
max_effective_resolution = effective_resolution
min_wasted_resolution = wasted_resolution
best_fit = (width, height)
return best_fit
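# Worked example with hypothetical numbers: for image_size=(1000, 500) and
# candidate_resolutions=[(384, 384), (768, 384)]:
#   (384, 384): scale=0.384 -> downscaled 384x192, effective=73728,  wasted=73728
#   (768, 384): scale=0.768 -> downscaled 768x384, effective=294912, wasted=0
# so (768, 384) is returned as the best fit.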
class DictOutput(object):
def keys(self):
return self.__dict__.keys()
def __getitem__(self, item):
return self.__dict__[item]
def __setitem__(self, key, value):
self.__dict__[key] = value
# for inference samples we can also keep input_ids; they are not used in the end anyway
@dataclass
class VLChatProcessorOutput(DictOutput):
sft_format: str
input_ids: torch.LongTensor
target_ids: torch.LongTensor
images: torch.Tensor
images_seq_mask: torch.BoolTensor
images_spatial_crop: torch.LongTensor
num_image_tokens: List[int]
def __len__(self):
return len(self.input_ids)
@dataclass
class BatchCollateOutput(DictOutput):
sft_format: List[str]
input_ids: torch.LongTensor
labels: torch.LongTensor
images: torch.Tensor
attention_mask: torch.Tensor
images_seq_mask: torch.BoolTensor
images_spatial_crop: torch.LongTensor
seq_lens: List[int]
def to(self, device, dtype=torch.bfloat16):
self.input_ids = self.input_ids.to(device)
self.labels = self.labels.to(device)
self.attention_mask = self.attention_mask.to(device)
self.images_seq_mask = self.images_seq_mask.to(device)
self.images_spatial_crop = self.images_spatial_crop.to(device)
self.images = self.images.to(device=device, dtype=dtype)
return self
class ImageTransform(object):
def __init__(
self,
mean: Optional[Tuple[float, float, float]] = (0.5, 0.5, 0.5),
std: Optional[Tuple[float, float, float]] = (0.5, 0.5, 0.5),
normalize: bool = True
):
self.mean = mean
self.std = std
self.normalize = normalize
transform_pipelines = [
T.ToTensor()
]
if normalize:
transform_pipelines.append(T.Normalize(mean, std))
self.transform = T.Compose(transform_pipelines)
def __call__(self, pil_img: Image.Image):
x = self.transform(pil_img)
return x
class DeepseekVLV2Processor(ProcessorMixin):
tokenizer_class = ("LlamaTokenizer", "LlamaTokenizerFast")
attributes = ["tokenizer"]
def __init__(
self,
tokenizer: LlamaTokenizerFast,
candidate_resolutions: Tuple[Tuple[int, int]],
patch_size: int,
downsample_ratio: int,
image_mean: Tuple[float, float, float] = (0.5, 0.5, 0.5),
image_std: Tuple[float, float, float] = (0.5, 0.5, 0.5),
normalize: bool = True,
image_token: str = "<image>",
pad_token: str = "<▁pad▁>",
add_special_token: bool = False,
sft_format: str = "deepseek",
mask_prompt: bool = True,
ignore_id: int = -100,
**kwargs,
):
self.candidate_resolutions = candidate_resolutions
self.image_size = candidate_resolutions[0][0]
self.patch_size = patch_size
self.image_mean = image_mean
self.image_std = image_std
self.normalize = normalize
self.downsample_ratio = downsample_ratio
self.image_transform = ImageTransform(mean=image_mean, std=image_std, normalize=normalize)
self.tokenizer = tokenizer
self.tokenizer.padding_side = 'left'  # must set this; the padding side makes a difference in batch inference
# add the pad_token as special token to use 'tokenizer.pad_token' and 'tokenizer.pad_token_id'
if tokenizer.pad_token is None:
self.tokenizer.add_special_tokens({'pad_token': pad_token})
print(f"Add pad token = ['{pad_token}'] to the tokenizer\n"
f"{pad_token}:{tokenizer.encode(pad_token, add_special_tokens=False)[0]}")
# add image token
image_token_id = self.tokenizer.vocab.get(image_token)
if image_token_id is None:
special_tokens = [image_token]
special_tokens_dict = {"additional_special_tokens": special_tokens}
self.tokenizer.add_special_tokens(special_tokens_dict)
self.image_token_id = self.tokenizer.vocab.get(image_token)
print(f"Add image token = ['{image_token}'] to the tokenizer\n"
f"{image_token}:{tokenizer.encode(image_token, add_special_tokens=False)[0]}")
# add five special tokens for grounding-related tasks
# <|ref|>, <|/ref|>, <|det|>, <|/det|>, <|grounding|>
special_tokens = ['<|ref|>', '<|/ref|>', '<|det|>', '<|/det|>', '<|grounding|>']
special_tokens_dict = {"additional_special_tokens": special_tokens}
self.tokenizer.add_special_tokens(special_tokens_dict)
print(f"Add grounding-related tokens = {special_tokens} to the tokenizer with input_ids\n"
f"<|ref|>:{tokenizer.encode('<|ref|>', add_special_tokens=False)[0]}\n"
f"<|/ref|>:{tokenizer.encode('<|/ref|>', add_special_tokens=False)[0]}\n"
f"<|det|>:{tokenizer.encode('<|det|>', add_special_tokens=False)[0]}\n"
f"<|/det|>:{tokenizer.encode('<|/det|>', add_special_tokens=False)[0]}\n"
f"<|grounding|>:{tokenizer.encode('<|grounding|>', add_special_tokens=False)[0]}")
# add special tokens for SFT data
special_tokens = ["<|User|>", "<|Assistant|>"]
special_tokens_dict = {"additional_special_tokens": special_tokens}
self.tokenizer.add_special_tokens(special_tokens_dict)
print(f"Add chat tokens = {special_tokens} to the tokenizer with input_ids\n"
f"<|User|>:{tokenizer.encode('<|User|>', add_special_tokens=False)[0]}\n"
f"<|Assistant|>:{tokenizer.encode('<|Assistant|>', add_special_tokens=False)[0]}\n")
self.image_token = image_token
self.pad_token = pad_token
self.add_special_token = add_special_token
self.sft_format = sft_format
self.mask_prompt = mask_prompt
self.ignore_id = ignore_id
super().__init__(
tokenizer,
**kwargs,
)
def new_chat_template(self):
conv = get_conv_template(self.sft_format)
return conv
def format_messages(
self,
conversations: List[Dict[str, str]],
sft_format: str = "deepseek",
system_prompt: str = "",
):
"""
Applies the SFT template to conversation.
Args:
conversations (List[Dict]): A List of messages.
sft_format (str, optional): The format of the SFT template to use. Defaults to "deepseek".
system_prompt (str, optional): The system prompt to use in the SFT template. Defaults to "".
Returns:
sft_prompt (str): The formatted text.
"""
conv = get_conv_template(sft_format)
conv.set_system_message(system_prompt)
for message in conversations:
conv.append_message(message["role"], message["content"].strip())
sft_prompt = conv.get_prompt().strip()
return sft_prompt
def format_messages_v2(self, messages, pil_images, systems=None):
"""play the role of format_messages_v2 and get_images_info in the last version"""
tokenized_data = []
masked_tokenized_data = [] # labels
images_list = []
images_seq_mask = []
images_spatial_crop = []
num_image_tokens = []
image_index = 0
conv = get_conv_template(self.sft_format)
conv_system_message = conv.system_message
for idx, message in enumerate(messages):
if idx == 0:
tokenized_data += [self.bos_id]
masked_tokenized_data += [self.bos_id]
images_seq_mask += [False]
conv.system_message = conv_system_message
else:
conv.system_message = ''
if message['role'] == conv.roles[0] or message['role'] == "user":
conv.reset_message()
conv.append_message(conv.roles[0], str(message['content']).strip())
conv.append_message(conv.roles[1], '')
formatted_question = conv.get_prompt()
tokenized_str, images, seq_mask, spatial_crop, n_image_tokens = self.tokenize_with_images(
formatted_question,
pil_images[image_index: image_index + formatted_question.count(self.image_token)],
bos=False,
eos=False,
cropping=len(pil_images) <= 2
)
image_index += formatted_question.count(self.image_token)
tokenized_data += tokenized_str
if self.mask_prompt:
masked_tokenized_data += [self.ignore_id] * len(tokenized_str)
else:
masked_tokenized_data += tokenized_str
images_list += images
images_seq_mask += seq_mask
images_spatial_crop += spatial_crop
num_image_tokens += n_image_tokens
elif message['role'] == conv.roles[1] or message['role'] == "assistant":
formatted_answer = message['content'].strip()
assert formatted_answer.count(
self.image_token) == 0, f"there should be no {self.image_token} in the assistant's reply, but got {messages}"
tokenized_str, images, seq_mask, spatial_crop, n_image_tokens = self.tokenize_with_images(
formatted_answer,
[],
bos=False,
eos=True,
cropping=len(pil_images) <= 2)
tokenized_data += tokenized_str
masked_tokenized_data += tokenized_str
images_seq_mask += seq_mask
elif message['role'] == 'system' or message['role'] == 'deepseekapi-sys':
# if a system message is present, it is only allowed as the first message, and it overrides conv's original system message
assert idx == 0, 'system information should only exist at the beginning of the conversation'
formatted_system = message['content'].strip()
tokenized_str = self.encode(formatted_system, bos=False, eos=False)
tokenized_data += tokenized_str
if self.mask_prompt:
masked_tokenized_data += [self.ignore_id] * len(tokenized_str)
else:
masked_tokenized_data += tokenized_str
seq_mask = [False] * len(tokenized_str)
images_seq_mask += seq_mask
else:
assert False, f"Unknown role: {message['role']}"
assert len(tokenized_data) == len(
images_seq_mask), f"format_messages_v2: tokenized_data's length {len(tokenized_data)} is not equal to images_seq_mask's length {len(images_seq_mask)}"
assert len(images_spatial_crop) == len(num_image_tokens), f"image number should be compatible"
return tokenized_data, masked_tokenized_data, images_list, images_seq_mask, images_spatial_crop, num_image_tokens
def format_prompts(
self,
prompts: str,
sft_format: str = "deepseek",
system_prompt: str = "",
):
"""
Applies the SFT template to prompts.
Args:
prompts (str): the non-sft formatted prompt;
sft_format (str, optional): The format of the SFT template to use. Defaults to "deepseek".
system_prompt (str, optional): The system prompt to use in the SFT template. Defaults to "".
Returns:
sft_prompt (str): The formatted text.
"""
conv = get_conv_template(sft_format)
conv.set_system_message(system_prompt)
conv.append_message(conv.roles[0], prompts.strip())
conv.append_message(conv.roles[1], "")
sft_prompt = conv.get_prompt().strip()
return sft_prompt
@property
def bos_id(self):
return self.tokenizer.bos_token_id
@property
def eos_id(self):
return self.tokenizer.eos_token_id
@property
def pad_id(self):
return self.tokenizer.pad_token_id
def encode(self, text: str, bos: bool = True, eos: bool = False):
t = self.tokenizer.encode(text, add_special_tokens=False)
if bos:
t = [self.bos_id] + t
if eos:
t = t + [self.eos_id]
return t
def decode(self, t: List[int], **kwargs) -> str:
return self.tokenizer.decode(t, **kwargs)
def process_one(
self,
prompt: str = None,
conversations: List[Dict[str, str]] = None,
images: List[Image.Image] = None,
apply_sft_format: bool = False,
inference_mode: bool = True,
system_prompt: str = "",
**kwargs,
):
"""
Args:
prompt (str): the formatted prompt;
conversations (List[Dict]): conversations with a list of messages;
images (List[ImageType]): the list of images;
apply_sft_format (bool): if prompt is not None, then apply the SFT format to prompt;
if conversations is not None, then it will always apply the SFT format to conversations;
inference_mode (bool): if True, then remove the last eos token;
system_prompt (str): the system prompt;
**kwargs:
Returns:
outputs (BaseProcessorOutput): the output of the processor,
- input_ids (torch.LongTensor): [N + image tokens]
- target_ids (torch.LongTensor): [N + image tokens]
- images (torch.FloatTensor): [n_images, 3, H, W]
- image_id (int): the id of the image token
- num_image_tokens (List[int]): the number of image tokens
"""
assert (
prompt is None or conversations is None
), "prompt and conversations cannot be used at the same time."
if prompt is None:
# apply sft format
sft_format = self.format_messages(
conversations=conversations,
sft_format=self.sft_format,
system_prompt=system_prompt,
)
tokenized_str, masked_tokenized_str, images_list, images_seq_mask, images_spatial_crop, num_image_tokens = self.format_messages_v2(
conversations, images)
else:
if apply_sft_format:
sft_format = self.format_prompts(
prompts=prompt,
sft_format=self.sft_format,
system_prompt=system_prompt
)
else:
sft_format = prompt
tokenized_str, images_list, images_seq_mask, images_spatial_crop, num_image_tokens = self.tokenize_with_images(
sft_format, images, bos=True, eos=True, cropping=len(images) <= 2)
masked_tokenized_str = []
for token_index in tokenized_str:
if token_index != self.image_token_id:
masked_tokenized_str.append(token_index)
else:
masked_tokenized_str.append(self.ignore_id)
assert len(tokenized_str) == len(images_seq_mask) == len(masked_tokenized_str), \
(f"tokenized_str's length {len(tokenized_str)}, input_ids' length {len(masked_tokenized_str)}, "
f"imags_seq_mask's length {len(images_seq_mask)}, are not equal")
input_ids = torch.LongTensor(tokenized_str)
target_ids = torch.LongTensor(masked_tokenized_str)
images_seq_mask = torch.tensor(images_seq_mask, dtype=torch.bool)
# set input_ids < 0 | input_ids == self.image_token_id as ignore_id
target_ids[(input_ids < 0) | (input_ids == self.image_token_id)] = self.ignore_id
input_ids[input_ids < 0] = self.pad_id
if inference_mode:
# remove the trailing eos token
assert input_ids[-1] == self.eos_id
input_ids = input_ids[:-1]
target_ids = target_ids[:-1]
images_seq_mask = images_seq_mask[:-1]
if len(images_list) == 0:
images = torch.zeros((1, 3, self.image_size, self.image_size))
images_spatial_crop = torch.zeros((1, 2), dtype=torch.long)
else:
images = torch.stack(images_list, dim=0)
images_spatial_crop = torch.tensor(images_spatial_crop, dtype=torch.long)
prepare = VLChatProcessorOutput(
sft_format=sft_format,
input_ids=input_ids,
target_ids=target_ids,
images=images,
images_seq_mask=images_seq_mask,
images_spatial_crop=images_spatial_crop,
num_image_tokens=num_image_tokens
)
return prepare
def __call__(
self,
*,
prompt: str = None,
conversations: List[Dict[str, str]] = None,
images: List[Image.Image] = None,
apply_sft_format: bool = False,
force_batchify: bool = True,
inference_mode: bool = True,
system_prompt: str = "",
**kwargs,
):
"""
Args:
prompt (str): the formatted prompt;
conversations (List[Dict]): conversations with a list of messages;
images (List[ImageType]): the list of images;
apply_sft_format (bool): if prompt is not None, then apply the SFT format to prompt;
if conversations is not None, then it will always apply the SFT format to conversations;
force_batchify (bool): force batchify the inputs;
inference_mode (bool): if True, then remove the last eos token;
system_prompt (str): the system prompt;
**kwargs:
Returns:
outputs (BaseProcessorOutput): the output of the processor,
- input_ids (torch.LongTensor): [N + image tokens]
- images (torch.FloatTensor): [n_images, 3, H, W]
- image_id (int): the id of the image token
- num_image_tokens (List[int]): the number of image tokens
"""
prepare = self.process_one(
prompt=prompt,
conversations=conversations,
images=images,
apply_sft_format=apply_sft_format,
inference_mode=inference_mode,
system_prompt=system_prompt
)
if force_batchify:
prepare = self.batchify([prepare])
return prepare
def tokenize_with_images(
self,
conversation: str,
images: List[Image.Image],
bos: bool = True,
eos: bool = True,
cropping: bool = True,
):
"""Tokenize text with <image> tags."""
assert conversation.count(self.image_token) == len(images)
text_splits = conversation.split(self.image_token)
images_list, images_seq_mask, images_spatial_crop = [], [], []
num_image_tokens = []
tokenized_str = []
for text_sep, image in zip(text_splits, images):
"""encode text_sep"""
tokenized_sep = self.encode(text_sep, bos=False, eos=False)
tokenized_str += tokenized_sep
images_seq_mask += [False] * len(tokenized_sep)
"""select best resolution for anyres"""
if cropping:
best_width, best_height = select_best_resolution(image.size, self.candidate_resolutions)
else:
best_width, best_height = self.image_size, self.image_size
# print(image.size, (best_width, best_height)) # check the select_best_resolutions func
"""process the global view"""
global_view = ImageOps.pad(image, (self.image_size, self.image_size),
color=tuple(int(x * 255) for x in self.image_transform.mean))
images_list.append(self.image_transform(global_view))
"""process the local views"""
local_view = ImageOps.pad(image, (best_width, best_height),
color=tuple(int(x * 255) for x in self.image_transform.mean))
for i in range(0, best_height, self.image_size):
for j in range(0, best_width, self.image_size):
images_list.append(
self.image_transform(local_view.crop((j, i, j + self.image_size, i + self.image_size))))
"""record height / width crop num"""
num_width_tiles, num_height_tiles = best_width // self.image_size, best_height // self.image_size
images_spatial_crop.append([num_width_tiles, num_height_tiles])
"""add image tokens"""
h = w = math.ceil((self.image_size // self.patch_size) / self.downsample_ratio)
# global view tokens: h * (w + 1), where the +1 is for the line separator
tokenized_image = [self.image_token_id] * h * (w + 1)
# add a separator between global and local views
tokenized_image += [self.image_token_id]
# local views tokens, (num_height_tiles * h) * (num_width_tiles * w + 1)
tokenized_image += [self.image_token_id] * (num_height_tiles * h) * (num_width_tiles * w + 1)
tokenized_str += tokenized_image
images_seq_mask += [True] * len(tokenized_image)
num_image_tokens.append(len(tokenized_image))
# print(width_crop_num, height_crop_num, len(tokenized_image)) # test the correctness of the number of image-related tokens
"""process the last text split"""
tokenized_sep = self.encode(text_splits[-1], bos=False, eos=False)
tokenized_str += tokenized_sep
images_seq_mask += [False] * len(tokenized_sep)
"""add the bos and eos tokens"""
if bos:
tokenized_str = [self.bos_id] + tokenized_str
images_seq_mask = [False] + images_seq_mask
if eos:
tokenized_str = tokenized_str + [self.eos_id]
images_seq_mask = images_seq_mask + [False]
assert len(tokenized_str) == len(
images_seq_mask), f"tokenize_with_images func: tokenized_str's length {len(tokenized_str)} is not equal to imags_seq_mask's length {len(images_seq_mask)}"
return tokenized_str, images_list, images_seq_mask, images_spatial_crop, num_image_tokens
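# Worked example of the per-image token count, assuming image_size=384, patch_size=16 and
# downsample_ratio=2 (matching the defaults used elsewhere in this repo) and a hypothetical
# 2 x 1 crop, i.e. num_width_tiles=2, num_height_tiles=1; then h = w = ceil((384 // 16) / 2) = 12 and
#   global view     : h * (w + 1)                                        = 12 * 13 = 156
#   separator token : 1
#   local views     : (num_height_tiles * h) * (num_width_tiles * w + 1) = 12 * 25 = 300
#   total           : 156 + 1 + 300 = 457 image tokens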
def batchify(
self,
sample_list: List[VLChatProcessorOutput],
padding: Literal["left", "right"] = "left"
) -> BatchCollateOutput:
"""
Preprocesses the inputs for multimodal inference.
Args:
sample_list (List[VLChatProcessorOutput]): A list of VLChatProcessorOutput.
padding (str): The padding method. Defaults to "left".
Returns:
BatchCollateOutput: A dictionary of the inputs to use for multimodal inference.
"""
batched_sft_format = [sample.sft_format for sample in sample_list]
batched_input_ids = [sample.input_ids for sample in sample_list]
batched_labels = [sample.target_ids for sample in sample_list]
batched_images_seq_mask = [sample["images_seq_mask"] for sample in sample_list]
seq_lens = [len(sample) for sample in sample_list]
"""padding input_ids and images_seq_mask"""
if padding == "left":
# the tokenizer is default to pad at left
## TODO, You're using a LlamaTokenizerFast tokenizer.
# Please note that with a fast tokenizer, using the `__call__` method is faster than
# using a method to encode the text followed by a call to the `pad` method to get a padded encoding.
padded_input_ids = self.tokenizer.pad({"input_ids": batched_input_ids})
batched_input_ids, batched_attention_mask = padded_input_ids["input_ids"], padded_input_ids[
"attention_mask"].bool()
batched_labels = self.tokenizer.pad({"input_ids": batched_labels})["input_ids"]
batched_labels[batched_labels == self.pad_id] = self.ignore_id  # labels normally never contain pad_id, so no extra protection is needed
batched_images_seq_mask = self.tokenizer.pad({"input_ids": batched_images_seq_mask})["input_ids"]
batched_images_seq_mask[batched_images_seq_mask == self.pad_id] = False
else:
batched_input_ids = pad_sequence(batched_input_ids, batch_first=True, padding_value=self.pad_id)
batched_labels = pad_sequence(batched_labels, batch_first=True, padding_value=self.ignore_id)
batched_images_seq_mask = pad_sequence(batched_images_seq_mask, batch_first=True, padding_value=0)
batched_attention_mask = batched_input_ids != self.pad_id
"""padding images to max_patch_num"""
max_n_patches = max(sample["images"].shape[0] for sample in sample_list)
batched_images = []
for sample in sample_list:
images = sample["images"]
n_pads = max_n_patches - images.shape[0]
if n_pads > 0:
pad_images = torch.zeros((n_pads, *images.shape[1:]), dtype=images.dtype)
images = torch.cat([images, pad_images], dim=0)
batched_images.append(images)
batched_images = torch.stack(batched_images, dim=0)
"""padding images_spatial_crop to max_n_images"""
max_n_images = max(sample["images_spatial_crop"].shape[0] for sample in sample_list)
batched_images_spatial_crop = []
for sample in sample_list:
images_spatial_crop = sample["images_spatial_crop"]
n_pads = max_n_images - sample["images_spatial_crop"].shape[0]
if n_pads > 0:
pad_images_spatial_crop = torch.full((n_pads, 2), 0, dtype=images_spatial_crop.dtype)
images_spatial_crop = torch.cat([images_spatial_crop, pad_images_spatial_crop], dim=0)
batched_images_spatial_crop.append(images_spatial_crop)
batched_images_spatial_crop = torch.stack(batched_images_spatial_crop, dim=0)
batched_samples = BatchCollateOutput(
input_ids=batched_input_ids,
attention_mask=batched_attention_mask,
labels=batched_labels,
images=batched_images,
images_seq_mask=batched_images_seq_mask,
images_spatial_crop=batched_images_spatial_crop,
sft_format=batched_sft_format,
seq_lens=seq_lens
)
return batched_samples

deepseek_vl/models/siglip_vit.py Normal file

@@ -0,0 +1,656 @@
# https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/vision_transformer.py
from dataclasses import dataclass
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Final, Optional, Callable, Union, Tuple, List, Set, Dict, Type, Literal, Sequence
import math
import warnings
from timm.layers import (
PatchEmbed, Mlp, DropPath,
AttentionPoolLatent, PatchDropout, resample_abs_pos_embed, LayerType
)
from timm.models._manipulate import named_apply, checkpoint_seq, adapt_input_conv
from flash_attn import flash_attn_qkvpacked_func
from xformers.ops import memory_efficient_attention
from functools import partial
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn(
"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2,
)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std) # noqa: E741
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
# type: (torch.Tensor, float, float, float, float) -> torch.Tensor
r"""The original timm.models.layers.weight_init.trunc_normal_ can not handle bfloat16 yet, here we first
convert the tensor to float32, apply the trunc_normal_() in float32, and then convert it back to its orignal dtype.
Fills the input Tensor with values drawn from a truncated normal distribution. The values are effectively drawn
from the normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
with torch.no_grad():
dtype = tensor.dtype
tensor_fp32 = tensor.float()
tensor_fp32 = _no_grad_trunc_normal_(tensor_fp32, mean, std, a, b)
tensor_dtype = tensor_fp32.to(dtype=dtype)
tensor.copy_(tensor_dtype)
def init_weights(self):
# monkey-patched onto timm's AttentionPoolLatent below (AttentionPoolLatent.init_weights = init_weights)
if self.pos_embed is not None:
trunc_normal_(self.pos_embed, std=self.pos_embed.shape[1] ** -0.5)
trunc_normal_(self.latent, std=self.latent_dim ** -0.5)
def init_weights_vit_timm(module: nn.Module, name: str = '') -> None:
""" ViT weight initialization, original timm impl (for reproducibility) """
if isinstance(module, nn.Linear):
trunc_normal_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif hasattr(module, 'init_weights'):
module.init_weights()
class Attention(nn.Module):
fused_attn: Final[bool]
def __init__(
self,
dim: int,
num_heads: int = 8,
qkv_bias: bool = False,
qk_norm: bool = False,
attn_drop: float = 0.,
proj_drop: float = 0.,
norm_layer: nn.Module = nn.LayerNorm,
deterministic: bool = False,
) -> None:
super().__init__()
assert dim % num_heads == 0, 'dim should be divisible by num_heads'
self.num_heads = num_heads
self.head_dim = dim // num_heads
self.scale = self.head_dim ** -0.5
self.qk_norm = qk_norm
self.fused_attn = True
self.deterministic = deterministic
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop) if proj_drop > 0. else nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor:
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim)
if not self.qk_norm:
if self.head_dim % 32 == 0:
# flash-attn requires head_dim to be a multiple of 32, so SigLIP-SO400M cannot use flash-attn
x = flash_attn_qkvpacked_func(qkv, dropout_p=self.attn_drop.p if self.training else 0.,
deterministic=self.deterministic)
else:
q, k, v = qkv.unbind(2)
x = memory_efficient_attention(q, k, v, p=self.attn_drop.p if self.training else 0.)
x = x.reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
qkv = qkv.permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0)
q, k = self.q_norm(q), self.k_norm(k)
if self.fused_attn:
with torch.backends.cuda.sdp_kernel(enable_math=False, enable_mem_efficient=False):
# force the flash-attention kernel via the SDP kernel context manager
x = F.scaled_dot_product_attention(
q, k, v,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class LayerScale(nn.Module):
def __init__(
self,
dim: int,
init_values: float = 1e-5,
inplace: bool = False,
) -> None:
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x: torch.Tensor) -> torch.Tensor:
return x.mul_(self.gamma) if self.inplace else x * self.gamma
class Block(nn.Module):
def __init__(
self,
dim: int,
num_heads: int,
mlp_ratio: float = 4.,
qkv_bias: bool = False,
qk_norm: bool = False,
proj_drop: float = 0.,
attn_drop: float = 0.,
init_values: Optional[float] = None,
drop_path: float = 0.,
act_layer: nn.Module = nn.GELU,
norm_layer: nn.Module = nn.LayerNorm,
mlp_layer: nn.Module = Mlp,
deterministic: bool = False,
) -> None:
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_norm=qk_norm,
attn_drop=attn_drop,
proj_drop=proj_drop,
norm_layer=norm_layer,
deterministic=deterministic,
)
self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = mlp_layer(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
)
self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = x + self.drop_path1(self.ls1(self.attn(self.norm1(x))))
x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
return x
class VisionTransformer(nn.Module):
""" Vision Transformer
A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`
- https://arxiv.org/abs/2010.11929
"""
dynamic_img_size: Final[bool]
def __init__(
self,
img_size: Union[int, Tuple[int, int]] = 224,
patch_size: Union[int, Tuple[int, int]] = 16,
in_chans: int = 3,
num_classes: int = 1000,
global_pool: Literal['', 'avg', 'token', 'map'] = 'token',
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
mlp_ratio: float = 4.,
qkv_bias: bool = True,
qk_norm: bool = False,
init_values: Optional[float] = None,
class_token: bool = True,
no_embed_class: bool = False,
reg_tokens: int = 0,
pre_norm: bool = False,
fc_norm: Optional[bool] = None,
dynamic_img_size: bool = False,
dynamic_img_pad: bool = False,
drop_rate: float = 0.,
pos_drop_rate: float = 0.,
patch_drop_rate: float = 0.,
proj_drop_rate: float = 0.,
attn_drop_rate: float = 0.,
drop_path_rate: float = 0.,
weight_init: Literal['skip', 'jax', 'jax_nlhb', 'moco', ''] = '',
embed_layer: Callable = PatchEmbed,
norm_layer: Optional[LayerType] = None,
act_layer: Optional[LayerType] = None,
block_fn: Type[nn.Module] = Block,
mlp_layer: Type[nn.Module] = Mlp,
ignore_head: bool = False,
deterministic: bool = False,
num_recomputing_layers: int = 0
) -> None:
"""
Args:
img_size: Input image size.
patch_size: Patch size.
in_chans: Number of image input channels.
num_classes: Number of classes for classification head.
global_pool: Type of global pooling for final sequence (default: 'token').
embed_dim: Transformer embedding dimension.
depth: Depth of transformer.
num_heads: Number of attention heads.
mlp_ratio: Ratio of mlp hidden dim to embedding dim.
qkv_bias: Enable bias for qkv projections if True.
init_values: Layer-scale init values (layer-scale enabled if not None).
class_token: Use class token.
no_embed_class: Don't include position embeddings for class (or reg) tokens.
reg_tokens: Number of register tokens.
fc_norm: Pre head norm after pool (instead of before), if None, enabled when global_pool == 'avg'.
drop_rate: Head dropout rate.
pos_drop_rate: Position embedding dropout rate.
attn_drop_rate: Attention dropout rate.
drop_path_rate: Stochastic depth rate.
weight_init: Weight initialization scheme.
embed_layer: Patch embedding layer.
norm_layer: Normalization layer.
act_layer: MLP activation layer.
block_fn: Transformer block layer.
"""
super().__init__()
assert global_pool in ('', 'avg', 'token', 'map')
assert class_token or global_pool != 'token'
use_fc_norm = global_pool == 'avg' if fc_norm is None else fc_norm
# norm_layer = get_norm_layer(norm_layer) or partial(nn.LayerNorm, eps=1e-6)
# act_layer = get_act_layer(act_layer) or nn.GELU
norm_layer = partial(nn.LayerNorm, eps=1e-6)
# siglip use PytorchGELUTanh() rather than the vanilla nn.GELU()
# https://github.com/huggingface/transformers/blob/78b2929c0554b79e0489b451ce4ece14d265ead2/src/transformers/models/siglip/configuration_siglip.py#L191
act_layer = partial(nn.GELU, approximate='tanh')
self.num_classes = num_classes
self.global_pool = global_pool
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.num_prefix_tokens = 1 if class_token else 0
self.num_prefix_tokens += reg_tokens
self.num_reg_tokens = reg_tokens
self.has_class_token = class_token
self.no_embed_class = no_embed_class # don't embed prefix positions (includes reg)
self.dynamic_img_size = dynamic_img_size
self.grad_checkpointing = False
self.ignore_head = ignore_head
self.num_recomputing_layers = num_recomputing_layers
embed_args = {}
if dynamic_img_size:
# flatten deferred until after pos embed
embed_args.update(dict(strict_img_size=False, output_fmt='NHWC'))
self.patch_embed = embed_layer(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
bias=not pre_norm, # disable bias if pre-norm is used (e.g. CLIP)
dynamic_img_pad=dynamic_img_pad,
**embed_args,
)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if class_token else None
self.reg_token = nn.Parameter(torch.zeros(1, reg_tokens, embed_dim)) if reg_tokens else None
embed_len = num_patches if no_embed_class else num_patches + self.num_prefix_tokens
self.pos_embed = nn.Parameter(torch.randn(1, embed_len, embed_dim) * .02)
self.pos_drop = nn.Dropout(p=pos_drop_rate)
if patch_drop_rate > 0:
self.patch_drop = PatchDropout(
patch_drop_rate,
num_prefix_tokens=self.num_prefix_tokens,
)
else:
self.patch_drop = nn.Identity()
self.norm_pre = norm_layer(embed_dim) if pre_norm else nn.Identity()
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.Sequential(*[
block_fn(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_norm=qk_norm,
init_values=init_values,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
act_layer=act_layer,
mlp_layer=mlp_layer,
deterministic=deterministic,
)
for i in range(depth)])
self.norm = norm_layer(embed_dim) if not use_fc_norm else nn.Identity()
# Classifier Head
if global_pool == 'map':
AttentionPoolLatent.init_weights = init_weights
self.attn_pool = AttentionPoolLatent(
self.embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
norm_layer=norm_layer,
)
else:
self.attn_pool = None
self.fc_norm = norm_layer(embed_dim) if use_fc_norm else nn.Identity()
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
if weight_init != 'skip':
self.init_weights(weight_init)
def init_weights(self, mode: Literal['jax', 'jax_nlhb', 'moco', ''] = '') -> None:
assert mode in ('jax', 'jax_nlhb', 'moco', '')
head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.
trunc_normal_(self.pos_embed, std=.02)
if self.cls_token is not None:
nn.init.normal_(self.cls_token, std=1e-6)
named_apply(init_weights_vit_timm, self)
@torch.jit.ignore
def no_weight_decay(self) -> Set:
return {'pos_embed', 'cls_token', 'dist_token'}
@torch.jit.ignore
def group_matcher(self, coarse: bool = False) -> Dict:
return dict(
stem=r'^cls_token|pos_embed|patch_embed', # stem and embed
blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable: bool = True) -> None:
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head
def reset_classifier(self, num_classes: int, global_pool=None) -> None:
self.num_classes = num_classes
if global_pool is not None:
assert global_pool in ('', 'avg', 'token', 'map')
if global_pool == 'map' and self.attn_pool is None:
assert False, "Cannot currently add attention pooling in reset_classifier()."
elif global_pool != 'map' and self.attn_pool is not None:
self.attn_pool = None # remove attention pooling
self.global_pool = global_pool
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def _pos_embed(self, x: torch.Tensor) -> torch.Tensor:
if self.dynamic_img_size:
B, H, W, C = x.shape
pos_embed = resample_abs_pos_embed(
self.pos_embed,
(H, W),
num_prefix_tokens=0 if self.no_embed_class else self.num_prefix_tokens,
)
x = x.view(B, -1, C)
else:
pos_embed = self.pos_embed
to_cat = []
if self.cls_token is not None:
to_cat.append(self.cls_token.expand(x.shape[0], -1, -1))
if self.reg_token is not None:
to_cat.append(self.reg_token.expand(x.shape[0], -1, -1))
if self.no_embed_class:
# deit-3, updated JAX (big vision)
# position embedding does not overlap with class token, add then concat
x = x + pos_embed
if to_cat:
x = torch.cat(to_cat + [x], dim=1)
else:
# original timm, JAX, and deit vit impl
# pos_embed has entry for class token, concat then add
if to_cat:
x = torch.cat(to_cat + [x], dim=1)
x = x + pos_embed
return self.pos_drop(x)
def _intermediate_layers(
self,
x: torch.Tensor,
n: Union[int, Sequence] = 1,
) -> List[torch.Tensor]:
outputs, num_blocks = [], len(self.blocks)
take_indices = set(range(num_blocks - n, num_blocks) if isinstance(n, int) else n)
# forward pass
x = self.patch_embed(x)
x = self._pos_embed(x)
x = self.patch_drop(x)
x = self.norm_pre(x)
for i, blk in enumerate(self.blocks):
x = blk(x)
if i in take_indices:
outputs.append(x)
return outputs
def get_intermediate_layers(
self,
x: torch.Tensor,
n: Union[int, Sequence] = 1,
reshape: bool = False,
return_prefix_tokens: bool = False,
norm: bool = False,
) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]]]:
""" Intermediate layer accessor (NOTE: This is a WIP experiment).
Inspired by DINO / DINOv2 interface
"""
# take last n blocks if n is an int, if n is a sequence, select by matching indices
outputs = self._intermediate_layers(x, n)
if norm:
outputs = [self.norm(out) for out in outputs]
prefix_tokens = [out[:, 0:self.num_prefix_tokens] for out in outputs]
outputs = [out[:, self.num_prefix_tokens:] for out in outputs]
if reshape:
grid_size = self.patch_embed.grid_size
outputs = [
out.reshape(x.shape[0], grid_size[0], grid_size[1], -1).permute(0, 3, 1, 2).contiguous()
for out in outputs
]
if return_prefix_tokens:
return tuple(zip(outputs, prefix_tokens))
return tuple(outputs)
def forward_features(self, x: torch.Tensor) -> torch.Tensor:
if getattr(self, "is_first_stage", True):
x = self.patch_embed(x)
x = self._pos_embed(x)
x = self.patch_drop(x)
x = self.norm_pre(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
skip_last = max(1, len(self.blocks) - self.num_recomputing_layers)
x = checkpoint_seq(self.blocks, x, skip_last=skip_last)
else:
x = self.blocks(x)
if getattr(self, "is_last_stage", True):
x = self.norm(x)
return x
def forward_head(self, x: torch.Tensor, pre_logits: bool = False) -> torch.Tensor:
if not getattr(self, "is_last_stage", True):
return x
if self.attn_pool is not None:
x = self.attn_pool(x)
elif self.global_pool == 'avg':
x = x[:, self.num_prefix_tokens:].mean(dim=1)
elif self.global_pool:
x = x[:, 0] # class token
x = self.fc_norm(x)
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.forward_features(x)
if not self.ignore_head:
x = self.forward_head(x)
return x
def to_pipeline(self, pp_size, pp_rank, pp_splits: Optional[List[int]] = None):
self.is_first_stage = pp_rank == 0
self.is_last_stage = pp_rank == pp_size - 1
if not self.is_first_stage and hasattr(self, "patch_embed"):
del self.patch_embed, self.cls_token, self.reg_token, self.pos_embed, self.pos_drop, self.patch_drop, self.norm_pre
if not self.is_last_stage and hasattr(self, "norm"):
del self.norm, self.attn_pool, self.fc_norm, self.head_drop, self.head
if pp_splits is not None:
assert len(self.blocks) == sum(pp_splits)
splits = np.cumsum([0] + pp_splits)
self.blocks = self.blocks[splits[pp_rank]:splits[pp_rank + 1]]
return self
@dataclass
class SigLIPVisionCfg:
width: int = 1152
layers: Union[Tuple[int, int, int, int], int] = 27
heads: int = 16
patch_size: int = 14
image_size: Union[Tuple[int, int], int] = 336
global_pool: str = "map"
mlp_ratio: float = 3.7362
class_token: bool = False
num_classes: int = 0
use_checkpoint: bool = False
SigLIP_MODEL_CONFIG = {
"siglip_so400m_patch14_384": {
"image_size": 384,
"patch_size": 14,
"width": 1152,
"layers": 27,
"heads": 16,
"mlp_ratio": 3.7362,
"global_pool": "map",
"use_checkpoint": False
},
"siglip_so400m_patch14_224": {
"image_size": 224,
"patch_size": 14,
"width": 1152,
"layers": 27,
"heads": 16,
"mlp_ratio": 3.7362,
"global_pool": "map",
"use_checkpoint": False
},
"siglip_large_patch16_384": {
"image_size": 384,
"patch_size": 16,
"width": 1024,
"layers": 24,
"heads": 16,
"mlp_ratio": 4,
"global_pool": "map",
"use_checkpoint": False
}
}
def create_siglip_vit(
model_name: str = "siglip_so400m_patch14_384",
image_size: int = 384,
select_layer: int = -1,
ckpt_path: str = "",
**kwargs
):
assert model_name in SigLIP_MODEL_CONFIG.keys(), f"model name should be in {SigLIP_MODEL_CONFIG.keys()}"
vision_cfg = SigLIPVisionCfg(**SigLIP_MODEL_CONFIG[model_name])
if select_layer <= 0:
layers = min(vision_cfg.layers, vision_cfg.layers + select_layer + 1)
else:
layers = min(vision_cfg.layers, select_layer)
model = VisionTransformer(
img_size=image_size,
patch_size=vision_cfg.patch_size,
embed_dim=vision_cfg.width,
depth=layers,
num_heads=vision_cfg.heads,
mlp_ratio=vision_cfg.mlp_ratio,
class_token=vision_cfg.class_token,
global_pool=vision_cfg.global_pool,
ignore_head=kwargs.get("ignore_head", True),
weight_init=kwargs.get("weight_init", "skip"),
num_classes=0,
deterministic=kwargs.get("deterministic", False),
num_recomputing_layers=kwargs.get("num_recomputing_layers", 0)
)
if ckpt_path:
state_dict = torch.load(ckpt_path, map_location="cpu")
incompatible_keys = model.load_state_dict(state_dict, strict=False)
print(f"SigLIP-ViT restores from {ckpt_path},\n"
f"\tincompatible_keys:', {incompatible_keys}.")
return model
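
A small usage sketch of create_siglip_vit (illustrative only, not part of the commit). It assumes flash-attn and xformers are installed, since this file imports them unconditionally, and it needs a CUDA device because the attention forward path dispatches to those kernels; ckpt_path is left empty so no checkpoint file is required.

import torch

vit = create_siglip_vit("siglip_so400m_patch14_384", image_size=384)  # ignore_head defaults to True here
vit = vit.cuda().eval()
with torch.no_grad():
    feats = vit(torch.randn(1, 3, 384, 384, device="cuda"))
# with the head ignored, feats is the patch-token sequence: (1, num_patches, 1152) for this config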

View File

@@ -0,0 +1,83 @@
# Copyright (c) 2023-2024 DeepSeek.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from functools import wraps
import gradio as gr
def wrap_gen_fn(gen_fn):
@wraps(gen_fn)
def wrapped_gen_fn(prompt, *args, **kwargs):
try:
yield from gen_fn(prompt, *args, **kwargs)
except gr.Error as g_err:
raise g_err
except Exception as e:
raise gr.Error(f"Failed to generate text: {e}") from e
return wrapped_gen_fn
def delete_last_conversation(chatbot, history):
if len(history) % 2 != 0:
gr.Error("history length is not even")
return (
chatbot,
history,
"Delete Done",
)
if len(chatbot) > 0:
chatbot.pop()
if len(history) > 0 and len(history) % 2 == 0:
history.pop()
history.pop()
return (
chatbot,
history,
"Delete Done",
)
def reset_state():
return [], [], None, "Reset Done"
def reset_textbox():
return gr.update(value=""), ""
def cancel_outputing():
return "Stop Done"
class State:
interrupted = False
def interrupt(self):
self.interrupted = True
def recover(self):
self.interrupted = False
shared_state = State()
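
A brief sketch of how these helpers are meant to be wired into the Gradio app (illustrative only; `predict` and `stream_backend` are hypothetical names).

@wrap_gen_fn
def predict(prompt, chatbot, history):
    # any unexpected exception raised while streaming surfaces in the UI as gr.Error
    for partial_text in stream_backend(prompt, history):  # hypothetical streaming backend
        if shared_state.interrupted:
            shared_state.recover()
            break
        yield chatbot + [[prompt, partial_text]], history, "Generating..."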

View File

@@ -0,0 +1,81 @@
# Copyright (c) 2023-2024 DeepSeek.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import annotations
import logging
from typing import List, Tuple
from deepseek_vl.serve.app_modules.presets import gr
from deepseek_vl.serve.app_modules.utils import convert_asis, convert_mdtext, detect_converted_mark
def compact_text_chunks(self, prompt, text_chunks: List[str]) -> List[str]:
logging.debug("Compacting text chunks...🚀🚀🚀")
combined_str = [c.strip() for c in text_chunks if c.strip()]
combined_str = [f"[{index+1}] {c}" for index, c in enumerate(combined_str)]
combined_str = "\n\n".join(combined_str)
# resplit based on self.max_chunk_overlap
text_splitter = self.get_text_splitter_given_prompt(prompt, 1, padding=1)
return text_splitter.split_text(combined_str)
def postprocess(
self, y: List[Tuple[str | None, str | None]]
) -> List[Tuple[str | None, str | None]]:
"""
Parameters:
y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format.
Returns:
List of tuples representing the message and response. Each message and response will be a string of HTML.
"""
if y is None or y == []:
return []
temp = []
for x in y:
user, bot = x
if not detect_converted_mark(user):
user = convert_asis(user)
if not detect_converted_mark(bot):
bot = convert_mdtext(bot)
temp.append((user, bot))
return temp
with open("deepseek_vl/serve/assets/custom.js", "r", encoding="utf-8") as f, open(
"deepseek_vl/serve/assets/Kelpy-Codos.js", "r", encoding="utf-8"
) as f2:
customJS = f.read()
kelpyCodos = f2.read()
def reload_javascript():
print("Reloading javascript...")
js = f"<script>{customJS}</script><script>{kelpyCodos}</script>"
def template_response(*args, **kwargs):
res = GradioTemplateResponseOriginal(*args, **kwargs)
res.body = res.body.replace(b"</html>", f"{js}</html>".encode("utf8"))
res.init_headers()
return res
gr.routes.templates.TemplateResponse = template_response
GradioTemplateResponseOriginal = gr.routes.templates.TemplateResponse

View File

@@ -0,0 +1,115 @@
# Copyright (c) 2023-2024 DeepSeek.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# -*- coding:utf-8 -*-
import gradio as gr
title = """<h1 align="left" style="min-width:200px; margin-top:0;">Chat with DeepSeek-VL2 </h1>"""
description_top = """"""
description = """"""
CONCURRENT_COUNT = 1
MAX_EVENTS = 10
MAX_IMAGE_SIZE = 800
MIN_IMAGE_SIZE = 400
BOX2COLOR = {
0: (255, 0, 0),
1: (0, 255, 0),
2: (0, 0, 255),
3: (0, 255, 255),
4: (255, 255, 0),
5: (255, 0, 255),
6: (127, 127, 127),
7: (255, 255, 127),
8: (255, 127, 255),
9: (127, 255, 255),
10: (127, 127, 255),
11: (127, 255, 127),
12: (255, 127, 127),
}
ALREADY_CONVERTED_MARK = "<!-- ALREADY CONVERTED BY PARSER. -->"
small_and_beautiful_theme = gr.themes.Soft(
primary_hue=gr.themes.Color(
c50="#EBFAF2",
c100="#CFF3E1",
c200="#A8EAC8",
c300="#77DEA9",
c400="#3FD086",
c500="#02C160",
c600="#06AE56",
c700="#05974E",
c800="#057F45",
c900="#04673D",
c950="#2E5541",
name="small_and_beautiful",
),
secondary_hue=gr.themes.Color(
c50="#576b95",
c100="#576b95",
c200="#576b95",
c300="#576b95",
c400="#576b95",
c500="#576b95",
c600="#576b95",
c700="#576b95",
c800="#576b95",
c900="#576b95",
c950="#576b95",
),
neutral_hue=gr.themes.Color(
name="gray",
c50="#f6f7f8",
# c100="#f3f4f6",
c100="#F2F2F2",
c200="#e5e7eb",
c300="#d1d5db",
c400="#B2B2B2",
c500="#808080",
c600="#636363",
c700="#515151",
c800="#393939",
# c900="#272727",
c900="#2B2B2B",
c950="#171717",
),
radius_size=gr.themes.sizes.radius_sm,
).set(
# button_primary_background_fill="*primary_500",
button_primary_background_fill_dark="*primary_600",
# button_primary_background_fill_hover="*primary_400",
# button_primary_border_color="*primary_500",
button_primary_border_color_dark="*primary_600",
button_primary_text_color="white",
button_primary_text_color_dark="white",
button_secondary_background_fill="*neutral_100",
button_secondary_background_fill_hover="*neutral_50",
button_secondary_background_fill_dark="*neutral_900",
button_secondary_text_color="*neutral_800",
button_secondary_text_color_dark="white",
# background_fill_primary="#F7F7F7",
# background_fill_primary_dark="#1F1F1F",
# block_title_text_color="*primary_500",
block_title_background_fill_dark="*primary_900",
block_label_background_fill_dark="*primary_900",
input_background_fill="#F6F6F6",
# chatbot_code_background_color_dark="*neutral_950",
)

View File

@@ -0,0 +1,309 @@
# Copyright (c) 2023-2024 DeepSeek.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# -*- coding:utf-8 -*-
from __future__ import annotations
import html
import logging
import io
import os
import re
import base64
import time
from PIL import Image, ImageDraw, ImageFont
import mdtex2html
from markdown import markdown
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import ClassNotFound, get_lexer_by_name, guess_lexer
from deepseek_vl.serve.app_modules.presets import (
ALREADY_CONVERTED_MARK,
BOX2COLOR,
MAX_IMAGE_SIZE,
MIN_IMAGE_SIZE
)
logger = logging.getLogger("gradio_logger")
def configure_logger():
logger = logging.getLogger("gradio_logger")
logger.setLevel(logging.DEBUG)
timestr = time.strftime("%Y%m%d-%H%M%S")
os.makedirs("deepseek_vl/serve/logs", exist_ok=True)
file_handler = logging.FileHandler(
f"deepseek_vl/serve/logs/{timestr}_gradio_log.log"
)
console_handler = logging.StreamHandler()
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
console_handler.setFormatter(formatter)
file_handler.setFormatter(formatter)
console_handler.setLevel(logging.INFO)
file_handler.setLevel(logging.INFO)
logger.addHandler(console_handler)
logger.addHandler(file_handler)
return logger
def strip_stop_words(x, stop_words):
for w in stop_words:
if w in x:
return x[: x.index(w)].strip()
return x.strip()
def format_output(history, text, x):
updated_history = history + [[text, x]]
a = [[y[0], convert_to_markdown(y[1])] for y in updated_history]
return a, updated_history
def markdown_to_html_with_syntax_highlight(md_str): # deprecated
def replacer(match):
lang = match.group(1) or "text"
code = match.group(2)
try:
lexer = get_lexer_by_name(lang, stripall=True)
except ValueError:
lexer = get_lexer_by_name("text", stripall=True)
formatter = HtmlFormatter()
highlighted_code = highlight(code, lexer, formatter)
return f'<pre><code class="{lang}">{highlighted_code}</code></pre>'
code_block_pattern = r"```(\w+)?\n([\s\S]+?)\n```"
md_str = re.sub(code_block_pattern, replacer, md_str, flags=re.MULTILINE)
html_str = markdown(md_str)
return html_str
def normalize_markdown(md_text: str) -> str: # deprecated
lines = md_text.split("\n")
normalized_lines = []
inside_list = False
for i, line in enumerate(lines):
if re.match(r"^(\d+\.|-|\*|\+)\s", line.strip()):
if not inside_list and i > 0 and lines[i - 1].strip() != "":
normalized_lines.append("")
inside_list = True
normalized_lines.append(line)
elif inside_list and line.strip() == "":
if i < len(lines) - 1 and not re.match(
r"^(\d+\.|-|\*|\+)\s", lines[i + 1].strip()
):
normalized_lines.append(line)
continue
else:
inside_list = False
normalized_lines.append(line)
return "\n".join(normalized_lines)
def convert_mdtext(md_text):
code_block_pattern = re.compile(r"```(.*?)(?:```|$)", re.DOTALL)
inline_code_pattern = re.compile(r"`(.*?)`", re.DOTALL)
code_blocks = code_block_pattern.findall(md_text)
non_code_parts = code_block_pattern.split(md_text)[::2]
result = []
for non_code, code in zip(non_code_parts, code_blocks + [""]):
if non_code.strip():
non_code = normalize_markdown(non_code)
if inline_code_pattern.search(non_code):
result.append(markdown(non_code, extensions=["tables"]))
else:
result.append(mdtex2html.convert(non_code, extensions=["tables"]))
if code.strip():
code = f"\n```{code}\n\n```"
code = markdown_to_html_with_syntax_highlight(code)
result.append(code)
result = "".join(result)
result += ALREADY_CONVERTED_MARK
return result
def convert_asis(userinput):
return f'<p style="white-space:pre-wrap;">{html.escape(userinput)}</p>{ALREADY_CONVERTED_MARK}'
def is_stop_word_or_prefix(s: str, stop_words: list) -> bool:
return any(s.endswith(stop_word) for stop_word in stop_words)
def detect_converted_mark(userinput):
return bool(userinput.endswith(ALREADY_CONVERTED_MARK))
def detect_language(code):
first_line = "" if code.startswith("\n") else code.strip().split("\n", 1)[0]
language = first_line.lower() if first_line else ""
code_without_language = code[len(first_line) :].lstrip() if first_line else code
return language, code_without_language
def convert_to_markdown(text):
text = text.replace("$", "&#36;")
text = text.replace("\r\n", "\n")
def replace_leading_tabs_and_spaces(line):
new_line = []
for char in line:
if char == "\t":
new_line.append("&#9;")
elif char == " ":
new_line.append("&nbsp;")
else:
break
return "".join(new_line) + line[len(new_line) :]
markdown_text = ""
lines = text.split("\n")
in_code_block = False
for line in lines:
if in_code_block is False and line.startswith("```"):
in_code_block = True
markdown_text += f"{line}\n"
elif in_code_block is True and line.startswith("```"):
in_code_block = False
markdown_text += f"{line}\n"
elif in_code_block:
markdown_text += f"{line}\n"
else:
line = replace_leading_tabs_and_spaces(line)
line = re.sub(r"^(#)", r"\\\1", line)
markdown_text += f"{line} \n"
return markdown_text
def add_language_tag(text):
def detect_language(code_block):
try:
lexer = guess_lexer(code_block)
return lexer.name.lower()
except ClassNotFound:
return ""
code_block_pattern = re.compile(r"(```)(\w*\n[^`]+```)", re.MULTILINE)
def replacement(match):
code_block = match.group(2)
if match.group(2).startswith("\n"):
language = detect_language(code_block)
return (
f"```{language}{code_block}```" if language else f"```\n{code_block}```"
)
else:
return match.group(1) + code_block + "```"
text2 = code_block_pattern.sub(replacement, text)
return text2
def is_variable_assigned(var_name: str) -> bool:
return var_name in locals()
def pil_to_base64(
image: Image.Image,
alt: str = "user upload image",
resize: bool = True,
max_size: int = MAX_IMAGE_SIZE,
min_size: int = MIN_IMAGE_SIZE
) -> str:
if resize:
max_hw, min_hw = max(image.size), min(image.size)
aspect_ratio = max_hw / min_hw
shortest_edge = int(min(max_size / aspect_ratio, min_size, min_hw))
longest_edge = int(shortest_edge * aspect_ratio)
W, H = image.size
if H > W:
H, W = longest_edge, shortest_edge
else:
H, W = shortest_edge, longest_edge
image = image.resize((W, H))
buffered = io.BytesIO()
image.save(buffered, format="JPEG")
img_b64_str = base64.b64encode(buffered.getvalue()).decode()
img_str = f'<img src="data:image/jpeg;base64,{img_b64_str}" alt="{alt}" />'
return img_str
def parse_ref_bbox(response, image):
try:
image_w, image_h = image.size  # PIL's Image.size is (width, height)
draw = ImageDraw.Draw(image)
ref = re.findall(r'<\|ref\|>.*?<\|/ref\|>', response)
bbox = re.findall(r'<\|det\|>.*?<\|/det\|>', response)
assert len(ref) == len(bbox)
if len(ref) == 0:
return
boxes, labels = [], []
for box, label in zip(bbox, ref):
box = box.replace('<|det|>', '').replace('<|/det|>', '')
label = label.replace('<|ref|>', '').replace('<|/ref|>', '')
box = box[1:-1]
for onebox in re.findall(r'\[.*?\]', box):
boxes.append(eval(onebox))
labels.append(label)
for indice, (box, label) in enumerate(zip(boxes, labels)):
box = (
int(box[0] / 999 * image_w),
int(box[1] / 999 * image_h),
int(box[2] / 999 * image_w),
int(box[3] / 999 * image_h),
)
box_color = BOX2COLOR[indice % len(BOX2COLOR.keys())]
box_width = 3
draw.rectangle(box, outline=box_color, width=box_width)
text_x = box[0]
text_y = box[1] - 20
text_color = box_color
font = ImageFont.truetype('./deepseek_vl/serve/assets/simsun.ttc', size=20)
draw.text((text_x, text_y), label, font=font, fill=text_color)
return image
except Exception:
return
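
For reference, an illustrative sketch of the grounding format parse_ref_bbox expects, inferred from the regular expressions above; the response string is a made-up example, and coordinates are on the 0-999 grid that the function rescales to the image size.

from PIL import Image

response = "<|ref|>the dog<|/ref|><|det|>[[132, 250, 580, 890]]<|/det|>"
image = Image.open("images/dog_a.png").convert("RGB")
annotated = parse_ref_bbox(response, image)
if annotated is not None:  # parse_ref_bbox returns None if parsing or drawing fails
    annotated.save("grounding_vis.jpg")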

View File

@@ -0,0 +1,100 @@
/**
* Copyright (c) 2023-2024 DeepSeek.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
// ==UserScript==
// @name Kelpy Codos
// @namespace https://github.com/Keldos-Li/Kelpy-Codos
// @version 1.0.5
// @author Keldos; https://keldos.me/
// @description Add copy button to PRE tags before CODE tag, for Chuanhu ChatGPT especially.
// Based on Chuanhu ChatGPT version: ac04408 (2023-3-22)
// @license GPL-3.0
// @grant none
// ==/UserScript==
(function () {
"use strict";
function addCopyButton(pre) {
var code = pre.querySelector("code");
if (!code) {
return; // if no <code> element is found, do not add the button
}
var firstChild = code.firstChild;
if (!firstChild) {
return; // if the <code> element has no child nodes, do not add the button
}
var button = document.createElement("button");
button.textContent = "\uD83D\uDCCE"; // 使用 📎 符号作为“复制”按钮的文本
button.style.position = "relative";
button.style.float = "right";
button.style.fontSize = "1em"; // 可选:调整按钮大小
button.style.background = "none"; // 可选:去掉背景颜色
button.style.border = "none"; // 可选:去掉边框
button.style.cursor = "pointer"; // 可选:显示指针样式
button.addEventListener("click", function () {
var range = document.createRange();
range.selectNodeContents(code);
range.setStartBefore(firstChild); // start the range before the first child node
var selection = window.getSelection();
selection.removeAllRanges();
selection.addRange(range);
try {
var success = document.execCommand("copy");
if (success) {
button.textContent = "\u2714";
setTimeout(function () {
button.textContent = "\uD83D\uDCCE"; // 恢复按钮为“复制”
}, 2000);
} else {
button.textContent = "\u2716";
}
} catch (e) {
console.error(e);
button.textContent = "\u2716";
}
selection.removeAllRanges();
});
code.insertBefore(button, firstChild); // insert the button before the first child element
}
function handleNewElements(mutationsList, observer) {
for (var mutation of mutationsList) {
if (mutation.type === "childList") {
for (var node of mutation.addedNodes) {
if (node.nodeName === "PRE") {
addCopyButton(node);
}
}
}
}
}
var observer = new MutationObserver(handleNewElements);
observer.observe(document.documentElement, {
childList: true,
subtree: true,
});
document.querySelectorAll("pre").forEach(addCopyButton);
})();

Binary file not shown (image asset, 61 KiB).

View File

@@ -0,0 +1,355 @@
/**
* Copyright (c) 2023-2024 DeepSeek.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
:root {
--chatbot-color-light: #f3f3f3;
--chatbot-color-dark: #121111;
}
/* status_display */
#status_display {
display: flex;
min-height: 2.5em;
align-items: flex-end;
justify-content: flex-end;
}
#status_display p {
font-size: 0.85em;
font-family: monospace;
color: var(--body-text-color-subdued);
}
/* usage_display */
#usage_display {
height: 1em;
}
#usage_display p {
padding: 0 1em;
font-size: 0.85em;
font-family: monospace;
color: var(--body-text-color-subdued);
}
/* list */
ol:not(.options),
ul:not(.options) {
padding-inline-start: 2em !important;
}
/* Thank @Keldos-Li for fixing it */
/* Light mode (default) */
#deepseek_chatbot {
background-color: var(--chatbot-color-light) !important;
color: #000000 !important;
}
[data-testid="bot"] {
background-color: #ffffff !important;
}
[data-testid="user"] {
background-color: #95ec69 !important;
}
/* Dark mode */
.dark #deepseek_chatbot {
background-color: var(--chatbot-color-dark) !important;
color: #ffffff !important;
}
.dark [data-testid="bot"] {
background-color: #2c2c2c !important;
}
.dark [data-testid="user"] {
background-color: #26b561 !important;
}
#deepseek_chatbot {
height: 100%;
min-height: 800px;
flex-grow: 1;
overflow: auto;
}
[class*="message"] {
border-radius: var(--radius-xl) !important;
border: none;
padding: var(--spacing-xl) !important;
font-size: var(--text-md) !important;
line-height: var(--line-md) !important;
min-height: calc(var(--text-md) * var(--line-md) + 2 * var(--spacing-xl));
min-width: calc(var(--text-md) * var(--line-md) + 2 * var(--spacing-xl));
}
[data-testid="bot"] {
max-width: 85%;
border-bottom-left-radius: 0 !important;
}
[data-testid="user"] {
max-width: 85%;
width: auto !important;
border-bottom-right-radius: 0 !important;
}
/* Table */
table {
margin: 1em 0;
border-collapse: collapse;
empty-cells: show;
}
td,
th {
border: 1.2px solid var(--border-color-primary) !important;
padding: 0.2em;
}
thead {
background-color: rgba(175, 184, 193, 0.2);
}
thead th {
padding: 0.5em 0.2em;
}
/* Inline code */
#deepseek_chatbot code {
display: inline;
white-space: break-spaces;
border-radius: 6px;
margin: 0 2px 0 2px;
padding: 0.2em 0.4em 0.1em 0.4em;
background-color: rgba(175, 184, 193, 0.2);
}
/* Code block */
#deepseek_chatbot pre code {
display: block;
overflow: auto;
white-space: pre;
background-color: #1c1d1e !important;
border-radius: 10px;
padding: 1.4em 1.2em 0em 1.4em;
margin: 1.2em 2em 1.2em 0.5em;
color: #fdf8f8;
box-shadow: 6px 6px 16px hsla(0, 0%, 0%, 0.2);
}
/* Hightlight */
#deepseek_chatbot .highlight {
background-color: transparent;
}
#deepseek_chatbot .highlight .hll {
background-color: #49483e;
}
#deepseek_chatbot .highlight .c {
color: #75715e;
} /* Comment */
#deepseek_chatbot .highlight .err {
color: #960050;
background-color: #1e0010;
} /* Error */
#deepseek_chatbot .highlight .k {
color: #66d9ef;
} /* Keyword */
#deepseek_chatbot .highlight .l {
color: #ae81ff;
} /* Literal */
#deepseek_chatbot .highlight .n {
color: #f8f8f2;
} /* Name */
#deepseek_chatbot .highlight .o {
color: #f92672;
} /* Operator */
#deepseek_chatbot .highlight .p {
color: #f8f8f2;
} /* Punctuation */
#deepseek_chatbot .highlight .ch {
color: #75715e;
} /* Comment.Hashbang */
#deepseek_chatbot .highlight .cm {
color: #75715e;
} /* Comment.Multiline */
#deepseek_chatbot .highlight .cp {
color: #75715e;
} /* Comment.Preproc */
#deepseek_chatbot .highlight .cpf {
color: #75715e;
} /* Comment.PreprocFile */
#deepseek_chatbot .highlight .c1 {
color: #75715e;
} /* Comment.Single */
#deepseek_chatbot .highlight .cs {
color: #75715e;
} /* Comment.Special */
#deepseek_chatbot .highlight .gd {
color: #f92672;
} /* Generic.Deleted */
#deepseek_chatbot .highlight .ge {
font-style: italic;
} /* Generic.Emph */
#deepseek_chatbot .highlight .gi {
color: #a6e22e;
} /* Generic.Inserted */
#deepseek_chatbot .highlight .gs {
font-weight: bold;
} /* Generic.Strong */
#deepseek_chatbot .highlight .gu {
color: #75715e;
} /* Generic.Subheading */
#deepseek_chatbot .highlight .kc {
color: #66d9ef;
} /* Keyword.Constant */
#deepseek_chatbot .highlight .kd {
color: #66d9ef;
} /* Keyword.Declaration */
#deepseek_chatbot .highlight .kn {
color: #f92672;
} /* Keyword.Namespace */
#deepseek_chatbot .highlight .kp {
color: #66d9ef;
} /* Keyword.Pseudo */
#deepseek_chatbot .highlight .kr {
color: #66d9ef;
} /* Keyword.Reserved */
#deepseek_chatbot .highlight .kt {
color: #66d9ef;
} /* Keyword.Type */
#deepseek_chatbot .highlight .ld {
color: #e6db74;
} /* Literal.Date */
#deepseek_chatbot .highlight .m {
color: #ae81ff;
} /* Literal.Number */
#deepseek_chatbot .highlight .s {
color: #e6db74;
} /* Literal.String */
#deepseek_chatbot .highlight .na {
color: #a6e22e;
} /* Name.Attribute */
#deepseek_chatbot .highlight .nb {
color: #f8f8f2;
} /* Name.Builtin */
#deepseek_chatbot .highlight .nc {
color: #a6e22e;
} /* Name.Class */
#deepseek_chatbot .highlight .no {
color: #66d9ef;
} /* Name.Constant */
#deepseek_chatbot .highlight .nd {
color: #a6e22e;
} /* Name.Decorator */
#deepseek_chatbot .highlight .ni {
color: #f8f8f2;
} /* Name.Entity */
#deepseek_chatbot .highlight .ne {
color: #a6e22e;
} /* Name.Exception */
#deepseek_chatbot .highlight .nf {
color: #a6e22e;
} /* Name.Function */
#deepseek_chatbot .highlight .nl {
color: #f8f8f2;
} /* Name.Label */
#deepseek_chatbot .highlight .nn {
color: #f8f8f2;
} /* Name.Namespace */
#deepseek_chatbot .highlight .nx {
color: #a6e22e;
} /* Name.Other */
#deepseek_chatbot .highlight .py {
color: #f8f8f2;
} /* Name.Property */
#deepseek_chatbot .highlight .nt {
color: #f92672;
} /* Name.Tag */
#deepseek_chatbot .highlight .nv {
color: #f8f8f2;
} /* Name.Variable */
#deepseek_chatbot .highlight .ow {
color: #f92672;
} /* Operator.Word */
#deepseek_chatbot .highlight .w {
color: #f8f8f2;
} /* Text.Whitespace */
#deepseek_chatbot .highlight .mb {
color: #ae81ff;
} /* Literal.Number.Bin */
#deepseek_chatbot .highlight .mf {
color: #ae81ff;
} /* Literal.Number.Float */
#deepseek_chatbot .highlight .mh {
color: #ae81ff;
} /* Literal.Number.Hex */
#deepseek_chatbot .highlight .mi {
color: #ae81ff;
} /* Literal.Number.Integer */
#deepseek_chatbot .highlight .mo {
color: #ae81ff;
} /* Literal.Number.Oct */
#deepseek_chatbot .highlight .sa {
color: #e6db74;
} /* Literal.String.Affix */
#deepseek_chatbot .highlight .sb {
color: #e6db74;
} /* Literal.String.Backtick */
#deepseek_chatbot .highlight .sc {
color: #e6db74;
} /* Literal.String.Char */
#deepseek_chatbot .highlight .dl {
color: #e6db74;
} /* Literal.String.Delimiter */
#deepseek_chatbot .highlight .sd {
color: #e6db74;
} /* Literal.String.Doc */
#deepseek_chatbot .highlight .s2 {
color: #e6db74;
} /* Literal.String.Double */
#deepseek_chatbot .highlight .se {
color: #ae81ff;
} /* Literal.String.Escape */
#deepseek_chatbot .highlight .sh {
color: #e6db74;
} /* Literal.String.Heredoc */
#deepseek_chatbot .highlight .si {
color: #e6db74;
} /* Literal.String.Interpol */
#deepseek_chatbot .highlight .sx {
color: #e6db74;
} /* Literal.String.Other */
#deepseek_chatbot .highlight .sr {
color: #e6db74;
} /* Literal.String.Regex */
#deepseek_chatbot .highlight .s1 {
color: #e6db74;
} /* Literal.String.Single */
#deepseek_chatbot .highlight .ss {
color: #e6db74;
} /* Literal.String.Symbol */
#deepseek_chatbot .highlight .bp {
color: #f8f8f2;
} /* Name.Builtin.Pseudo */
#deepseek_chatbot .highlight .fm {
color: #a6e22e;
} /* Name.Function.Magic */
#deepseek_chatbot .highlight .vc {
color: #f8f8f2;
} /* Name.Variable.Class */
#deepseek_chatbot .highlight .vg {
color: #f8f8f2;
} /* Name.Variable.Global */
#deepseek_chatbot .highlight .vi {
color: #f8f8f2;
} /* Name.Variable.Instance */
#deepseek_chatbot .highlight .vm {
color: #f8f8f2;
} /* Name.Variable.Magic */
#deepseek_chatbot .highlight .il {
color: #ae81ff;
} /* Literal.Number.Integer.Long */

View File

@@ -0,0 +1,22 @@
/**
* Copyright (c) 2023-2024 DeepSeek.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
// custom javascript here

Binary files not shown (eight binary assets, including images of 15 KiB, 81 KiB, 153 KiB, 266 KiB, 37 KiB, 190 KiB and 56 KiB).

172
deepseek_vl/serve/inference.py Executable file
View File

@@ -0,0 +1,172 @@
# Copyright (c) 2023-2024 DeepSeek.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from threading import Thread
from typing import List
import torch
import transformers
from transformers import (
AutoModelForCausalLM,
StoppingCriteria,
StoppingCriteriaList,
TextIteratorStreamer,
)
from deepseek_vl.models import DeepseekVLV2Processor, DeepseekVLV2ForCausalLM
from deepseek_vl.models.conversation import Conversation
def load_model(model_path, dtype=torch.bfloat16):
vl_chat_processor = DeepseekVLV2Processor.from_pretrained(model_path)
tokenizer = vl_chat_processor.tokenizer
vl_gpt: DeepseekVLV2ForCausalLM = AutoModelForCausalLM.from_pretrained(
model_path, trust_remote_code=True, torch_dtype=dtype
)
vl_gpt = vl_gpt.cuda().eval()
return tokenizer, vl_gpt, vl_chat_processor
def convert_conversation_to_prompts(conversation: Conversation):
conv_prompts = []
pil_images = []
messages = conversation.messages
for i in range(0, len(messages), 2):
if isinstance(messages[i][1], tuple):
text, images = messages[i][1]
else:
text, images = messages[i][1], []
pil_images.extend(images)
prompt = {
"role": messages[i][0],
"content": text,
}
response = {"role": messages[i + 1][0], "content": messages[i + 1][1]}
conv_prompts.extend([prompt, response])
return conv_prompts, pil_images
class StoppingCriteriaSub(StoppingCriteria):
def __init__(self, stops=[], encounters=1):
super().__init__()
self.stops = [stop.to("cuda") for stop in stops]
def __call__(
self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs
):
for stop in self.stops:
if input_ids.shape[-1] < len(stop):
continue
if torch.all((stop == input_ids[0][-len(stop) :])).item():
return True
return False
@torch.inference_mode()
def deepseek_generate(
conv_prompts: list,
pil_images: list,
vl_gpt: torch.nn.Module,
vl_chat_processor: DeepseekVLV2Processor,
tokenizer: transformers.PreTrainedTokenizer,
stop_words: list,
max_length: int = 256,
temperature: float = 1.0,
top_p: float = 1.0,
repetition_penalty=1.1,
):
prepare_inputs = vl_chat_processor.__call__(
conversations=conv_prompts,
images=pil_images,
inference_mode=True,
force_batchify=True,
system_prompt=""
).to(vl_gpt.device)
return generate(
vl_gpt,
tokenizer,
prepare_inputs,
max_length,
temperature,
repetition_penalty,
top_p,
stop_words,
)
@torch.inference_mode()
def generate(
vl_gpt,
tokenizer,
prepare_inputs,
max_gen_len: int = 256,
temperature: float = 0,
repetition_penalty=1.1,
top_p: float = 0.95,
stop_words: List[str] = [],
):
"""Stream the text output from the multimodality model with prompt and image inputs."""
inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)
streamer = TextIteratorStreamer(tokenizer)
stop_words_ids = [
torch.tensor(tokenizer.encode(stop_word)) for stop_word in stop_words
]
stopping_criteria = StoppingCriteriaList(
[StoppingCriteriaSub(stops=stop_words_ids)]
)
generation_config = dict(
inputs_embeds=inputs_embeds,
attention_mask=prepare_inputs.attention_mask,
pad_token_id=tokenizer.eos_token_id,
bos_token_id=tokenizer.bos_token_id,
eos_token_id=tokenizer.eos_token_id,
max_new_tokens=max_gen_len,
do_sample=True,
use_cache=True,
streamer=streamer,
stopping_criteria=stopping_criteria,
)
if temperature > 0:
generation_config.update(
{
"do_sample": True,
"top_p": top_p,
"temperature": temperature,
"repetition_penalty": repetition_penalty,
}
)
else:
generation_config["do_sample"] = False
thread = Thread(target=vl_gpt.generate, kwargs=generation_config)
thread.start()
yield from streamer
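
A minimal consumer of this streaming generator (illustrative only; the checkpoint id and the stop-word handling are assumptions, and `conversation` is the Conversation object built elsewhere by the Gradio app).

tokenizer, vl_gpt, vl_chat_processor = load_model("deepseek-ai/deepseek-vl2-tiny")  # model id is an assumption
conv_prompts, pil_images = convert_conversation_to_prompts(conversation)
stop_words = conversation.stop_str if isinstance(conversation.stop_str, list) else [conversation.stop_str]

answer = ""
for chunk in deepseek_generate(
    conv_prompts, pil_images, vl_gpt, vl_chat_processor, tokenizer,
    stop_words=stop_words, max_length=512, temperature=0.1, top_p=0.9,
):
    answer += chunk
# strip_stop_words from app_modules/utils.py can trim a trailing stop token, if any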

View File

@@ -0,0 +1,18 @@
# Copyright (c) 2023-2024 DeepSeek.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

80
deepseek_vl/utils/io.py Normal file
View File

@@ -0,0 +1,80 @@
# Copyright (c) 2023-2024 DeepSeek.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import json
from typing import Dict, List
import PIL.Image
import torch
from transformers import AutoModelForCausalLM

def load_pretrained_model(model_path: str):
from deepseek_vl.models.processing_deepseek_vl_v2 import DeepseekVLV2Processor
from deepseek_vl.models.modeling_deepseek_vl_v2 import DeepseekVLV2ForCausalLM
vl_chat_processor = DeepseekVLV2Processor.from_pretrained(model_path)
tokenizer = vl_chat_processor.tokenizer
vl_gpt: DeepseekVLV2ForCausalLM = AutoModelForCausalLM.from_pretrained(
model_path, trust_remote_code=True
)
vl_gpt = vl_gpt.to(torch.bfloat16).cuda().eval()
return tokenizer, vl_chat_processor, vl_gpt

def load_pil_images(conversations: List[Dict[str, str]]) -> List[PIL.Image.Image]:
"""
Args:
        conversations (List[Dict[str, str]]): the conversation, given as a list of messages. An example:
[
{
"role": "User",
"content": "<image>\nExtract all information from this image and convert them into markdown format.",
"images": ["./examples/table_datasets.png"]
},
{"role": "Assistant", "content": ""},
]
Returns:
pil_images (List[PIL.Image.Image]): the list of PIL images.
"""
pil_images = []
for message in conversations:
if "images" not in message:
continue
for image_path in message["images"]:
pil_img = PIL.Image.open(image_path)
pil_img = pil_img.convert("RGB")
pil_images.append(pil_img)
return pil_images

def load_json(filepath):
with open(filepath, "r") as f:
data = json.load(f)
return data
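
Taken together, these helpers reduce setup to a few lines. A minimal sketch follows; the checkpoint id is the default from inference.py later in this commit, conversation.json is a hypothetical file following the schema shown in the docstring above, and a CUDA device is assumed because load_pretrained_model moves the model to GPU:

tokenizer, vl_chat_processor, vl_gpt = load_pretrained_model("deepseek-ai/deepseek-vl2-27b-moe")
conversation = load_json("./conversation.json")  # hypothetical path, same schema as the docstring example
pil_images = load_pil_images(conversation)       # one RGB PIL.Image per path listed under "images"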

1
images/badge.svg Normal file

File diff suppressed because one or more lines are too long. (6.0 KiB)

BIN
images/dog_a.png Normal file

Binary file not shown. (204 KiB)

BIN
images/dog_b.png Normal file

Binary file not shown. (356 KiB)

BIN
images/dog_c.png Normal file

Binary file not shown. (418 KiB)

BIN
images/dog_d.png Normal file

Binary file not shown. (363 KiB)

BIN
images/github_demo.png Normal file

Binary file not shown. (2.1 MiB)

BIN
images/logo.png Normal file

Binary file not shown. (8.5 KiB)

22
images/logo.svg Normal file
View File

@ -0,0 +1,22 @@
<svg width="195.000000" height="41.359375" viewBox="0 0 195 41.3594" fill="none" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<desc>
Created with Pixso.
</desc>
<defs>
<clipPath id="clip30_2029">
<rect id="_图层_1" width="134.577469" height="25.511124" transform="translate(60.422485 10.022217)" fill="white"/>
</clipPath>
</defs>
<g clip-path="url(#clip30_2029)">
<path id="path" d="M119.508 30.113L117.562 30.113L117.562 27.0967L119.508 27.0967C120.713 27.0967 121.931 26.7961 122.715 25.9614C123.5 25.1265 123.796 23.8464 123.796 22.5664C123.796 21.2864 123.512 20.0063 122.715 19.1716C121.919 18.3369 120.713 18.0364 119.508 18.0364C118.302 18.0364 117.085 18.3369 116.3 19.1716C115.515 20.0063 115.219 21.2864 115.219 22.5664L115.219 34.9551L111.806 34.9551L111.806 15.031L115.219 15.031L115.219 16.2998L115.845 16.2998C115.913 16.2219 115.981 16.1553 116.049 16.0884C116.903 15.3093 118.211 15.031 119.496 15.031C121.51 15.031 123.523 15.532 124.843 16.9233C126.162 18.3145 126.629 20.4517 126.629 22.5776C126.629 24.7036 126.151 26.8296 124.843 28.2319C123.535 29.6345 121.51 30.113 119.508 30.113Z" fill-rule="nonzero" fill="#4D6BFE"/>
<path id="path" d="M67.5664 15.5654L69.5117 15.5654L69.5117 18.5818L67.5664 18.5818C66.3606 18.5818 65.1434 18.8823 64.3585 19.717C63.5736 20.552 63.2778 21.832 63.2778 23.1121C63.2778 24.3921 63.5623 25.6721 64.3585 26.5068C65.1548 27.3418 66.3606 27.6423 67.5664 27.6423C68.7722 27.6423 69.9895 27.3418 70.7744 26.5068C71.5593 25.6721 71.8551 24.3921 71.8551 23.1121L71.8551 10.7124L75.2677 10.7124L75.2677 30.6475L71.8551 30.6475L71.8551 29.3787L71.2294 29.3787C71.1611 29.4565 71.0929 29.5234 71.0247 29.5901C70.1715 30.3691 68.8633 30.6475 67.5779 30.6475C65.5643 30.6475 63.5509 30.1467 62.2313 28.7554C60.9117 27.364 60.4453 25.2268 60.4453 23.1008C60.4453 20.9749 60.9231 18.8489 62.2313 17.4465C63.5509 16.0552 65.5643 15.5654 67.5664 15.5654Z" fill-rule="nonzero" fill="#4D6BFE"/>
<path id="path" d="M92.3881 22.845L92.3881 24.0581L83.299 24.0581L83.299 21.6428L89.328 21.6428C89.1914 20.7634 88.8729 19.9397 88.3042 19.3386C87.4851 18.4705 86.2224 18.1589 84.9711 18.1589C83.7198 18.1589 82.4572 18.4705 81.6381 19.3386C80.819 20.2068 80.5232 21.5315 80.5232 22.845C80.5232 24.1582 80.819 25.4939 81.6381 26.3511C82.4572 27.208 83.7198 27.531 84.9711 27.531C86.2224 27.531 87.4851 27.2192 88.3042 26.3511C88.418 26.2285 88.5203 26.095 88.6227 25.9614L91.9899 25.9614C91.6941 27.0078 91.2277 27.9539 90.5225 28.6885C89.1573 30.1243 87.0529 30.6475 84.9711 30.6475C82.8894 30.6475 80.7849 30.1355 79.4198 28.6885C78.0547 27.2415 77.5542 25.0376 77.5542 22.845C77.5542 20.6521 78.0433 18.437 79.4198 17.0012C80.7963 15.5654 82.8894 15.0422 84.9711 15.0422C87.0529 15.0422 89.1573 15.5542 90.5225 17.0012C91.8988 18.4482 92.3881 20.6521 92.3881 22.845Z" fill-rule="nonzero" fill="#4D6BFE"/>
<path id="path" d="M109.52 22.845L109.52 24.0581L100.431 24.0581L100.431 21.6428L106.46 21.6428C106.323 20.7634 106.005 19.9397 105.436 19.3386C104.617 18.4705 103.354 18.1589 102.103 18.1589C100.852 18.1589 99.5889 18.4705 98.7698 19.3386C97.9507 20.2068 97.6549 21.5315 97.6549 22.845C97.6549 24.1582 97.9507 25.4939 98.7698 26.3511C99.5889 27.208 100.852 27.531 102.103 27.531C103.354 27.531 104.617 27.2192 105.436 26.3511C105.55 26.2285 105.652 26.095 105.754 25.9614L109.122 25.9614C108.826 27.0078 108.359 27.9539 107.654 28.6885C106.289 30.1243 104.185 30.6475 102.103 30.6475C100.021 30.6475 97.9166 30.1355 96.5515 28.6885C95.1864 27.2415 94.6859 25.0376 94.6859 22.845C94.6859 20.6521 95.175 18.437 96.5515 17.0012C97.928 15.5654 100.021 15.0422 102.103 15.0422C104.185 15.0422 106.289 15.5542 107.654 17.0012C109.031 18.4482 109.52 20.6521 109.52 22.845Z" fill-rule="nonzero" fill="#4D6BFE"/>
<path id="path" d="M136.355 30.6475C138.437 30.6475 140.541 30.3469 141.906 29.49C143.271 28.6328 143.772 27.3306 143.772 26.0393C143.772 24.7483 143.282 23.4348 141.906 22.5889C140.541 21.7429 138.437 21.4312 136.355 21.4312C135.467 21.4312 134.648 21.3088 134.068 20.9861C133.488 20.6521 133.272 20.1511 133.272 19.6504C133.272 19.1494 133.477 18.6375 134.068 18.3147C134.648 17.9807 135.547 17.8694 136.434 17.8694C137.322 17.8694 138.22 17.9919 138.801 18.3147C139.381 18.6487 139.597 19.1494 139.597 19.6504L143.066 19.6504C143.066 18.3591 142.623 17.0457 141.383 16.2C140.143 15.354 138.243 15.0422 136.355 15.0422C134.466 15.0422 132.567 15.3428 131.327 16.2C130.087 17.0569 129.643 18.3591 129.643 19.6504C129.643 20.9414 130.087 22.2549 131.327 23.1008C132.567 23.9468 134.466 24.2585 136.355 24.2585C137.333 24.2585 138.414 24.3809 139.062 24.7036C139.711 25.0266 139.938 25.5386 139.938 26.0393C139.938 26.5403 139.711 27.0522 139.062 27.375C138.414 27.6978 137.424 27.8203 136.446 27.8203C135.467 27.8203 134.466 27.6978 133.829 27.375C133.192 27.0522 132.953 26.5403 132.953 26.0393L128.949 26.0393C128.949 27.3306 129.438 28.644 130.815 29.49C132.191 30.3359 134.273 30.6475 136.355 30.6475Z" fill-rule="nonzero" fill="#4D6BFE"/>
<path id="path" d="M160.903 22.845L160.903 24.0581L151.814 24.0581L151.814 21.6428L157.843 21.6428C157.707 20.7634 157.388 19.9397 156.82 19.3386C156 18.4705 154.738 18.1589 153.486 18.1589C152.235 18.1589 150.972 18.4705 150.153 19.3386C149.334 20.2068 149.039 21.5315 149.039 22.845C149.039 24.1582 149.334 25.4939 150.153 26.3511C150.972 27.208 152.235 27.531 153.486 27.531C154.738 27.531 156 27.2192 156.82 26.3511C156.933 26.2285 157.036 26.095 157.138 25.9614L160.505 25.9614C160.209 27.0078 159.743 27.9539 159.038 28.6885C157.673 30.1243 155.568 30.6475 153.486 30.6475C151.405 30.6475 149.3 30.1355 147.935 28.6885C146.57 27.2415 146.07 25.0376 146.07 22.845C146.07 20.6521 146.559 18.437 147.935 17.0012C149.312 15.5654 151.405 15.0422 153.486 15.0422C155.568 15.0422 157.673 15.5542 159.038 17.0012C160.414 18.4482 160.903 20.6521 160.903 22.845Z" fill-rule="nonzero" fill="#4D6BFE"/>
<path id="path" d="M178.035 22.845L178.035 24.0581L168.946 24.0581L168.946 21.6428L174.975 21.6428C174.839 20.7634 174.52 19.9397 173.951 19.3386C173.132 18.4705 171.87 18.1589 170.618 18.1589C169.367 18.1589 168.104 18.4705 167.285 19.3386C166.466 20.2068 166.17 21.5315 166.17 22.845C166.17 24.1582 166.466 25.4939 167.285 26.3511C168.104 27.208 169.367 27.531 170.618 27.531C171.87 27.531 173.132 27.2192 173.951 26.3511C174.065 26.2285 174.167 26.095 174.27 25.9614L177.637 25.9614C177.341 27.0078 176.875 27.9539 176.17 28.6885C174.804 30.1243 172.7 30.6475 170.618 30.6475C168.536 30.6475 166.432 30.1355 165.067 28.6885C163.702 27.2415 163.201 25.0376 163.201 22.845C163.201 20.6521 163.69 18.437 165.067 17.0012C166.443 15.5654 168.536 15.0422 170.618 15.0422C172.7 15.0422 174.804 15.5542 176.17 17.0012C177.546 18.4482 178.035 20.6521 178.035 22.845Z" fill-rule="nonzero" fill="#4D6BFE"/>
<rect id="rect" x="180.321533" y="10.022217" width="3.412687" height="20.625223" fill="#4D6BFE"/>
<path id="polygon" d="M189.559 22.3772L195.155 30.6475L190.935 30.6475L185.338 22.3772L190.935 15.7322L195.155 15.7322L189.559 22.3772Z" fill-rule="nonzero" fill="#4D6BFE"/>
</g>
<path id="path" d="M55.6128 3.47119C55.0175 3.17944 54.7611 3.73535 54.413 4.01782C54.2939 4.10889 54.1932 4.22729 54.0924 4.33667C53.2223 5.26587 52.2057 5.87646 50.8776 5.80347C48.9359 5.69409 47.2781 6.30469 45.8126 7.78979C45.5012 5.9585 44.4663 4.86499 42.8909 4.16357C42.0667 3.79907 41.2332 3.43457 40.6561 2.64185C40.2532 2.07715 40.1432 1.44849 39.9418 0.828857C39.8135 0.455322 39.6853 0.0725098 39.2548 0.00878906C38.7877 -0.0639648 38.6045 0.327637 38.4213 0.655762C37.6886 1.99512 37.4047 3.47119 37.4321 4.96533C37.4962 8.32739 38.9159 11.0059 41.7369 12.9102C42.0575 13.1289 42.1399 13.3474 42.0392 13.6665C41.8468 14.3225 41.6178 14.9602 41.4164 15.6162C41.2881 16.0354 41.0957 16.1265 40.647 15.9441C39.0991 15.2974 37.7618 14.3406 36.5803 13.1836C34.5745 11.2429 32.761 9.10181 30.4988 7.42529C29.9675 7.03345 29.4363 6.66919 28.8867 6.32275C26.5786 4.08154 29.189 2.24097 29.7935 2.02246C30.4254 1.79468 30.0133 1.01099 27.9708 1.02026C25.9283 1.0293 24.0599 1.71265 21.6786 2.62378C21.3306 2.7605 20.9641 2.8606 20.5886 2.94263C18.4271 2.53271 16.1831 2.44141 13.8384 2.70581C9.42371 3.19775 5.89758 5.28418 3.30554 8.84668C0.191406 13.1289 -0.54126 17.9941 0.356323 23.0691C1.29968 28.4172 4.02905 32.8452 8.22388 36.3076C12.5745 39.8972 17.5845 41.6558 23.2997 41.3186C26.771 41.1182 30.6361 40.6536 34.9958 36.9636C36.0948 37.5103 37.2489 37.7288 39.1632 37.8928C40.6378 38.0295 42.0575 37.8201 43.1565 37.5923C44.8784 37.2278 44.7594 35.6333 44.1366 35.3418C39.09 32.9912 40.1981 33.9478 39.1907 33.1733C41.7552 30.1394 45.6204 26.9868 47.1316 16.7732C47.2506 15.9624 47.1499 15.4521 47.1316 14.7961C47.1224 14.3953 47.214 14.2405 47.672 14.1948C48.9359 14.0491 50.1632 13.7029 51.2898 13.0833C54.5596 11.2976 55.8784 8.36377 56.1898 4.84692C56.2357 4.30933 56.1807 3.75342 55.6128 3.47119ZM27.119 35.123C22.2281 31.2783 19.856 30.0117 18.8759 30.0664C17.96 30.1211 18.1249 31.1689 18.3263 31.8523C18.537 32.5264 18.8118 32.9912 19.1964 33.5833C19.462 33.9751 19.6453 34.5581 18.9309 34.9956C17.3555 35.9705 14.6169 34.6675 14.4886 34.6038C11.3014 32.7268 8.63611 30.2485 6.75842 26.8594C4.94495 23.5974 3.89172 20.0989 3.71765 16.3633C3.67188 15.4614 3.9375 15.1423 4.83508 14.9785C6.0166 14.7598 7.23474 14.7141 8.41626 14.8872C13.408 15.6162 17.6577 17.8484 21.2206 21.3835C23.2539 23.397 24.7926 25.8025 26.3772 28.1531C28.0624 30.6494 29.8759 33.0276 32.184 34.9773C32.9991 35.6606 33.6494 36.1799 34.2722 36.5627C32.3947 36.7722 29.2622 36.8179 27.119 35.123ZM29.4637 20.0442C29.4637 19.6433 29.7843 19.3245 30.1874 19.3245C30.2789 19.3245 30.3613 19.3425 30.4346 19.3699C30.5354 19.4065 30.627 19.4612 30.7002 19.543C30.8285 19.6707 30.9017 19.8528 30.9017 20.0442C30.9017 20.4451 30.5812 20.7639 30.1782 20.7639C29.7751 20.7639 29.4637 20.4451 29.4637 20.0442ZM36.7452 23.7798C36.2781 23.9712 35.811 24.135 35.3622 24.1533C34.6661 24.1897 33.9059 23.9072 33.4938 23.561C32.8527 23.0234 32.3947 22.7229 32.2023 21.7844C32.1199 21.3835 32.1656 20.7639 32.239 20.4087C32.4038 19.6433 32.2206 19.1514 31.6803 18.7048C31.2406 18.3403 30.6819 18.2402 30.0682 18.2402C29.8392 18.2402 29.6287 18.1399 29.4729 18.0579C29.2164 17.9304 29.0059 17.6116 29.2073 17.2197C29.2714 17.0923 29.5829 16.7825 29.6561 16.7278C30.4896 16.2539 31.4513 16.4089 32.3397 16.7642C33.1641 17.1013 33.7869 17.7209 34.6844 18.5955C35.6003 19.6523 35.7651 19.9441 36.2872 20.7366C36.6995 21.3562 37.075 21.9939 37.3314 22.7229C37.4871 23.1785 37.2856 23.552 36.7452 23.7798Z" fill-rule="nonzero" fill="#4D6BFE"/>
</svg>

Binary file not shown. (217 KiB)

BIN
images/vl2_teaser.jpeg Normal file

Binary file not shown. (92 KiB)

137
inference.py Normal file
View File

@ -0,0 +1,137 @@
# Copyright (c) 2023-2024 DeepSeek.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

from argparse import ArgumentParser
from typing import List, Dict
import torch
from transformers import AutoModelForCausalLM
import PIL.Image
from deepseek_vl.models import DeepseekVLV2ForCausalLM, DeepseekVLV2Processor
from deepseek_vl.serve.app_modules.utils import parse_ref_bbox

def load_pil_images(conversations: List[Dict[str, str]]) -> List[PIL.Image.Image]:
"""
Args:
        conversations (List[Dict[str, str]]): the conversation, given as a list of messages. An example:
[
{
"role": "User",
"content": "<image>\nExtract all information from this image and convert them into markdown format.",
"images": ["./examples/table_datasets.png"]
},
{"role": "Assistant", "content": ""},
]
Returns:
pil_images (List[PIL.Image.Image]): the list of PIL images.
"""
pil_images = []
for message in conversations:
if "images" not in message:
continue
for image_path in message["images"]:
pil_img = PIL.Image.open(image_path)
pil_img = pil_img.convert("RGB")
pil_images.append(pil_img)
return pil_images

def main(args):
dtype = torch.bfloat16
# specify the path to the model
model_path = args.model_path
vl_chat_processor: DeepseekVLV2Processor = DeepseekVLV2Processor.from_pretrained(model_path)
tokenizer = vl_chat_processor.tokenizer
vl_gpt: DeepseekVLV2ForCausalLM = AutoModelForCausalLM.from_pretrained(
model_path,
trust_remote_code=True,
torch_dtype=dtype
)
vl_gpt = vl_gpt.cuda().eval()
# single image conversation example
conversation = [
{
"role": "<|User|>",
"content": "<image>\n<|ref|>The giraffe at the back.<|/ref|>.",
"images": ["./images/visual_grounding.jpeg"],
},
{"role": "<|Assistant|>", "content": ""},
]
# load images and prepare for inputs
pil_images = load_pil_images(conversation)
    prepare_inputs = vl_chat_processor(
conversations=conversation,
images=pil_images,
force_batchify=True,
system_prompt=""
).to(vl_gpt.device, dtype=dtype)
with torch.no_grad():
# run image encoder to get the image embeddings
inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)
# run the model to get the response
outputs = vl_gpt.generate(
inputs_embeds=inputs_embeds,
attention_mask=prepare_inputs.attention_mask,
pad_token_id=tokenizer.eos_token_id,
bos_token_id=tokenizer.bos_token_id,
eos_token_id=tokenizer.eos_token_id,
max_new_tokens=1024,
do_sample=False,
            # sampling alternative:
            # do_sample=True,
            # temperature=1.0,
            # top_p=0.9,
            # repetition_penalty=1.1,
use_cache=True,
)
answer = tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=False)
print(f"{prepare_inputs['sft_format'][0]}", answer)
vg_image = parse_ref_bbox(answer, image=pil_images[0])
if vg_image is not None:
vg_image.save("./vg.jpg", format="JPEG", quality=85)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--model_path", type=str, required=True,
default="deepseek-ai/deepseek-vl2-27b-moe",
help="model name or local path to the model")
args = parser.parse_args()
main(args)

53
pyproject.toml Normal file
View File

@ -0,0 +1,53 @@
[build-system]
requires = ["setuptools>=40.6.0", "wheel"]
build-backend = "setuptools.build_meta"
[project]
name = "deepseek_vl"
version = "1.0.0"
description = "DeepSeek-VL"
authors = [{name = "DeepSeek-AI"}]
license = {file = "LICENSE-CODE"}
urls = {homepage = "https://github.com/deepseek-ai/DeepSeek-VL"}
readme = "README.md"
requires-python = ">=3.8"
dependencies = [
"torch>=2.0.1",
"transformers>=4.38.2",
"timm>=0.9.16",
"accelerate",
"sentencepiece",
"attrdict",
"einops",
]
[project.optional-dependencies]
gradio = [
"gradio==3.48.0",
"gradio-client==0.6.1",
"mdtex2html==1.3.0",
"pypinyin==0.50.0",
"tiktoken==0.5.2",
"tqdm==4.64.0",
"colorama==0.4.5",
"Pygments==2.12.0",
"markdown==3.4.1",
"SentencePiece==0.1.96"
]
lint = [
"isort",
"black[jupyter] >= 22.6.0",
"pylint[spelling] >= 2.15.0",
"flake8",
"flake8-bugbear",
"flake8-comprehensions",
"flake8-docstrings",
"flake8-pyi",
"flake8-simplify",
"ruff",
"pyenchant",
"pre-commit",
]
[tool.setuptools]
packages = {find = {exclude = ["images"]}}

19
requirements.txt Normal file
View File

@ -0,0 +1,19 @@
torch==2.0.1
transformers>=4.38.2
timm>=0.9.16
accelerate
attrdict
einops
# for gradio demo
gradio==3.48.0
gradio-client==0.6.1
mdtex2html==1.3.0
pypinyin==0.50.0
tiktoken==0.5.2
tqdm==4.64.0
colorama==0.4.5
Pygments==2.12.0
markdown==3.4.1
SentencePiece==0.1.96