Mirror of https://github.com/deepseek-ai/Janus.git (synced 2025-04-18 17:48:57 -04:00)
change gradio demo
parent a42ad6dab3
commit a897652664
@@ -11,35 +11,58 @@ import time

# import spaces # Import spaces for ZeroGPU compatibility


# Load model and processor
model_path = "deepseek-ai/Janus-Pro-7B"
config = AutoConfig.from_pretrained(model_path)
language_config = config.language_config
language_config._attn_implementation = 'eager'
vl_gpt = AutoModelForCausalLM.from_pretrained(model_path,
                                              language_config=language_config,
                                              trust_remote_code=True)
if torch.cuda.is_available():
    vl_gpt = vl_gpt.to(torch.bfloat16).cuda()
else:
    vl_gpt = vl_gpt.to(torch.float16)

vl_chat_processor = VLChatProcessor.from_pretrained(model_path)
tokenizer = vl_chat_processor.tokenizer

# Global variables to store model and processor (initially for 7B)
vl_gpt = None
vl_chat_processor = None
tokenizer = None
cuda_device = 'cuda' if torch.cuda.is_available() else 'cpu'
current_model_path = "deepseek-ai/Janus-Pro-7B"  # Default model


def load_model_components(model_path):
    global vl_gpt, vl_chat_processor, tokenizer, current_model_path  # Declare current_model_path as global here

    if vl_gpt is not None and current_model_path == model_path:
        print(f"Using cached model: {model_path}")
        return vl_gpt, vl_chat_processor, tokenizer

    print(f"Loading model: {model_path}")
    config = AutoConfig.from_pretrained(model_path)
    language_config = config.language_config
    language_config._attn_implementation = 'eager'
    vl_gpt_local = AutoModelForCausalLM.from_pretrained(model_path,
                                                        language_config=language_config,
                                                        trust_remote_code=True)
    if torch.cuda.is_available():
        vl_gpt_local = vl_gpt_local.to(torch.bfloat16).cuda()
    else:
        vl_gpt_local = vl_gpt_local.to(torch.float16)

    vl_chat_processor_local = VLChatProcessor.from_pretrained(model_path)
    tokenizer_local = vl_chat_processor_local.tokenizer

    vl_gpt = vl_gpt_local
    vl_chat_processor = vl_chat_processor_local
    tokenizer = tokenizer_local
    current_model_path = model_path
    print(f"Model loaded: {model_path}")
    return vl_gpt, vl_chat_processor, tokenizer


@torch.inference_mode()
# @spaces.GPU(duration=120)
# @spaces.GPU(duration=120)
# Multimodal Understanding function
def multimodal_understanding(image, question, seed, top_p, temperature):
def multimodal_understanding(model_name, image, question, seed, top_p, temperature):
    # Load model based on selection
    load_model_components(model_name)

    # Clear CUDA cache before generating
    torch.cuda.empty_cache()

    # set seed
    torch.manual_seed(seed)
    np.random.seed(seed)
    torch.cuda.manual_seed(seed)

    conversation = [
        {
            "role": "<|User|>",
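Note on the hunk above: it replaces the eager module-level load with a lazy loader that caches the components keyed on the model path, so switching the dropdown only reloads when the path actually changes. A minimal sketch of how the loader could be exercised outside Gradio (illustrative only; it assumes the definitions above are in scope and is not part of the commit):

# Illustrative only -- exercises load_model_components() outside the Gradio UI.
if __name__ == "__main__":
    # First call loads Janus-Pro-1B; the repeat call should print "Using cached model: ...".
    load_model_components("deepseek-ai/Janus-Pro-1B")
    load_model_components("deepseek-ai/Janus-Pro-1B")
    # A different path replaces the cached components with the other checkpoint.
    load_model_components("deepseek-ai/Janus-Pro-7B")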
@@ -48,15 +71,15 @@ def multimodal_understanding(image, question, seed, top_p, temperature):
        },
        {"role": "<|Assistant|>", "content": ""},
    ]

    pil_images = [Image.fromarray(image)]
    prepare_inputs = vl_chat_processor(
        conversations=conversation, images=pil_images, force_batchify=True
    ).to(cuda_device, dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float16)

    inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)

    outputs = vl_gpt.language_model.generate(
        inputs_embeds=inputs_embeds,
        attention_mask=prepare_inputs.attention_mask,
@@ -69,7 +92,7 @@ def multimodal_understanding(image, question, seed, top_p, temperature):
        temperature=temperature,
        top_p=top_p,
    )

    answer = tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=True)
    return answer
@@ -84,7 +107,7 @@ def generate(input_ids,
             patch_size: int = 16):
    # Clear CUDA cache before generating
    torch.cuda.empty_cache()

    tokens = torch.zeros((parallel_size * 2, len(input_ids)), dtype=torch.int).to(cuda_device)
    for i in range(parallel_size * 2):
        tokens[i, :] = input_ids
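Side note on generate(): allocating parallel_size * 2 token rows is consistent with classifier-free guidance, where each sampled image carries a conditional and an unconditional copy of the prompt. This is my reading of code outside the shown hunk; the usual per-step mix, with cfg_weight playing the role of the demo's "CFG Weight" slider, would look roughly like:

import torch

def cfg_combine(logit_cond: torch.Tensor, logit_uncond: torch.Tensor, cfg_weight: float) -> torch.Tensor:
    # Standard classifier-free guidance mix of per-step logits (illustrative sketch, not the demo's code).
    return logit_uncond + cfg_weight * (logit_cond - logit_uncond)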
@@ -113,7 +136,6 @@ def generate(input_ids,
        img_embeds = vl_gpt.prepare_gen_img_embeds(next_token)
        inputs_embeds = img_embeds.unsqueeze(dim=1)


    patches = vl_gpt.gen_vision_model.decode_code(generated_tokens.to(dtype=torch.int),
                                                  shape=[parallel_size, 8, width // patch_size, height // patch_size])
@@ -133,10 +155,10 @@ def unpack(dec, width, height, parallel_size=5):

@torch.inference_mode()
# @spaces.GPU(duration=120)  # Specify a duration to avoid timeout
def generate_image(prompt,
                   seed=None,
                   guidance=5,
                   t2i_temperature=1.0):
def generate_image(model_name, prompt, seed, guidance, t2i_temperature, parallel_size_slider):
    # Load model based on selection
    load_model_components(model_name)

    # Clear CUDA cache and avoid tracking gradients
    torch.cuda.empty_cache()
    # Set the seed for reproducible results
@@ -146,8 +168,8 @@ def generate_image(prompt,
    np.random.seed(seed)
    width = 384
    height = 384
    parallel_size = 5
    parallel_size = int(parallel_size_slider)  # Use slider value for parallel_size

    with torch.no_grad():
        messages = [{'role': '<|User|>', 'content': prompt},
                    {'role': '<|Assistant|>', 'content': ''}]
@@ -155,7 +177,7 @@ def generate_image(prompt,
            sft_format=vl_chat_processor.sft_format,
            system_prompt='')
        text = text + vl_chat_processor.image_start_tag

        input_ids = torch.LongTensor(tokenizer.encode(text))
        output, patches = generate(input_ids,
                                   width // 16 * 16,
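Side note: width and height stay fixed at 384 and generate() decodes on a width // patch_size grid with patch_size = 16, so each sample corresponds to a 24 x 24 = 576-token image; the new parallel_size slider only changes how many such samples are produced per prompt. A small arithmetic sketch using those constants:

# Illustrative arithmetic only; the constants mirror the demo's fixed values.
width = height = 384
patch_size = 16
tokens_per_side = width // patch_size           # 24
tokens_per_image = tokens_per_side ** 2         # 576 image tokens per sample
parallel_size = 5                               # new slider range is 1-5
print(tokens_per_image, parallel_size * tokens_per_image)  # 576 2880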
@@ -169,75 +191,79 @@ def generate_image(prompt,
                                   parallel_size=parallel_size)

        return [Image.fromarray(images[i]).resize((768, 768), Image.LANCZOS) for i in range(parallel_size)]


# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown(value="# Multimodal Understanding")
    with gr.Row():
        image_input = gr.Image()
        with gr.Column():
            question_input = gr.Textbox(label="Question")
            und_seed_input = gr.Number(label="Seed", precision=0, value=42)
            top_p = gr.Slider(minimum=0, maximum=1, value=0.95, step=0.05, label="top_p")
            temperature = gr.Slider(minimum=0, maximum=1, value=0.1, step=0.05, label="temperature")

    understanding_button = gr.Button("Chat")
    understanding_output = gr.Textbox(label="Response")
    gr.Markdown(value="# Multimodal Model Demo: Janus-Pro-7B & 1B")

    examples_inpainting = gr.Examples(
        label="Multimodal Understanding examples",
        examples=[
            [
                "explain this meme",
                "images/doge.png",
            ],
            [
                "Convert the formula into latex code.",
                "images/equation.png",
            ],
        ],
        inputs=[question_input, image_input],
    model_selector = gr.Dropdown(
        ["deepseek-ai/Janus-Pro-7B", "deepseek-ai/Janus-Pro-1B"],
        value="deepseek-ai/Janus-Pro-7B", label="Select Model"
    )

    gr.Markdown(value="# Text-to-Image Generation")

    with gr.Row():
        cfg_weight_input = gr.Slider(minimum=1, maximum=10, value=5, step=0.5, label="CFG Weight")
        t2i_temperature = gr.Slider(minimum=0, maximum=1, value=1.0, step=0.05, label="temperature")
    with gr.Tab("Multimodal Understanding"):
        with gr.Row():
            image_input = gr.Image()
            with gr.Column():
                question_input = gr.Textbox(label="Question")
                und_seed_input = gr.Number(label="Seed", precision=0, value=42)
                top_p = gr.Slider(minimum=0, maximum=1, value=0.95, step=0.05, label="top_p")
                temperature = gr.Slider(minimum=0, maximum=1, value=0.1, step=0.05, label="temperature")
    prompt_input = gr.Textbox(label="Prompt. (Prompt in more detail can help produce better images!)")
    seed_input = gr.Number(label="Seed (Optional)", precision=0, value=12345)
        understanding_button = gr.Button("Chat")
        understanding_output = gr.Textbox(label="Response")

    generation_button = gr.Button("Generate Images")
        examples_inpainting = gr.Examples(
            label="Multimodal Understanding examples",
            examples=[
                [
                    "explain this meme",
                    "images/doge.png",
                ],
                [
                    "Convert the formula into latex code.",
                    "images/equation.png",
                ],
            ],
            inputs=[question_input, image_input],
        )

    image_output = gr.Gallery(label="Generated Images", columns=2, rows=2, height=300)
    with gr.Tab("Text-to-Image Generation"):
        with gr.Row():
            cfg_weight_input = gr.Slider(minimum=1, maximum=10, value=5, step=0.5, label="CFG Weight")
            t2i_temperature = gr.Slider(minimum=0, maximum=1, value=1.0, step=0.05, label="temperature")
            parallel_size_slider = gr.Slider(minimum=1, maximum=5, value=5, step=1, label="Parallel Size")  # New slider

        prompt_input = gr.Textbox(label="Prompt. (Prompt in more detail can help produce better images!)")
        seed_input = gr.Number(label="Seed (Optional)", precision=0, value=12345)

        generation_button = gr.Button("Generate Images")

        image_output = gr.Gallery(label="Generated Images", columns=2, rows=2, height=300)
    examples_t2i = gr.Examples(
        label="Text to image generation examples.",
        examples=[
            "Master shifu racoon wearing drip attire as a street gangster.",
            "The face of a beautiful girl",
            "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
            "A glass of red wine on a reflective surface.",
            "A cute and adorable baby fox with big brown eyes, autumn leaves in the background enchanting,immortal,fluffy, shiny mane,Petals,fairyism,unreal engine 5 and Octane Render,highly detailed, photorealistic, cinematic, natural colors.",
            "The image features an intricately designed eye set against a circular backdrop adorned with ornate swirl patterns that evoke both realism and surrealism. At the center of attention is a strikingly vivid blue iris surrounded by delicate veins radiating outward from the pupil to create depth and intensity. The eyelashes are long and dark, casting subtle shadows on the skin around them which appears smooth yet slightly textured as if aged or weathered over time.\n\nAbove the eye, there's a stone-like structure resembling part of classical architecture, adding layers of mystery and timeless elegance to the composition. This architectural element contrasts sharply but harmoniously with the organic curves surrounding it. Below the eye lies another decorative motif reminiscent of baroque artistry, further enhancing the overall sense of eternity encapsulated within each meticulously crafted detail. \n\nOverall, the atmosphere exudes a mysterious aura intertwined seamlessly with elements suggesting timelessness, achieved through the juxtaposition of realistic textures and surreal artistic flourishes. Each component\u2014from the intricate designs framing the eye to the ancient-looking stone piece above\u2014contributes uniquely towards creating a visually captivating tableau imbued with enigmatic allure.",
        ],
        inputs=prompt_input,
    )

        examples_t2i = gr.Examples(
            label="Text to image generation examples.",
            examples=[
                "Master shifu racoon wearing drip attire as a street gangster.",
                "The face of a beautiful girl",
                "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
                "A glass of red wine on a reflective surface.",
                "A cute and adorable baby fox with big brown eyes, autumn leaves in the background enchanting,immortal,fluffy, shiny mane,Petals,fairyism,unreal engine 5 and Octane Render,highly detailed, photorealistic, cinematic, natural colors.",
                "The image features an intricately designed eye set against a circular backdrop adorned with ornate swirl patterns that evoke both realism and surrealism. At the center of attention is a strikingly vivid blue iris surrounded by delicate veins radiating outward from the pupil to create depth and intensity. The eyelashes are long and dark, casting subtle shadows on the skin around them which appears smooth yet slightly textured as if aged or weathered over time.\n\nAbove the eye, there's a stone-like structure resembling part of classical architecture, adding layers of mystery and timeless elegance to the composition. This architectural element contrasts sharply but harmoniously with the organic curves surrounding it. Below the eye lies another decorative motif reminiscent of baroque artistry, further enhancing the overall sense of eternity encapsulated within each meticulously crafted detail. \n\nOverall, the atmosphere exudes a mysterious aura intertwined seamlessly with elements suggesting timelessness, achieved through the juxtaposition of realistic textures and surreal artistic flourishes. Each component\u2014from the intricate designs framing the eye to the ancient-looking stone piece above\u2014contributes uniquely towards creating a visually captivating tableau imbued with enigmatic allure.",
            ],
            inputs=prompt_input,
        )
    understanding_button.click(
        multimodal_understanding,
        inputs=[image_input, question_input, und_seed_input, top_p, temperature],
        inputs=[model_selector, image_input, question_input, und_seed_input, top_p, temperature],  # Added model_selector
        outputs=understanding_output
    )

    generation_button.click(
        fn=generate_image,
        inputs=[prompt_input, seed_input, cfg_weight_input, t2i_temperature],
        inputs=[model_selector, prompt_input, seed_input, cfg_weight_input, t2i_temperature, parallel_size_slider],  # Added model_selector and parallel_size_slider
        outputs=image_output
    )
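The diff ends inside the gr.Blocks() context; the script presumably still closes by launching the app. An assumed ending, not shown in these hunks:

# Assumed closing line, not part of the shown hunks.
demo.launch()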