Code: https://github.com/deepseek-ai/Janus
Model: https://modelscope.cn/collections/Janus-Pro-0f5e48f6b96047
Demo: https://modelscope.cn/studios/AI-ModelScope/Janus-Pro-7B
Create a virtual environment:
conda create --name vll python=3.9
Activate the virtual environment:
conda activate vll
Check the CUDA version:
nvcc -V
Create the project directory:
mkdir vllm
cd vllm
Install PyTorch. Pick a build that matches your CUDA toolkit; anything 2.0 or newer works. For example:
conda install pytorch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 pytorch-cuda=11.8 -c pytorch -c nvidia
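Before going further, it is worth confirming that PyTorch actually sees the GPU. A minimal sanity check, run inside the activated environment:

import torch
print(torch.__version__)          # expect 2.2.2
print(torch.version.cuda)         # CUDA version PyTorch was built against, e.g. 11.8
print(torch.cuda.is_available())  # should print True if the CUDA build works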
Install the Janus package straight from GitHub:
pip install git+https://github.com/deepseek-ai/Janus
Download the model
The model can be pulled with modelscope. Install modelscope first, then run the download command:
pip install modelscope
modelscope download --model deepseek-ai/Janus-Pro-1B
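If you would rather script the download, modelscope also exposes it as a Python API; a minimal sketch (it downloads into the same default cache path that the mv command below moves from):

from modelscope import snapshot_download

# downloads into ~/.cache/modelscope/hub by default and returns the local path
model_dir = snapshot_download('deepseek-ai/Janus-Pro-1B')
print(model_dir)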
Move the downloaded model directory into the vllm project:
mv /home/sam_admin/.cache/modelscope/hub/models/deepseek-ai .
Check the directory structure:
tree ../vllm
Clone the Janus source:
git clone https://github.com/deepseek-ai/Janus.git
Enter the Janus directory and run:
cd Janus
pip install -e .
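A quick import check confirms that the editable install worked (a minimal sanity sketch):

# should import without error if `pip install -e .` succeeded
from janus.models import MultiModalityCausalLM, VLChatProcessor
print(VLChatProcessor)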
Copy the janus package directory up into vllm:
cp -r janus ../
The vllm directory should now contain both the janus package and the deepseek-ai model directory.
Downgrade numpy: pip now installs 2.x by default, which is incompatible here, so pin an older release. I used 1.26.3:
pip install numpy==1.26.3
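To verify the pin took effect (numpy 2.x breaks extensions compiled against the 1.x ABI):

import numpy
print(numpy.__version__)  # expect 1.26.3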
Test image understanding
Create a file named image_understanding.py with the following code:
import torch
from transformers import AutoModelForCausalLM

from janus.models import MultiModalityCausalLM, VLChatProcessor
from janus.utils.io import load_pil_images

model_path = "deepseek-ai/Janus-Pro-1B"
image = "aa.jpeg"
question = "Please describe this image."

vl_chat_processor: VLChatProcessor = VLChatProcessor.from_pretrained(model_path)
tokenizer = vl_chat_processor.tokenizer

vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(
    model_path, trust_remote_code=True
)
vl_gpt = vl_gpt.to(torch.bfloat16).cuda().eval()

conversation = [
    {
        "role": "<|User|>",
        "content": f"<image_placeholder>\n{question}",
        "images": [image],
    },
    {"role": "<|Assistant|>", "content": ""},
]

# load the images and prepare the model inputs
pil_images = load_pil_images(conversation)
prepare_inputs = vl_chat_processor(
    conversations=conversation, images=pil_images, force_batchify=True
).to(vl_gpt.device)

# run the image encoder to get the image embeddings
inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)

# run the language model to get the response
outputs = vl_gpt.language_model.generate(
    inputs_embeds=inputs_embeds,
    attention_mask=prepare_inputs.attention_mask,
    pad_token_id=tokenizer.eos_token_id,
    bos_token_id=tokenizer.bos_token_id,
    eos_token_id=tokenizer.eos_token_id,
    max_new_tokens=512,
    do_sample=False,
    use_cache=True,
)

answer = tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=True)
print(f"{prepare_inputs['sft_format'][0]}", answer)
Upload an aa.jpeg test image to the current directory so the script can find it, then run:
python image_understanding.py
The model prints a description of the image.
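If your GPU predates Ampere and lacks bfloat16 support, one plausible workaround (an assumption on my part, not something tested in this post) is to load the model in float16 instead, leaving the rest of the script unchanged:

# assumption: on pre-Ampere GPUs, fp16 is usually a drop-in replacement for bf16 here
vl_gpt = vl_gpt.to(torch.float16).cuda().eval()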
Test image generation
Create a script named image_generation.py with the following code:
import os

import numpy as np
import PIL.Image
import torch
from transformers import AutoModelForCausalLM

from janus.models import MultiModalityCausalLM, VLChatProcessor

# specify the path to the model
model_path = "deepseek-ai/Janus-Pro-1B"
vl_chat_processor: VLChatProcessor = VLChatProcessor.from_pretrained(model_path)
tokenizer = vl_chat_processor.tokenizer

vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(
    model_path, trust_remote_code=True
)
vl_gpt = vl_gpt.to(torch.bfloat16).cuda().eval()

conversation = [
    {
        "role": "<|User|>",
        "content": "A stunning princess from abroad, wearing a traditional red-and-white outfit, with blue eyes and brown hair",
    },
    {"role": "<|Assistant|>", "content": ""},
]

sft_format = vl_chat_processor.apply_sft_template_for_multi_turn_prompts(
    conversations=conversation,
    sft_format=vl_chat_processor.sft_format,
    system_prompt="",
)
prompt = sft_format + vl_chat_processor.image_start_tag


@torch.inference_mode()
def generate(
    mmgpt: MultiModalityCausalLM,
    vl_chat_processor: VLChatProcessor,
    prompt: str,
    temperature: float = 1,
    parallel_size: int = 16,
    cfg_weight: float = 5,
    image_token_num_per_image: int = 576,
    img_size: int = 384,
    patch_size: int = 16,
):
    input_ids = vl_chat_processor.tokenizer.encode(prompt)
    input_ids = torch.LongTensor(input_ids)

    # duplicate the prompt: even rows keep the text (conditional),
    # odd rows are padded out (unconditional) for classifier-free guidance
    tokens = torch.zeros((parallel_size * 2, len(input_ids)), dtype=torch.int).cuda()
    for i in range(parallel_size * 2):
        tokens[i, :] = input_ids
        if i % 2 != 0:
            tokens[i, 1:-1] = vl_chat_processor.pad_id

    inputs_embeds = mmgpt.language_model.get_input_embeddings()(tokens)

    generated_tokens = torch.zeros((parallel_size, image_token_num_per_image), dtype=torch.int).cuda()

    # autoregressively sample the image tokens one position at a time
    for i in range(image_token_num_per_image):
        outputs = mmgpt.language_model.model(
            inputs_embeds=inputs_embeds,
            use_cache=True,
            past_key_values=outputs.past_key_values if i != 0 else None,
        )
        hidden_states = outputs.last_hidden_state

        logits = mmgpt.gen_head(hidden_states[:, -1, :])
        logit_cond = logits[0::2, :]
        logit_uncond = logits[1::2, :]

        # classifier-free guidance: push conditional logits away from unconditional ones
        logits = logit_uncond + cfg_weight * (logit_cond - logit_uncond)
        probs = torch.softmax(logits / temperature, dim=-1)

        next_token = torch.multinomial(probs, num_samples=1)
        generated_tokens[:, i] = next_token.squeeze(dim=-1)

        next_token = torch.cat([next_token.unsqueeze(dim=1), next_token.unsqueeze(dim=1)], dim=1).view(-1)
        img_embeds = mmgpt.prepare_gen_img_embeds(next_token)
        inputs_embeds = img_embeds.unsqueeze(dim=1)

    # decode the image tokens back to pixels and rescale from [-1, 1] to [0, 255]
    dec = mmgpt.gen_vision_model.decode_code(
        generated_tokens.to(dtype=torch.int),
        shape=[parallel_size, 8, img_size // patch_size, img_size // patch_size],
    )
    dec = dec.to(torch.float32).cpu().numpy().transpose(0, 2, 3, 1)
    dec = np.clip((dec + 1) / 2 * 255, 0, 255)

    visual_img = np.zeros((parallel_size, img_size, img_size, 3), dtype=np.uint8)
    visual_img[:, :, :] = dec

    os.makedirs('generated_samples', exist_ok=True)
    for i in range(parallel_size):
        save_path = os.path.join('generated_samples', "img_{}.jpg".format(i))
        PIL.Image.fromarray(visual_img[i]).save(save_path)


generate(
    vl_gpt,
    vl_chat_processor,
    prompt,
)
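In this script, parallel_size is the number of images sampled in one batch and cfg_weight is the classifier-free guidance scale. On a smaller GPU you can trade image count for memory by overriding the defaults when calling the generate function defined above; a sketch:

generate(
    vl_gpt,
    vl_chat_processor,
    prompt,
    parallel_size=4,  # 4 images instead of 16 cuts the batch memory roughly 4x
    cfg_weight=5,     # guidance strength; higher sticks closer to the prompt
)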