DF11 multimodal LLMs
For more information (including how to compress models yourself), check out https://huggingface.co/DFloat11 and https://github.com/LeanModels/DFloat11
Feel free to request other models for compression as well (whether for the diffusers library, ComfyUI, or anything else), although compressing models whose architectures are unfamiliar to me might be more difficult.
Usage with the transformers library:
import torch
from transformers import Qwen3VLForConditionalGeneration, AutoProcessor
from dfloat11 import DFloat11Model
# Load the bfloat16 model structure on CPU first; its weights are replaced by the DF11 weights below
model = Qwen3VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen3-VL-8B-Thinking", dtype=torch.bfloat16, device_map="cpu"
)
# We recommend enabling flash_attention_2 for better acceleration and memory saving,
# especially in multi-image and video scenarios. Keep device_map="cpu" here as well, so the
# full bfloat16 weights are not materialized on the GPU before the DF11 weights are loaded.
# model = Qwen3VLForConditionalGeneration.from_pretrained(
#     "Qwen/Qwen3-VL-8B-Thinking",
#     dtype=torch.bfloat16,
#     attn_implementation="flash_attention_2",
#     device_map="cpu",
# )
# Load the DFloat11-compressed weights into the bfloat16 model, then move it to the GPU
DFloat11Model.from_pretrained("mingyi456/Qwen3-VL-8B-Thinking-DF11", device="cpu", bfloat16_model=model)
model.to("cuda")
processor = AutoProcessor.from_pretrained("Qwen/Qwen3-VL-8B-Thinking")
messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "image",
                "image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg",
            },
            {"type": "text", "text": "Describe this image."},
        ],
    }
]
# Preparation for inference
inputs = processor.apply_chat_template(
    messages,
    tokenize=True,
    add_generation_prompt=True,
    return_dict=True,
    return_tensors="pt",
)
inputs = inputs.to(model.device)
# Inference: Generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=1024)
generated_ids_trimmed = [
    out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text)
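After generation, it can be useful to confirm how much GPU memory the compressed model actually used. This quick check is not part of the original snippet; it is a minimal sketch that assumes the code above has just run on a single CUDA device:

# Optional sanity check: report peak GPU memory used during loading and generation,
# to confirm the DF11 model fits comfortably on the device.
peak_gib = torch.cuda.max_memory_allocated() / 1024**3
print(f"Peak GPU memory allocated: {peak_gib:.2f} GiB")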
This is the pattern_dict for compression:
pattern_dict = {
    r"model\.visual\.blocks\.\d+": [
        "attn.qkv",
        "attn.proj",
        "mlp.linear_fc1",
        "mlp.linear_fc2",
    ],
    r"model\.visual\.merger": [
        "linear_fc1",
        "linear_fc2",
    ],
    r"model\.visual\.deepstack_merger_list\.\d+": [
        "linear_fc1",
        "linear_fc2",
    ],
    r"model\.language_model\.embed_tokens": [],
    r"model\.language_model\.layers\.\d+": [
        "self_attn.q_proj",
        "self_attn.k_proj",
        "self_attn.v_proj",
        "self_attn.o_proj",
        "mlp.gate_proj",
        "mlp.up_proj",
        "mlp.down_proj",
    ],
    r"lm_head": [],
}
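To see which weights these patterns cover, a check like the following can be run against the bfloat16 model loaded above. This is a minimal sketch under the assumption that each regex key matches a parent module path from model.named_modules() and each listed name is a linear submodule inside it, with an empty list meaning the matched module's own weight is compressed; matched_weights is a hypothetical helper, not part of the dfloat11 library:

import re

# Hypothetical helper for illustration: list the parameters selected by pattern_dict.
def matched_weights(model, pattern_dict):
    selected = []
    for name, _ in model.named_modules():
        for pattern, submodules in pattern_dict.items():
            if re.fullmatch(pattern, name):
                if not submodules:
                    selected.append(f"{name}.weight")
                else:
                    selected.extend(f"{name}.{sub}.weight" for sub in submodules)
    return selected

for weight_name in matched_weights(model, pattern_dict)[:10]:
    print(weight_name)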
Base model: Qwen/Qwen3-VL-8B-Thinking