Qwen3 Collection
Qwen3 models converted to CTranslate2 format (8 items).
Bfloat16 CTranslate2-compatible version of Qwen/Qwen3-1.7B.
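The conversion from the original Qwen/Qwen3-1.7B checkpoint can be reproduced with CTranslate2's Transformers converter. A minimal sketch, assuming a recent ctranslate2 release with Qwen3 support (the output directory name is illustrative, not necessarily the exact command used for this repository):

```python
from ctranslate2.converters import TransformersConverter

# Convert the original Hugging Face checkpoint to CTranslate2 format,
# storing the weights in bfloat16. The output directory name is illustrative.
converter = TransformersConverter("Qwen/Qwen3-1.7B")
converter.convert("Qwen3-1.7B-ct2-bfloat16", quantization="bfloat16")
```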
| Model | VRAM Usage |
|---|---|
| Qwen3-32B-ct2-awq | ~18.3 GB |
| Qwen3-14B-ct2-awq | ~9.5 GB |
| Qwen3-8B-ct2-awq | ~5.8 GB |
| Qwen3-1.7B-ct2-bfloat16 (this model) | ~3.3 GB |
| Qwen3-4B-ct2-awq | ~2.6 GB |
| Qwen3-1.7B-ct2-awq | ~1.3 GB |
| Qwen3-0.6B-ct2-awq | ~0.6 GB |
import ctranslate2
from huggingface_hub import snapshot_download
from transformers import AutoTokenizer

MODEL_ID = "CTranslate2HQ/Qwen3-1.7B-ct2-bfloat16"

# Download the converted model from the Hugging Face Hub; CTranslate2 loads
# from a local directory, so fetch the files first.
model_path = snapshot_download(MODEL_ID)

# Load the model and tokenizer. Do NOT pass compute_type when loading the AWQ
# models (see the sketch after this example); their quantization already fixes it.
generator = ctranslate2.Generator(model_path, device="cuda", compute_type="bfloat16")
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
# Format prompt using chat template
messages = [
    {"role": "system", "content": "You are a helpful AI assistant."},
    {"role": "user", "content": "Write a short poem about a cat."}
]
prompt = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
    enable_thinking=False  # disable Qwen3's thinking mode for a direct answer
)
# Tokenize the prompt; CTranslate2 generates from token strings, not ids
tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(prompt))
results = generator.generate_batch(
    [tokens],
    max_length=8192,
    sampling_temperature=0.7,
    sampling_topk=50,
    include_prompt_in_result=False  # return only the newly generated tokens
)
# Decode and print response
output_ids = results[0].sequences_ids[0]
response = tokenizer.decode(output_ids, skip_special_tokens=True)
print(response)
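The AWQ models in the table above load the same way; the only difference is that compute_type must not be set, since the AWQ quantization already determines it. A minimal sketch (the repository id below is an assumption, inferred from the table and this model's namespace):

```python
import ctranslate2
from huggingface_hub import snapshot_download

# Hypothetical repo id, inferred from the table above and this collection's namespace.
AWQ_MODEL_ID = "CTranslate2HQ/Qwen3-4B-ct2-awq"

# Download and load the AWQ model; note there is no compute_type argument here.
awq_path = snapshot_download(AWQ_MODEL_ID)
awq_generator = ctranslate2.Generator(awq_path, device="cuda")
```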
Requirements:
- ctranslate2
- transformers
- torch
- huggingface_hub
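These can typically be installed with `pip install ctranslate2 transformers torch huggingface_hub`.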