Update README.md
README.md CHANGED

@@ -36,7 +36,7 @@ quantization_config=BitsAndBytesConfig(
     bnb_4bit_quant_type="nf4",
     bnb_4bit_compute_dtype=torch.bfloat16,
 )
-model = AutoModelForCausalLM.from_pretrained(model_info, trust_remote_code=True, torch_dtype=torch.bfloat16
+model = AutoModelForCausalLM.from_pretrained(model_info, trust_remote_code=True, torch_dtype=torch.bfloat16,
     # quantization_config=quantization_config, # Uncomment this line for 4bit quantization
 )
 model.eval()
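The only substantive change is the trailing comma added after `torch_dtype=torch.bfloat16`, so that uncommenting the `quantization_config` line directly below it still yields valid Python. For context, here is a minimal sketch of how the README snippet reads after this fix; the value of `model_info` and the `load_in_4bit=True` flag are assumptions, since neither appears in the hunk shown above.

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Assumption: the README defines model_info above this hunk; a placeholder repo id is used here.
model_info = "your-org/your-model"

quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,  # assumed; this flag would sit above the lines shown in the diff
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

model = AutoModelForCausalLM.from_pretrained(
    model_info,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,
    # quantization_config=quantization_config,  # Uncomment this line for 4bit quantization
)
model.eval()
```

With the comma in place, uncommenting the `quantization_config` argument enables 4-bit NF4 quantization without any further edits to the call.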