codelion committed on
Commit
40643a7
·
verified ·
1 Parent(s): 671befc

Update code examples with diffusion-specific parameters

Browse files
Files changed (1) hide show
  1. README.md +5 -5
README.md CHANGED
@@ -164,17 +164,17 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
164
  tokenizer = AutoTokenizer.from_pretrained("codelion/dhara-70m")
165
  model = AutoModelForCausalLM.from_pretrained("codelion/dhara-70m", trust_remote_code=True)
166
 
167
- # Generate text
168
  inputs = tokenizer("The future of AI is", return_tensors="pt")
169
  outputs = model.generate(
170
  **inputs,
171
- max_length=50,
 
172
  do_sample=True,
173
  temperature=0.8,
174
- top_p=0.9,
175
- pad_token_id=tokenizer.eos_token_id
176
  )
177
- print(tokenizer.decode(outputs[0]))
178
  ```
179
 
180
  ### Batch Generation (High Throughput)
 
164
  tokenizer = AutoTokenizer.from_pretrained("codelion/dhara-70m")
165
  model = AutoModelForCausalLM.from_pretrained("codelion/dhara-70m", trust_remote_code=True)
166
 
167
+ # Generate text using diffusion sampling
168
  inputs = tokenizer("The future of AI is", return_tensors="pt")
169
  outputs = model.generate(
170
  **inputs,
171
+ max_new_tokens=40, # Generate 40 new tokens
172
+ num_diffusion_steps=10, # Diffusion denoising steps (higher = better quality)
173
  do_sample=True,
174
  temperature=0.8,
175
+ top_p=0.9
 
176
  )
177
+ print(tokenizer.decode(outputs[0], skip_special_tokens=True))
178
  ```
179
 
180
  ### Batch Generation (High Throughput)