import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Directory containing the fine-tuned model weights and tokenizer files
model_path = "./fine_tuned_model"

# Load the tokenizer and model saved by the fine-tuning run
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path)

# Build a text-generation pipeline, placing the model on the GPU if one
# is available (device=-1 falls back to the CPU)
chatbot = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device=0 if torch.cuda.is_available() else -1
)

# Generate a response, with sampling enabled for some variety
prompt = "Hello, can you tell me some fun facts about European legislation?"
response = chatbot(prompt, max_length=100, do_sample=True, temperature=0.7)
print(response[0]['generated_text'])
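
# Note that the text-generation pipeline returns the prompt concatenated
# with the continuation. Below is a minimal sketch of a helper that returns
# only the newly generated text; ask() is an illustrative name, and
# return_full_text=False / max_new_tokens assume a reasonably recent
# transformers version.
def ask(question: str) -> str:
    result = chatbot(
        question,
        max_new_tokens=100,      # cap only the new tokens, not the prompt
        do_sample=True,
        temperature=0.7,
        return_full_text=False,  # drop the prompt from the output
    )
    return result[0]["generated_text"]

print(ask("What is the European Parliament responsible for?"))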