ThieLin commited on
Commit
e9199db
·
verified ·
1 Parent(s): 5ac14f5
Files changed (1) hide show
  1. app.py +26 -55
app.py CHANGED
@@ -2,58 +2,29 @@ import gradio as gr
2
  from transformers import pipeline
3
  from sentence_transformers import SentenceTransformer, util
4
 
5
- class ModelComparator:
6
- def __init__(self):
7
- self.qa_pipeline = pipeline("question-answering", model="distilbert-base-uncased-distilled-squad")
8
- self.text_gen_pipeline = pipeline("text-generation", model="gpt2", max_new_tokens=20) # menor geração
9
- self.sim_model = SentenceTransformer("all-MiniLM-L6-v2")
10
-
11
- def get_qa_answer(self, question, context=None):
12
- if not context:
13
- return "No context provided for QA model."
14
- try:
15
- result = self.qa_pipeline(question=question, context=context)
16
- return result['answer']
17
- except Exception as e:
18
- return f"Error in QA pipeline: {e}"
19
-
20
- def get_text_gen_answer(self, prompt):
21
- try:
22
- generated = self.text_gen_pipeline(prompt)[0]['generated_text']
23
- answer = generated[len(prompt):].strip()
24
- return answer if answer else generated.strip()
25
- except Exception as e:
26
- return f"Error in text generation pipeline: {e}"
27
-
28
- def compare_answers(self, answer1, answer2):
29
- emb1 = self.sim_model.encode(answer1, convert_to_tensor=True)
30
- emb2 = self.sim_model.encode(answer2, convert_to_tensor=True)
31
- similarity = util.cos_sim(emb1, emb2).item()
32
- return round(similarity, 3)
33
-
34
- def respond(self, question, context):
35
- qa_answer = self.get_qa_answer(question, context)
36
- gen_answer = self.get_text_gen_answer(question)
37
- similarity = self.compare_answers(qa_answer, gen_answer)
38
-
39
- return (f"Model QA answer:\n{qa_answer}\n\n"
40
- f"Model GPT-2 generated answer:\n{gen_answer}\n\n"
41
- f"Semantic similarity score: {similarity}")
42
-
43
- model_comparator = ModelComparator()
44
-
45
- with gr.Blocks() as demo:
46
- gr.Markdown("## Comparador rápido para Hugging Face Spaces")
47
- question_input = gr.Textbox(label="Pergunta")
48
- context_input = gr.Textbox(label="Contexto para o modelo de QA (opcional)", lines=3)
49
- output = gr.Textbox(label="Respostas e Similaridade", lines=15)
50
- btn = gr.Button("Comparar")
51
-
52
- btn.click(
53
- fn=model_comparator.respond,
54
- inputs=[question_input, context_input],
55
- outputs=output
56
- )
57
-
58
- if __name__ == "__main__":
59
- demo.launch()
 
2
  from transformers import pipeline
3
  from sentence_transformers import SentenceTransformer, util
4
 
5
# Model loading (done once at module import so every request reuses them).
# Model A: Falcon-7B-Instruct, Model B: Mistral-7B-Instruct — both plain
# text-generation pipelines. The sentence-transformer embeds the two outputs
# so their cosine similarity can be reported.
model_a = pipeline("text-generation", model="tiiuae/falcon-7b-instruct")
model_b = pipeline("text-generation", model="mistralai/Mistral-7B-Instruct-v0.1")
similarity_model = SentenceTransformer("sentence-transformers/paraphrase-MiniLM-L6-v2")
9
+
10
def comparar_respostas(prompt):
    """Generate an answer from each model and score their semantic similarity.

    Args:
        prompt: The user's free-text prompt, fed verbatim to both models.

    Returns:
        A 3-tuple of strings: (model A completion, model B completion,
        cosine similarity formatted to 4 decimal places).
    """
    # return_full_text=False makes the pipeline return only the completion,
    # not the echoed prompt. Without it both "answers" share the identical
    # prompt prefix, which is shown to the user and artificially inflates
    # the cosine-similarity score.
    resp_a = model_a(prompt, max_new_tokens=80, return_full_text=False)[0]["generated_text"]
    resp_b = model_b(prompt, max_new_tokens=80, return_full_text=False)[0]["generated_text"]

    emb_a = similarity_model.encode(resp_a, convert_to_tensor=True)
    emb_b = similarity_model.encode(resp_b, convert_to_tensor=True)
    similaridade = util.cos_sim(emb_a, emb_b).item()

    return resp_a.strip(), resp_b.strip(), f"{similaridade:.4f}"
18
+
19
# Gradio UI: one prompt in, both model answers plus their similarity out.
interface = gr.Interface(
    fn=comparar_respostas,
    inputs=gr.Textbox(label="Digite seu prompt"),
    outputs=[
        gr.Textbox(label="Resposta do Modelo A (Falcon)"),
        gr.Textbox(label="Resposta do Modelo B (Mistral)"),
        gr.Textbox(label="Similaridade entre as respostas"),
    ],
    title="Comparador de Modelos LLM - Hugging Face",
)

# Guard the launch so importing this module (e.g. for tests) does not start
# the server; the previous revision of this file used the same guard.
# Hugging Face Spaces runs app.py as __main__, so behavior there is unchanged.
if __name__ == "__main__":
    interface.launch()