# app.py - from local backup
import gradio as gr
import os
import tempfile
import textwrap
from datetime import datetime
from pathlib import Path
from typing import List, Dict, Any, Optional
from src.model_loader import load_local_model
from src.conversation import ConversationMemory
from src.chatbot import LocalChatbot
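# Load the local model and wire it to conversation memory once at import time,
# so every request shares a single model and memory instance.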
llm = load_local_model()
memory = ConversationMemory()
bot = LocalChatbot(llm, memory)
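# System-prompt prefixes keyed by intent. A message that starts with one of
# these keywords switches the assistant's persona for that turn.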
INTENT_TEMPLATES = {
"math": "You are a math solver. Solve step-by-step only.",
"code": "You are a coding expert. Provide clean, working code.",
"civics": "Explain clearly like a Class 10 SST teacher.",
"exam": "Prepare concise exam-focused notes and important questions."
}
def now_ts():
    return datetime.now().strftime("%Y-%m-%d %H:%M:%S")
# ----------------------
# EXPORT TXT/PDF
# ----------------------
def export_chat_files(history: List[Dict[str, Any]]) -> Dict[str, Optional[str]]:
    tmpdir = tempfile.gettempdir()
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    txt_path = os.path.join(tmpdir, f"chat_history_{timestamp}.txt")
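    # Heuristic cleanup: if the last line of a message is a generic closing
    # pleasantry, drop it so exports end on substantive content.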
    def remove_last_closing_line(lines):
        # All keywords are lowercase so they can match the lowercased last line.
        closing_keywords = [
            "let me know", "is there anything else",
            "anything else i can help", "feel free to ask",
            "hope this helps", "need further assistance",
            "feel free", "happy to help", "hello! how can i assist you today?",
            "are there any specific industries or areas you'd like to explore in more detail?",
            "how can i help you better?",
            "what did you like about our interaction?",
            "do you have any feedback on your experience?",
            "would you like to explore", "need clarification",
        ]
        if not lines:
            return lines
        last = lines[-1].lower().strip()
        if any(k in last for k in closing_keywords):
            return lines[:-1]
        return lines
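    # Write one cleaned block per message: strip timestamps, markdown headers,
    # role labels, and dash rules before appending a separator line.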
    with open(txt_path, "w", encoding="utf-8") as f:
        for msg in history:
            content = msg.get("content", "")
            lines = content.splitlines()
            clean = [l for l in lines if not l.strip().startswith("🕒")]
            if clean and clean[0].startswith("**"):
                clean = clean[1:]
            clean = [l for l in clean if set(l) != {"-"}]
            clean = [l.replace("USER:", "").replace("ASSISTANT:", "").strip() for l in clean]
            clean = remove_last_closing_line(clean)
            f.write("\n".join(clean).strip() + "\n")
            f.write("-" * 60 + "\n")
    pdf_path = None
    try:
        from reportlab.lib.pagesizes import A4
        from reportlab.pdfgen import canvas
        pdf_path = os.path.join(tmpdir, f"chat_history_{timestamp}.pdf")
        c = canvas.Canvas(pdf_path, pagesize=A4)
        width, height = A4
        margin = 40
        textobject = c.beginText(margin, height - margin)
        textobject.setFont("Helvetica", 10)
        with open(txt_path, "r", encoding="utf-8") as fh:
            for line in fh:
                for wrapped in textwrap.wrap(line.rstrip(), 100):
                    # Start a new page when the cursor reaches the bottom margin.
                    if textobject.getY() < margin:
                        c.drawText(textobject)
                        c.showPage()
                        textobject = c.beginText(margin, height - margin)
                        textobject.setFont("Helvetica", 10)
                    textobject.textLine(wrapped)
        c.drawText(textobject)
        c.showPage()
        c.save()
    except Exception:
        # reportlab missing or rendering failed; fall back to TXT-only export.
        pdf_path = None
    return {"txt": txt_path, "pdf": pdf_path}
# ----------------------
# Core chat function
# ----------------------
def generate_reply(user_msg: str, history: Optional[List[Dict[str, Any]]]):
    if history is None:
        history = []
    if not user_msg.strip():
        return history
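    # If the message starts with an intent keyword (e.g. "math ..."), strip it
    # and prepend the matching system prompt.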
    intent = None
    low = user_msg.lower()
    for key in INTENT_TEMPLATES:
        if low.startswith(key):
            intent = key
            user_msg = user_msg[len(key):].strip()
            break
    system_prefix = INTENT_TEMPLATES.get(intent)
    if system_prefix:
        prompt = f"{system_prefix}\nUser: {user_msg}"
    else:
        prompt = f"User: {user_msg}"
    bot_reply = bot.ask(prompt)
    ts = now_ts()
    bot_reply_ts = f"{bot_reply}\n\n🕒 {ts}"
    history.append({"role": "user", "content": user_msg})
    history.append({"role": "assistant", "content": bot_reply_ts})
    try:
        memory.add(user_msg, bot_reply)
    except Exception:
        # Memory persistence is best-effort; a failure should not break the chat.
        pass
    return history
# ----------------------
# CUSTOM CSS
# ----------------------
CUSTOM_CSS = """
/* GLOBAL */
.gradio-container {
background: linear-gradient(135deg, #f6f7f9 0%, #e9ecf1 100%);
font-family: Inter, system-ui;
}
#main_card h3 {
font-size: 28px !important;
font-weight: 700 !important;
background: linear-gradient(90deg, #0ea5e9, #06b6d4);
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
text-align: center;
border-bottom: 2px solid rgba(0,0,0,0.15);
}
/* MAIN CARD */
#main_card {
background: #ffffff;
border: 1px solid #e3e8ef;
border-radius: 16px;
padding: 16px;
box-shadow: 0 4px 16px rgba(0,0,0,0.05);
}
/* Chat UI */
.gradio-chatbot .assistant {
background: #4f46e5 !important;
color: white !important;
border-radius: 14px;
padding: 12px;
}
.gradio-chatbot .user {
background: #f1f5f9 !important;
border-radius: 14px;
padding: 12px;
}
/* Input box */
#message-box textarea {
background: #e0e7ff !important;
border-radius: 12px !important;
font-size: 24px !important;
}
/* Send button */
.send-btn {
background: #4f46e5 !important;
color: white !important;
transition: background 0.2s ease, transform 0.2s ease;
}
.send-btn:hover {
background: #4338ca !important; /* darker indigo */
transform: scale(1.05);
}
/* Mic / Voice input button */
.icon-btn {
background: #f1f5f9 !important;
transition: background 0.2s ease, transform 0.2s ease;
}
.icon-btn:hover {
background: #e2e8f0 !important; /* slightly darker */
transform: scale(1.05);
}
"""
# ----------------------
# JS (Voice only)
# ----------------------
PAGE_JS = """
<script>
(function(){
  window.startVoiceRecognition = function(elem_id){
    const wrapper = document.getElementById(elem_id);
    if(!wrapper) return;
    const textarea = wrapper.querySelector('textarea');
    if(!textarea) return;
    const SR = window.SpeechRecognition || window.webkitSpeechRecognition;
    if(!SR) return alert('Speech recognition not supported');
    const recog = new SR();
    recog.lang = 'en-US';
    recog.interimResults = false;
    recog.onresult = function(e){
      textarea.value = e.results[0][0].transcript;
      // Dispatch an input event so Gradio picks up the programmatic value change.
      textarea.dispatchEvent(new Event('input', { bubbles: true }));
      // Visual flash to confirm input
      textarea.style.background = "#e7f5ff";
      setTimeout(() => { textarea.style.background = ""; }, 400);
    };
    recog.start();
  };
})();
</script>
"""
# ----------------------
# UI
# ----------------------
with gr.Blocks(css=CUSTOM_CSS, title="Tayyab - Chatbot") as demo:
    gr.HTML(PAGE_JS)
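    # Layout: a narrow tools column on the left, the main chat card on the right.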
    with gr.Row():
        with gr.Column(scale=1, min_width=220):
            gr.Markdown("### ⚡ Tools & Export")
            new_chat_btn = gr.Button("➕ New Chat")
            export_btn = gr.Button("📥 Export TXT/PDF")
        with gr.Column(scale=3, elem_id="main_card"):
            gr.Markdown("<h3>Smart Learning Assistant - Tayyab</h3>")
            chatbot = gr.Chatbot(height=480, type="messages", elem_id="chatbot_box")
            with gr.Row():
                msg = gr.Textbox(
                    placeholder="Type a message or use the mic",
                    elem_id="message-box",
                    show_label=False,
                    lines=3,
                )
                send_btn = gr.Button("Send", elem_classes="send-btn")
                mic_btn = gr.Button("🎤 Voice input", elem_classes="icon-btn")
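            # The mic button runs the browser-side helper from PAGE_JS; no Python callback is involved.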
            mic_btn.click(None, None, None, js='() => startVoiceRecognition("message-box")')
            file_txt = gr.File(visible=False)
            file_pdf = gr.File(visible=False)
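    # Both the Send button and pressing Enter in the textbox route through generate_reply.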
    send_btn.click(generate_reply, inputs=[msg, chatbot], outputs=[chatbot])
    msg.submit(generate_reply, inputs=[msg, chatbot], outputs=[chatbot])
    def new_chat():
        memory.clear()
        return []

    new_chat_btn.click(new_chat, outputs=[chatbot])
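    # Export always produces the TXT file and adds a PDF only when reportlab is
    # available, then reveals the corresponding download widgets.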
    def export_handler(history):
        files = export_chat_files(history or [])
        return (
            gr.update(value=files.get("txt"), visible=True),
            gr.update(value=files.get("pdf"), visible=bool(files.get("pdf"))),
        )

    export_btn.click(export_handler, inputs=[chatbot], outputs=[file_txt, file_pdf])
if __name__ == "__main__":
    demo.launch()