# app.py
import gradio as gr
import os
import tempfile
import textwrap
from datetime import datetime
from typing import List, Dict, Any, Optional
from reportlab.lib.pagesizes import A4
from reportlab.pdfgen import canvas
from src.model_loader import load_local_model
from src.conversation import ConversationMemory
from src.chatbot import LocalChatbot
# -----------------------
# Initialize
# -----------------------
llm = load_local_model()
memory = ConversationMemory()
bot = LocalChatbot(llm, memory)
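# llm, memory, and bot are created once at import time from this repo's src
# package (model_loader, conversation, chatbot), so the model load cost is
# paid only when the app starts up.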
INTENT_TEMPLATES = {
    "math": "You are a math solver. Solve step-by-step only.",
    "code": "You are a coding expert. Provide clean, working code.",
    "civics": "Explain clearly like a Class 10 SST teacher.",
    "exam": "Prepare concise exam-focused notes and important questions.",
}
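# A message that starts with one of these keys is routed to that intent:
# e.g. "math solve 2x + 3 = 7" matches "math", the prefix is stripped, and
# intent="math" is passed to bot.ask (see generate_reply below). How the
# template is applied is up to LocalChatbot in src/chatbot.py.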
def now_ts():
    return datetime.now().strftime("%Y-%m-%d %H:%M:%S")
# ----------------------
# Export chat to TXT / PDF
# ----------------------
def export_chat_files(history: List[Dict[str, Any]]) -> Dict[str, Optional[str]]:
    tmpdir = tempfile.gettempdir()
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    txt_path = os.path.join(tmpdir, f"chat_history_{timestamp}.txt")
    pdf_path = os.path.join(tmpdir, f"chat_history_{timestamp}.pdf")

    def clean_content(content):
        """Extract plain text from the Gradio message format and drop the UI-only timestamp line."""
        text = ""
        if isinstance(content, list):
            for item in content:
                if isinstance(item, dict) and "text" in item:
                    text += item["text"] + "\n"
        elif isinstance(content, dict) and "text" in content:
            text = content["text"]
        else:
            text = str(content)
        # Remove the timestamp line appended for display (it starts with the clock emoji)
        lines = [l for l in text.splitlines() if not l.strip().startswith("🕒")]
        return "\n".join(lines).strip()
    # ---------------- TXT file ----------------
    with open(txt_path, "w", encoding="utf-8") as f:
        for msg in history:
            role = msg.get("role", "user").capitalize()
            content = clean_content(msg.get("content", ""))
            f.write(f"{role}:\n{content}\n\n")

    # ---------------- PDF file ----------------
    try:
        c = canvas.Canvas(pdf_path, pagesize=A4)
        page_width, page_height = A4
        margin = 40
        y = page_height - margin
        line_height = 16
        font_size = 11
        c.setFont("Helvetica", font_size)
        for msg in history:
            role = msg.get("role", "user").capitalize()
            content = clean_content(msg.get("content", ""))
            lines = content.splitlines()
            for line in lines:
                wrapped = textwrap.wrap(line, width=90)
                for wline in wrapped:
                    if y < margin + line_height:
                        c.showPage()
                        c.setFont("Helvetica", font_size)
                        y = page_height - margin
                    c.drawString(margin, y, f"{role}: {wline}" if role == "User" else wline)
                    y -= line_height
            y -= line_height // 2
        c.showPage()
        c.save()
    except Exception as e:
        print("PDF export failed:", e)
        pdf_path = None

    return {"txt": txt_path, "pdf": pdf_path}
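# Illustrative usage (assuming a messages-format history like the one the UI
# builds below); paths point into the system temp directory and "pdf" is None
# if the ReportLab step fails:
#
#   files = export_chat_files([
#       {"role": "user", "content": "Hello"},
#       {"role": "assistant", "content": "Hi!\n\n🕒 2025-01-01 12:00:00"},
#   ])
#   print(files["txt"], files["pdf"])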
# ----------------------
# Core chat function
# ----------------------
def generate_reply(user_msg: str, history: Optional[List[Dict[str, Any]]]):
    if history is None:
        history = []
    if not user_msg.strip():
        return history
    # Detect an intent prefix ("math", "code", "civics", "exam") and strip it
    intent = None
    low = user_msg.lower()
    for key in INTENT_TEMPLATES:
        if low.startswith(key):
            intent = key
            user_msg = user_msg[len(key):].strip()
            break
    # Ask the chatbot (the intent selects the matching template)
    bot_reply = bot.ask(user_msg, intent=intent)
    ts = now_ts()
    # Timestamp is appended for UI display only; export_chat_files strips it again
    bot_reply_ts = f"{bot_reply}\n\n🕒 {ts}"
    history.append({"role": "user", "content": str(user_msg)})
    history.append({"role": "assistant", "content": str(bot_reply_ts)})
    try:
        memory.add(user_msg, bot_reply)
    except Exception:
        pass
    return history
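# With gr.Chatbot(type="messages") below, each history entry is a plain dict
# ({"role": "user"/"assistant", "content": "..."}), which is exactly what
# generate_reply appends and what export_chat_files expects to clean up.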
# ----------------------
# UI / Gradio
# ----------------------
CUSTOM_CSS = """
/* GLOBAL */
.gradio-container {
    background: linear-gradient(135deg, #f6f7f9 0%, #e9ecf1 100%);
    font-family: Inter, system-ui;
}
#main_card h3 {
    font-size: 28px !important;
    font-weight: 700 !important;
    background: linear-gradient(90deg, #0ea5e9, #06b6d4);
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
    text-align: center;
    border-bottom: 2px solid rgba(0,0,0,0.15);
}
/* MAIN CARD */
#main_card {
    background: #ffffff;
    border: 1px solid #e3e8ef;
    border-radius: 16px;
    padding: 16px;
    box-shadow: 0 4px 16px rgba(0,0,0,0.05);
}
/* Chat UI */
.gradio-chatbot .assistant {
    background: #4f46e5 !important;
    color: white !important;
    border-radius: 14px;
    padding: 12px;
}
.gradio-chatbot .user {
    background: #f1f5f9 !important;
    border-radius: 14px;
    padding: 12px;
}
/* Input box */
#message-box textarea {
    background: #e0e7ff !important;
    border-radius: 12px !important;
    font-size: 24px !important;
}
/* Send button */
.send-btn {
    background: #4f46e5 !important;
    color: white !important;
    transition: background 0.2s ease, transform 0.2s ease;
}
.send-btn:hover {
    background: #4338ca !important;
    transform: scale(1.05);
}
/* Mic / Voice input button */
.icon-btn {
    background: #f1f5f9 !important;
    transition: background 0.2s ease, transform 0.2s ease;
}
.icon-btn:hover {
    background: #e2e8f0 !important;
    transform: scale(1.05);
}
"""
PAGE_JS = """
<script>
(function(){
    window.startVoiceRecognition = function(elem_id){
        const wrapper = document.getElementById(elem_id);
        if(!wrapper) return;
        const textarea = wrapper.querySelector('textarea');
        if(!textarea) return;
        const SR = window.SpeechRecognition || window.webkitSpeechRecognition;
        if(!SR) return alert('Speech recognition not supported');
        const recog = new SR();
        recog.lang = 'en-US';
        recog.interimResults = false;
        recog.onresult = function(e){
            textarea.value = e.results[0][0].transcript;
            textarea.style.background = "#e7f5ff";
            setTimeout(() => { textarea.style.background = ""; }, 400);
        };
        recog.start();
    };
})();
</script>
"""
with gr.Blocks(title="Tayyab - Chatbot") as demo:
    # PAGE_JS already contains its own <script> tags, so it is injected as-is
    gr.HTML(f"<style>{CUSTOM_CSS}</style>{PAGE_JS}")
    with gr.Row():
        with gr.Column(scale=1, min_width=220):
            gr.Markdown("### ⚡ Tools & Export")
            new_chat_btn = gr.Button("➕ New Chat")
            export_btn = gr.Button("📥 Export TXT/PDF")
        with gr.Column(scale=3, elem_id="main_card"):
            gr.Markdown("<h3>Smart Learning Assistant - Tayyab</h3>")
            chatbot = gr.Chatbot(height=480, elem_id="chatbot_box", type="messages")
            with gr.Row():
                msg = gr.Textbox(placeholder="Type a message or use the mic", elem_id="message-box", show_label=False, lines=3)
                send_btn = gr.Button("Send", elem_classes="send-btn")
                mic_btn = gr.Button("🎤 Voice input", elem_classes="icon-btn")
            mic_btn.click(None, None, None, js='() => startVoiceRecognition("message-box")')
            file_txt = gr.File(visible=False)
            file_pdf = gr.File(visible=False)

    send_btn.click(generate_reply, inputs=[msg, chatbot], outputs=[chatbot])
    msg.submit(generate_reply, inputs=[msg, chatbot], outputs=[chatbot])
    def new_chat():
        memory.clear()
        return []

    new_chat_btn.click(new_chat, outputs=[chatbot])

    def export_handler(history):
        files = export_chat_files(history or [])
        return (
            gr.update(value=files.get("txt"), visible=True),
            gr.update(value=files.get("pdf"), visible=bool(files.get("pdf"))),
        )

    export_btn.click(export_handler, inputs=[chatbot], outputs=[file_txt, file_pdf])
if __name__ == "__main__":
    demo.launch()