tayyab-077 committed on
Commit
3c5bfb7
Β·
1 Parent(s): 07112a0

update

Browse files
Files changed (2) hide show
  1. app.py +101 -59
  2. src/chatbot.py +56 -54
app.py CHANGED
@@ -1,23 +1,19 @@
1
- # app.py β€” Project 2 updated
2
 
3
  import gradio as gr
4
  import os
5
  import tempfile
6
  import textwrap
7
  from datetime import datetime
 
8
  from typing import List, Dict, Any, Optional
9
- from reportlab.lib.pagesizes import A4
10
- from reportlab.pdfgen import canvas
11
- from reportlab.pdfbase import pdfmetrics
12
- from reportlab.pdfbase.ttfonts import TTFont
13
 
14
  from src.model_loader import load_local_model
15
  from src.conversation import ConversationMemory
16
  from src.chatbot import LocalChatbot
17
 
18
- # -------------------------------
19
- # Load LLM and memory
20
- # -------------------------------
21
  llm = load_local_model()
22
  memory = ConversationMemory()
23
  bot = LocalChatbot(llm, memory)
@@ -32,75 +28,105 @@ INTENT_TEMPLATES = {
32
  def now_ts():
33
  return datetime.now().strftime("%Y-%m-%d %H:%M:%S")
34
 
35
- # -------------------------------
36
- # PDF / TXT Export Function
37
- # -------------------------------
 
 
 
 
 
 
 
 
 
38
  def export_chat_files(history: List[Dict[str, any]]) -> Dict[str, Optional[str]]:
39
  tmpdir = tempfile.gettempdir()
40
  timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
41
  txt_path = os.path.join(tmpdir, f"chat_history_{timestamp}.txt")
42
  pdf_path = os.path.join(tmpdir, f"chat_history_{timestamp}.pdf")
43
 
44
- # ---------------- TXT ----------------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
  with open(txt_path, "w", encoding="utf-8") as f:
46
  for msg in history:
47
- content = str(msg.get("content", ""))
 
 
 
 
 
48
  lines = content.splitlines()
49
- lines = [l.strip() for l in lines if l.strip() and not l.strip().startswith("πŸ•’")]
50
- for line in lines:
51
- f.write(line + "\n")
52
  f.write("-" * 60 + "\n\n")
53
 
54
- # ---------------- PDF ----------------
55
  try:
56
- # Register bold font for role labels
57
- pdfmetrics.registerFont(TTFont("Helvetica-Bold", "Helvetica-Bold.ttf"))
58
-
59
  c = canvas.Canvas(pdf_path, pagesize=A4)
60
  page_width, page_height = A4
61
  margin = 40
 
62
  line_height = 14
63
  font_size = 10
64
 
65
- textobject = c.beginText(margin, page_height - margin)
66
- textobject.setFont("Helvetica", font_size)
67
 
68
  for msg in history:
69
  role = msg.get("role", "user").capitalize()
70
- content = str(msg.get("content", ""))
 
 
 
 
 
71
  lines = content.splitlines()
72
  clean_lines = [l.strip() for l in lines if l.strip()]
 
73
 
74
  for line in clean_lines:
75
- wrapped = textwrap.wrap(line, width=100)
76
  for wline in wrapped:
77
- if textobject.getY() < margin:
78
- c.drawText(textobject)
79
  c.showPage()
80
- textobject = c.beginText(margin, page_height - margin)
81
- textobject.setFont("Helvetica", font_size)
 
 
82
 
83
- if role == "User":
84
- textobject.setFont("Helvetica-Bold", font_size)
85
- textobject.textLine(f"{role}: {wline}")
86
- textobject.setFont("Helvetica", font_size)
87
- else:
88
- textobject.textLine(wline)
89
 
90
- textobject.textLine("") # spacing between messages
91
-
92
- c.drawText(textobject)
93
  c.showPage()
94
  c.save()
95
  except Exception as e:
96
  print("PDF export failed:", e)
97
  pdf_path = None
98
-
99
  return {"txt": txt_path, "pdf": pdf_path}
100
 
101
- # -------------------------------
102
- # Chat reply function
103
- # -------------------------------
104
  def generate_reply(user_msg: str, history: Optional[List[Dict[str, Any]]]):
105
  if history is None:
106
  history = []
@@ -108,7 +134,6 @@ def generate_reply(user_msg: str, history: Optional[List[Dict[str, Any]]]):
108
  if not user_msg.strip():
109
  return history
110
 
111
- # Detect intent from message start
112
  intent = None
113
  low = user_msg.lower()
114
  for key in INTENT_TEMPLATES:
@@ -117,19 +142,19 @@ def generate_reply(user_msg: str, history: Optional[List[Dict[str, Any]]]):
117
  user_msg = user_msg[len(key):].strip()
118
  break
119
 
 
120
  system_prefix = INTENT_TEMPLATES.get(intent, None)
121
  if system_prefix:
122
  prompt = f"{system_prefix}\nUser: {user_msg}"
123
  else:
124
  prompt = f"User: {user_msg}"
125
 
126
- bot_reply = bot.ask(user_msg) # intent auto-detected
127
-
128
  ts = now_ts()
129
  bot_reply_ts = f"{bot_reply}\n\nπŸ•’ {ts}"
130
 
131
- history.append({"role": "user", "content": str(user_msg)})
132
- history.append({"role": "assistant", "content": str(bot_reply_ts)})
133
 
134
  try:
135
  memory.add(user_msg, bot_reply)
@@ -138,9 +163,10 @@ def generate_reply(user_msg: str, history: Optional[List[Dict[str, Any]]]):
138
 
139
  return history
140
 
141
- # -------------------------------
142
- # Custom CSS
143
- # -------------------------------
 
144
  CUSTOM_CSS = """
145
  /* GLOBAL */
146
  .gradio-container {
@@ -179,6 +205,7 @@ CUSTOM_CSS = """
179
  padding: 12px;
180
  }
181
 
 
182
  /* Input box */
183
  #message-box textarea {
184
  background: #e0e7ff !important;
@@ -192,8 +219,9 @@ CUSTOM_CSS = """
192
  color: white !important;
193
  transition: background 0.2s ease, transform 0.2s ease;
194
  }
 
195
  .send-btn:hover {
196
- background: #4338ca !important;
197
  transform: scale(1.05);
198
  }
199
 
@@ -202,15 +230,18 @@ CUSTOM_CSS = """
202
  background: #f1f5f9 !important;
203
  transition: background 0.2s ease, transform 0.2s ease;
204
  }
 
205
  .icon-btn:hover {
206
- background: #e2e8f0 !important;
207
  transform: scale(1.05);
208
  }
 
209
  """
210
 
211
- # -------------------------------
212
- # JS (Voice)
213
- # -------------------------------
 
214
  PAGE_JS = """
215
  <script>
216
  (function(){
@@ -229,6 +260,8 @@ PAGE_JS = """
229
 
230
  recog.onresult = function(e){
231
  textarea.value = e.results[0][0].transcript;
 
 
232
  textarea.style.background = "#e7f5ff";
233
  setTimeout(() => { textarea.style.background = ""; }, 400);
234
  };
@@ -239,15 +272,19 @@ PAGE_JS = """
239
  </script>
240
  """
241
 
242
- # -------------------------------
243
- # Gradio UI
244
- # -------------------------------
245
  with gr.Blocks(title="Tayyab β€” Chatbot") as demo:
246
- gr.HTML(f"<style>{CUSTOM_CSS}</style><script>{PAGE_JS}</script>")
 
 
 
247
 
248
  with gr.Row():
249
  with gr.Column(scale=1, min_width=220):
250
  gr.Markdown("### ⚑ Tools & Export")
 
251
  new_chat_btn = gr.Button("βž• New Chat")
252
  export_btn = gr.Button("πŸ“₯ Export TXT/PDF")
253
 
@@ -278,14 +315,19 @@ with gr.Blocks(title="Tayyab β€” Chatbot") as demo:
278
 
279
  new_chat_btn.click(new_chat, outputs=[chatbot])
280
 
 
281
  def export_handler(history):
 
282
  files = export_chat_files(history or [])
 
283
  return (
284
  gr.update(value=files.get("txt"), visible=True),
285
  gr.update(value=files.get("pdf"), visible=bool(files.get("pdf")))
286
  )
287
 
288
- export_btn.click(export_handler, inputs=[chatbot], outputs=[file_txt, file_pdf])
289
 
 
 
 
290
  if __name__ == "__main__":
291
  demo.launch()
 
1
+ # app.py β€” from local backup
2
 
3
  import gradio as gr
4
  import os
5
  import tempfile
6
  import textwrap
7
  from datetime import datetime
8
+ from pathlib import Path
9
  from typing import List, Dict, Any, Optional
 
 
 
 
10
 
11
  from src.model_loader import load_local_model
12
  from src.conversation import ConversationMemory
13
  from src.chatbot import LocalChatbot
14
 
15
+
16
+
 
17
  llm = load_local_model()
18
  memory = ConversationMemory()
19
  bot = LocalChatbot(llm, memory)
 
28
  def now_ts():
29
  return datetime.now().strftime("%Y-%m-%d %H:%M:%S")
30
 
31
+
32
+ # ----------------------
33
+ # EXPORT TXT/PDF
34
+ # ----------------------
35
+ import os
36
+ import tempfile
37
+ import textwrap
38
+ from datetime import datetime
39
+ from typing import List, Dict, Optional
40
+ from reportlab.lib.pagesizes import A4
41
+ from reportlab.pdfgen import canvas
42
+
43
def export_chat_files(history: List[Dict[str, Any]]) -> Dict[str, Optional[str]]:
    """Export a chat history to TXT and PDF files in the system temp directory.

    Parameters
    ----------
    history:
        List of message dicts of the form ``{"role": ..., "content": ...}``.
        ``content`` may be a plain string or a dict carrying a ``"text"`` key
        (Gradio multimodal message format).

    Returns
    -------
    dict
        ``{"txt": <path>, "pdf": <path or None>}``; ``"pdf"`` is ``None`` when
        PDF generation fails (e.g. reportlab unavailable).
    """
    tmpdir = tempfile.gettempdir()
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    txt_path = os.path.join(tmpdir, f"chat_history_{timestamp}.txt")
    pdf_path = os.path.join(tmpdir, f"chat_history_{timestamp}.pdf")

    # Built once, not per message (the original rebuilt this list on every call).
    closing_keywords = [
        "let me know", "is there anything else",
        "anything else i can help", "feel free to ask",
        "hope this helps", "need further assistance",
        "feel free", "happy to help",
        "hello! how can i assist you today?",
        "are there any specific industries or areas you'd like to explore in more detail?",
        "how can i help you better?", "what did you like about our interaction?",
        "do you have any feedback on your experience?", "would you like to explore",
        "need clarification"
    ]

    def remove_last_closing_line(lines):
        # Drop a trailing boiler-plate "closing" line such as "let me know ...".
        if lines and any(k in lines[-1].lower().strip() for k in closing_keywords):
            return lines[:-1]
        return lines

    def message_lines(msg):
        # Normalize one message's content to a list of clean, non-empty lines,
        # dropping πŸ•’ timestamp lines and a trailing closing phrase.
        content_data = msg.get("content", "")
        if isinstance(content_data, dict):
            content = content_data.get("text", "")
        else:
            content = str(content_data)
        lines = [l.strip() for l in content.splitlines()
                 if l.strip() and not l.strip().startswith("πŸ•’")]
        return remove_last_closing_line(lines)

    # ---------------- TXT FILE ----------------
    with open(txt_path, "w", encoding="utf-8") as f:
        for msg in history:
            f.write("\n".join(message_lines(msg)) + "\n\n")
            f.write("-" * 60 + "\n\n")

    # ---------------- PDF FILE ----------------
    try:
        c = canvas.Canvas(pdf_path, pagesize=A4)
        _, page_height = A4  # page width was unused in the original
        margin = 40
        y = page_height - margin
        line_height = 14
        font_size = 10
        c.setFont("Helvetica", font_size)

        for msg in history:
            role = msg.get("role", "user").capitalize()
            # Consistency fix: strip πŸ•’ timestamp lines here as the TXT export does.
            for line in message_lines(msg):
                for wline in textwrap.wrap(line, width=95):
                    if y < margin + line_height:
                        # Bottom of page reached: start a new one.
                        c.showPage()
                        c.setFont("Helvetica", font_size)
                        y = page_height - margin
                    c.drawString(margin, y, f"{role}: {wline}" if role == "User" else wline)
                    y -= line_height
            y -= line_height  # spacing between messages

        c.showPage()
        c.save()
    except Exception as e:
        # Best-effort: keep the TXT export even if reportlab is missing/broken.
        print("PDF export failed:", e)
        pdf_path = None

    return {"txt": txt_path, "pdf": pdf_path}
126
 
127
+ # ----------------------
128
+ # Core chat function
129
+ # ----------------------
130
  def generate_reply(user_msg: str, history: Optional[List[Dict[str, Any]]]):
131
  if history is None:
132
  history = []
 
134
  if not user_msg.strip():
135
  return history
136
 
 
137
  intent = None
138
  low = user_msg.lower()
139
  for key in INTENT_TEMPLATES:
 
142
  user_msg = user_msg[len(key):].strip()
143
  break
144
 
145
+
146
  system_prefix = INTENT_TEMPLATES.get(intent, None)
147
  if system_prefix:
148
  prompt = f"{system_prefix}\nUser: {user_msg}"
149
  else:
150
  prompt = f"User: {user_msg}"
151
 
152
+ bot_reply = bot.ask(prompt)
 
153
  ts = now_ts()
154
  bot_reply_ts = f"{bot_reply}\n\nπŸ•’ {ts}"
155
 
156
+ history.append({"role": "user", "content": user_msg})
157
+ history.append({"role": "assistant", "content": bot_reply_ts})
158
 
159
  try:
160
  memory.add(user_msg, bot_reply)
 
163
 
164
  return history
165
 
166
+
167
+ # ----------------------
168
+ # CUSTOM CSS
169
+ # ----------------------
170
  CUSTOM_CSS = """
171
  /* GLOBAL */
172
  .gradio-container {
 
205
  padding: 12px;
206
  }
207
 
208
+
209
  /* Input box */
210
  #message-box textarea {
211
  background: #e0e7ff !important;
 
219
  color: white !important;
220
  transition: background 0.2s ease, transform 0.2s ease;
221
  }
222
+
223
  .send-btn:hover {
224
+ background: #4338ca !important; /* darker indigo */
225
  transform: scale(1.05);
226
  }
227
 
 
230
  background: #f1f5f9 !important;
231
  transition: background 0.2s ease, transform 0.2s ease;
232
  }
233
+
234
  .icon-btn:hover {
235
+ background: #e2e8f0 !important; /* slightly darker */
236
  transform: scale(1.05);
237
  }
238
+
239
  """
240
 
241
+ #----------------------
242
+ #JS (Voice only)
243
+ #----------------------
244
+
245
  PAGE_JS = """
246
  <script>
247
  (function(){
 
260
 
261
  recog.onresult = function(e){
262
  textarea.value = e.results[0][0].transcript;
263
+
264
+ // Visual flash to confirm input
265
  textarea.style.background = "#e7f5ff";
266
  setTimeout(() => { textarea.style.background = ""; }, 400);
267
  };
 
272
  </script>
273
  """
274
 
275
+ # ----------------------
276
+ # UI
277
+ # ----------------------
278
  with gr.Blocks(title="Tayyab β€” Chatbot") as demo:
279
+ gr.HTML(f"""
280
+ <style>{CUSTOM_CSS}</style>
281
+ <script>{PAGE_JS}</script>
282
+ """)
283
 
284
  with gr.Row():
285
  with gr.Column(scale=1, min_width=220):
286
  gr.Markdown("### ⚑ Tools & Export")
287
+
288
  new_chat_btn = gr.Button("βž• New Chat")
289
  export_btn = gr.Button("πŸ“₯ Export TXT/PDF")
290
 
 
315
 
316
  new_chat_btn.click(new_chat, outputs=[chatbot])
317
 
318
+
319
  def export_handler(history):
320
+ # history is already list of dicts: [{"role": "...", "content": "..."}]
321
  files = export_chat_files(history or [])
322
+
323
  return (
324
  gr.update(value=files.get("txt"), visible=True),
325
  gr.update(value=files.get("pdf"), visible=bool(files.get("pdf")))
326
  )
327
 
 
328
 
329
+ export_btn.click(export_handler, inputs=[chatbot], outputs=[file_txt, file_pdf])
330
+
331
+
332
  if __name__ == "__main__":
333
  demo.launch()
src/chatbot.py CHANGED
@@ -7,36 +7,29 @@ from src.templates import TEMPLATES
7
  # -------------------------------
8
  # DEFAULT GENERATION ARGUMENTS
9
  # -------------------------------
 
10
  DEFAULT_GEN_ARGS = {
11
- "max_tokens": 350,
12
- "temperature": 0.6,
13
- "top_p": 0.9
14
  }
15
 
 
16
  MSG_SEPARATOR = "\n"
17
 
18
- # -------------------------------
19
- # Local Chatbot Class
20
- # -------------------------------
21
  class LocalChatbot:
22
  def __init__(self, llm, memory, default_template: Optional[str] = "general"):
23
  self.llm = llm
24
  self.memory = memory
25
  self.default_template = default_template
26
 
27
- # ------------------------------------------------
28
- # System Prompt Builder (intent β†’ template)
29
- # ------------------------------------------------
30
- def _build_system_prompt(self, intent: Optional[str]) -> str:
31
- return TEMPLATES.get(
32
- intent,
33
- TEMPLATES.get(self.default_template, TEMPLATES["general"])
34
- )
35
-
36
- # ------------------------------------------------
37
- # Chat Prompt Builder
38
- # ------------------------------------------------
39
- def _build_prompt(self, user_message: str, intent: Optional[str], max_pairs: int = 12) -> str:
40
  try:
41
  self.memory.trim_to_recent_pairs(max_pairs)
42
  except Exception:
@@ -44,59 +37,44 @@ class LocalChatbot:
44
 
45
  system_prompt = self._build_system_prompt(intent)
46
  history_text = self.memory.get_formatted(separator=MSG_SEPARATOR)
47
-
48
  parts = [
49
- f"System: {system_prompt}",
50
- history_text,
51
- f"User: {user_message}",
52
- "Assistant:"
53
- ]
54
- return MSG_SEPARATOR.join([p for p in parts if p])
55
-
56
- # ------------------------------------------------
57
- # Main Ask Function
58
- # ------------------------------------------------
59
- def ask(self, user_message: Any, gen_args: Optional[Dict[str, Any]] = None, intent: Optional[str] = None) -> str:
60
-
61
- # --- FIX #1: Extract text from Gradio compatibility formats ---
62
- if isinstance(user_message, list):
63
- extracted = []
64
- for item in user_message:
65
- if isinstance(item, dict) and "text" in item:
66
- extracted.append(item["text"])
67
- user_message = "\n".join(extracted)
68
-
69
- elif isinstance(user_message, dict) and "text" in user_message:
70
- user_message = user_message["text"]
71
-
72
- user_message = str(user_message).strip()
73
-
74
- if not user_message:
75
  return "Please enter a message."
76
 
77
- # --- FIX #2: Use passed intent, or detect if None ---
78
- if intent is None:
79
- intent = detect_intent(user_message)
80
 
81
  # Build prompt
82
  prompt = self._build_prompt(user_message, intent, max_pairs=12)
83
 
84
- # Merge generation args
 
85
  gen = DEFAULT_GEN_ARGS.copy()
86
  if gen_args:
87
  gen.update(gen_args)
88
 
89
- # Call LLM
90
  try:
91
  output = self.llm(prompt, **gen)
92
  except TypeError:
93
- # fallback: rename max_tokens β†’ max_new_tokens
94
  alt_gen = gen.copy()
95
  if "max_tokens" in alt_gen:
96
  alt_gen["max_new_tokens"] = alt_gen.pop("max_tokens")
97
  output = self.llm(prompt, **alt_gen)
98
 
99
- # Parse output
100
  bot_reply = ""
101
  try:
102
  if isinstance(output, dict) and "choices" in output:
@@ -111,7 +89,7 @@ class LocalChatbot:
111
  if not bot_reply:
112
  bot_reply = "Sorry β€” I couldn't generate a response. Please try again."
113
 
114
- # Store memory
115
  try:
116
  self.memory.add(user_message, bot_reply)
117
  except Exception:
@@ -122,3 +100,27 @@ class LocalChatbot:
122
  pass
123
 
124
  return bot_reply
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  # -------------------------------
8
  # DEFAULT GENERATION ARGUMENTS
9
  # -------------------------------
10
+
11
# Default sampling parameters sent to the local LLM on every generation call.
DEFAULT_GEN_ARGS = {
    "max_tokens": 350,   # upper bound on generated tokens
    "temperature": 0.6,  # moderate sampling randomness
    "top_p": 0.9,        # nucleus-sampling cutoff
}
16
 
17
+
18
  MSG_SEPARATOR = "\n"
19
 
20
+
 
 
21
  class LocalChatbot:
22
  def __init__(self, llm, memory, default_template: Optional[str] = "general"):
23
  self.llm = llm
24
  self.memory = memory
25
  self.default_template = default_template
26
 
27
+ def _build_system_prompt(self, intent: str) -> str:
28
+ # get template for intent
29
+ return TEMPLATES.get(intent, TEMPLATES.get(self.default_template, TEMPLATES["general"]))
30
+
31
+ def _build_prompt(self, user_message: str, intent: str, max_pairs: int = 12) -> str:
32
+ # Trim memory to recent pairs before building prompt
 
 
 
 
 
 
 
33
  try:
34
  self.memory.trim_to_recent_pairs(max_pairs)
35
  except Exception:
 
37
 
38
  system_prompt = self._build_system_prompt(intent)
39
  history_text = self.memory.get_formatted(separator=MSG_SEPARATOR)
40
+
41
  parts = [
42
+ f"System: {system_prompt}",
43
+ history_text,
44
+ f"User: {user_message}",
45
+ "Assistant:"
46
+ ]
47
+ # join non-empty parts
48
+ return MSG_SEPARATOR.join([p for p in parts if p is not None and p != ""])
49
+
50
+
51
+ def ask(self, user_message: str, gen_args: Optional[Dict[str, Any]] = None) -> str:
52
+ if not user_message or not user_message.strip():
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53
  return "Please enter a message."
54
 
55
+ # Detect intent
56
+ intent = detect_intent(user_message)
 
57
 
58
  # Build prompt
59
  prompt = self._build_prompt(user_message, intent, max_pairs=12)
60
 
61
+
62
+ # Merge generation args
63
  gen = DEFAULT_GEN_ARGS.copy()
64
  if gen_args:
65
  gen.update(gen_args)
66
 
67
+ # Attempt to call the LLM (defensive: handle different API variants)
68
  try:
69
  output = self.llm(prompt, **gen)
70
  except TypeError:
71
+ # fallback mapping: map max_tokens -> max_new_tokens
72
  alt_gen = gen.copy()
73
  if "max_tokens" in alt_gen:
74
  alt_gen["max_new_tokens"] = alt_gen.pop("max_tokens")
75
  output = self.llm(prompt, **alt_gen)
76
 
77
+ # Parse the output robustly
78
  bot_reply = ""
79
  try:
80
  if isinstance(output, dict) and "choices" in output:
 
89
  if not bot_reply:
90
  bot_reply = "Sorry β€” I couldn't generate a response. Please try again."
91
 
92
+ # Add to memory
93
  try:
94
  self.memory.add(user_message, bot_reply)
95
  except Exception:
 
100
  pass
101
 
102
  return bot_reply
103
+
104
+
105
+
106
+
107
+ # # Create the generation args HERE
108
+ # gen = DEFAULT_GEN_ARGS.copy()
109
+ # if gen_args:
110
+ # gen.update(gen_args)
111
+
112
+ # response = self.llm(
113
+ # prompt,
114
+ # max_tokens=gen["max_tokens"],
115
+ # temperature=gen["temperature"],
116
+ # top_p=gen["top_p"],
117
+
118
+ # stop=["</system>", "\nUser:", "\nUser says:", "\nSystem:", "\nAssistant:", "\nYou:"]
119
+
120
+
121
+ # )
122
+
123
+ # reply = response["choices"][0]["text"].strip()
124
+
125
+ # self.memory.add(user_message, reply)
126
+ # return reply