kunjshah01 commited on
Commit
31c11ee
·
verified ·
1 Parent(s): 16cb698

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -71
app.py CHANGED
@@ -3,46 +3,31 @@ import requests
3
  import json
4
  import datetime
5
  import os
6
- from dotenv import load_dotenv
7
-
8
- # Load environment variables from .env file
9
- load_dotenv(dotenv_path=os.path.join(os.path.dirname(__file__), ".env"))
10
-
11
- # Nebius API configuration
12
- NEBIUS_API_URL = os.getenv("NEBIUS_API_URL")
13
- NEBIUS_API_KEY = os.getenv("NEBIUS_API_KEY")
14
 
 
 
 
15
 
16
  # --- MCP Protocol Support ---
17
- # This is a placeholder for MCP integration. In a real scenario, you would use the MCP protocol to wrap/unwrap requests and responses.
18
  def mcp_supported_call(payload, endpoint, headers):
19
- # Here, you could add MCP-specific headers or payload structure if needed
20
- # For now, this just passes through to the Nebius API
21
  response = requests.post(endpoint, json=payload, headers=headers)
22
  return response
23
 
24
-
25
- # Function to call Nebius API directly (now MCP supported)
26
  def call_nebius_api(query, context_data=""):
27
  try:
28
- # Prepare payload for Nebius API
29
  nebius_payload = {
30
  "model": "meta-llama/Meta-Llama-3.1-70B-Instruct",
31
  "messages": [{"role": "user", "content": query}],
32
  "max_tokens": 1000,
33
  "temperature": 0.7,
34
  }
35
-
36
- # Call Nebius API
37
  headers = {
38
  "Authorization": f"Bearer {NEBIUS_API_KEY}",
39
  "Content-Type": "application/json",
40
  }
41
  response = mcp_supported_call(nebius_payload, NEBIUS_API_URL, headers)
42
-
43
  if response.status_code != 200:
44
  return f"Error: Nebius API request failed - {response.text}"
45
-
46
  nebius_response = response.json()
47
  result = (
48
  nebius_response.get("choices", [{}])[0]
@@ -50,12 +35,9 @@ def call_nebius_api(query, context_data=""):
50
  .get("content", "No response")
51
  )
52
  return result
53
-
54
  except Exception as e:
55
  return f"Error: {str(e)}"
56
 
57
-
58
- # Function to humanize AI text using another API call with a different model (now MCP supported)
59
  def humanize_text(ai_response):
60
  try:
61
  humanize_prompt = f"""Please rewrite the following AI response to make it sound more natural, conversational, and human-like.
@@ -65,39 +47,28 @@ def humanize_text(ai_response):
65
  {ai_response}
66
 
67
  Humanized version:"""
68
-
69
- # Use DeepSeek model for humanization - excellent at conversational and creative responses
70
  nebius_payload = {
71
- "model": "deepseek-ai/DeepSeek-R1", # DeepSeek model for humanization
72
  "messages": [{"role": "user", "content": humanize_prompt}],
73
  "max_tokens": 1200,
74
- "temperature": 0.9, # Higher temperature for more creative/human-like responses
75
  }
76
-
77
  headers = {
78
  "Authorization": f"Bearer {NEBIUS_API_KEY}",
79
  "Content-Type": "application/json",
80
  }
81
  response = mcp_supported_call(nebius_payload, NEBIUS_API_URL, headers)
82
-
83
  if response.status_code != 200:
84
- return ai_response # Return original response if humanization fails
85
-
86
  nebius_response = response.json()
87
  humanized_result = (
88
  nebius_response.get("choices", [{}])[0]
89
  .get("message", {})
90
  .get("content", ai_response)
91
  )
92
- # Only return the humanized response, not the prompt or any instructions
93
- # Remove everything before the first line break if the model echoes the prompt or instructions
94
  if "Humanized version:" in humanized_result:
95
- humanized_result = humanized_result.split("Humanized version:", 1)[
96
- -1
97
- ].strip()
98
- # Remove any leading prompt/instruction lines (e.g., if model repeats the prompt or says what it's doing)
99
  lines = humanized_result.splitlines()
100
- # Remove lines that look like instructions or meta-comments
101
  filtered_lines = [
102
  line
103
  for line in lines
@@ -118,43 +89,30 @@ def humanize_text(ai_response):
118
  )
119
  )
120
  ]
121
- # Join the remaining lines, strip leading/trailing whitespace
122
  cleaned = "\n".join(filtered_lines).strip()
123
- # If nothing left after cleaning, fall back to the original humanized_result
124
  return cleaned if cleaned else humanized_result
125
-
126
  except Exception as e:
127
- return ai_response # Return original response if humanization fails
128
-
129
 
130
- # --- Additional Functionality ---
131
  def save_conversation(query, ai_response, humanized_response, context_data):
132
- """Save the conversation to a local file with timestamp."""
133
  timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
134
  with open("conversation_history.txt", "a", encoding="utf-8") as f:
135
  f.write(
136
  f"[{timestamp}]\nQuery: {query}\nContext: {context_data}\nAI Response: {ai_response}\nHumanized: {humanized_response}\n{'-' * 40}\n"
137
  )
138
 
139
-
140
  def clear_history():
141
- """Clear the conversation history file."""
142
  open("conversation_history.txt", "w").close()
143
  return "History cleared."
144
 
145
-
146
  def load_history():
147
- """Load the conversation history file."""
148
  try:
149
  with open("conversation_history.txt", "r", encoding="utf-8") as f:
150
  return f.read()
151
  except FileNotFoundError:
152
  return "No history found."
153
 
154
-
155
- # --- More Advanced Features ---
156
  def export_history_to_file(filename="conversation_export.txt"):
157
- """Export the conversation history to a user-specified file."""
158
  try:
159
  with (
160
  open("conversation_history.txt", "r", encoding="utf-8") as src,
@@ -165,9 +123,7 @@ def export_history_to_file(filename="conversation_export.txt"):
165
  except Exception as e:
166
  return f"Export failed: {e}"
167
 
168
-
169
  def search_history(keyword):
170
- """Search for a keyword in the conversation history."""
171
  try:
172
  with open("conversation_history.txt", "r", encoding="utf-8") as f:
173
  lines = f.readlines()
@@ -176,14 +132,12 @@ def search_history(keyword):
176
  except FileNotFoundError:
177
  return "No history found."
178
 
179
-
180
  def delete_last_conversation():
181
- """Delete the last conversation from the history file."""
182
  try:
183
  with open("conversation_history.txt", "r", encoding="utf-8") as f:
184
  content = f.read().strip().split("-" * 40)
185
  if len(content) > 1:
186
- content = content[:-1] # Remove last conversation
187
  with open("conversation_history.txt", "w", encoding="utf-8") as f:
188
  f.write(("-" * 40).join(content).strip())
189
  return "Last conversation deleted."
@@ -193,28 +147,18 @@ def delete_last_conversation():
193
  except FileNotFoundError:
194
  return "No history found."
195
 
196
-
197
- # Gradio interface function
198
  def gradio_interface(query, context_data, humanize=False, save=False):
199
  if not query.strip():
200
  return "Please enter a query.", "", load_history()
201
-
202
- # Get initial AI response
203
  ai_response = call_nebius_api(query, context_data)
204
-
205
- # If humanization is enabled and we got a valid response, humanize it
206
  if humanize and not ai_response.startswith("Error:"):
207
  humanized_response = humanize_text(ai_response)
208
  else:
209
  humanized_response = ""
210
-
211
  if save:
212
  save_conversation(query, ai_response, humanized_response, context_data)
213
-
214
  return ai_response, humanized_response, load_history()
215
 
216
-
217
- # Create Gradio UI
218
  def create_gradio_app():
219
  with gr.Blocks() as demo:
220
  gr.Markdown("# MCP-Powered Chatbot with Nebius API & Text Humanization")
@@ -265,7 +209,6 @@ def create_gradio_app():
265
  search_result = gr.Textbox(
266
  label="Search Results", value="", lines=5, interactive=False
267
  )
268
- # Add event handlers for new features
269
  submit_button.click(
270
  fn=gradio_interface,
271
  inputs=[query_input, context_input, humanize_checkbox, save_checkbox],
@@ -303,15 +246,14 @@ def create_gradio_app():
303
 
304
  return demo
305
 
306
-
307
  if __name__ == "__main__":
308
  print("Starting Gradio Interface...")
309
  try:
310
  demo = create_gradio_app()
311
  print("Gradio app created successfully")
312
  demo.launch(
313
- server_name="127.0.0.1", # Changed to localhost only
314
- server_port=7870, # Moved to 7870 to avoid conflicts on the default 7860
315
  share=False,
316
  debug=True,
317
  show_error=True,
@@ -319,5 +261,4 @@ if __name__ == "__main__":
319
  except Exception as e:
320
  print(f"Error launching Gradio app: {e}")
321
  import traceback
322
-
323
- traceback.print_exc()
 
3
  import json
4
  import datetime
5
  import os
 
 
 
 
 
 
 
 
6
 
7
# Nebius API configuration.
# SECURITY: credentials must come from the environment, never from source
# control. A bearer token was previously hardcoded here and committed
# publicly — that key must be revoked/rotated in the Nebius console.
NEBIUS_API_URL = os.getenv(
    "NEBIUS_API_URL", "https://api.studio.nebius.ai/v1/chat/completions"
)
# No fallback on purpose: an empty key fails fast at the API instead of
# silently using a leaked credential.
NEBIUS_API_KEY = os.getenv("NEBIUS_API_KEY", "")
10
 
11
# --- MCP Protocol Support ---
def mcp_supported_call(payload, endpoint, headers, timeout=30):
    """POST *payload* as JSON to *endpoint* and return the raw response.

    Single choke-point for outbound API calls so MCP-specific request or
    response wrapping can be added later in one place.

    Args:
        payload: JSON-serializable request body.
        endpoint: Full URL of the API endpoint.
        headers: HTTP headers (authorization, content type).
        timeout: Seconds before the request is aborted. Added because
            ``requests.post`` without a timeout can block forever on a
            stalled connection; default keeps existing callers working.

    Returns:
        The ``requests.Response`` object. HTTP errors are NOT raised here;
        callers inspect ``status_code`` themselves.
    """
    response = requests.post(endpoint, json=payload, headers=headers, timeout=timeout)
    return response
15
 
 
 
16
def call_nebius_api(query, context_data=""):
    """Send *query* to the Nebius chat-completions API and return the reply.

    Args:
        query: The user's question/prompt.
        context_data: Optional background text. Previously this parameter
            was accepted but never used; it is now prepended to the prompt
            so the UI's context box actually influences the answer.

    Returns:
        The model's reply text, or an ``"Error: ..."`` string on any
        failure — callers check the prefix rather than catching exceptions.
    """
    try:
        # Fold optional context into the single user message.
        if context_data.strip():
            content = f"Context:\n{context_data}\n\nQuestion:\n{query}"
        else:
            content = query
        nebius_payload = {
            "model": "meta-llama/Meta-Llama-3.1-70B-Instruct",
            "messages": [{"role": "user", "content": content}],
            "max_tokens": 1000,
            "temperature": 0.7,
        }
        headers = {
            "Authorization": f"Bearer {NEBIUS_API_KEY}",
            "Content-Type": "application/json",
        }
        response = mcp_supported_call(nebius_payload, NEBIUS_API_URL, headers)
        if response.status_code != 200:
            return f"Error: Nebius API request failed - {response.text}"
        nebius_response = response.json()
        # Defensive extraction: missing keys degrade to "No response"
        # instead of raising.
        result = (
            nebius_response.get("choices", [{}])[0]
            .get("message", {})
            .get("content", "No response")
        )
        return result
    except Exception as e:
        return f"Error: {str(e)}"
40
 
 
 
41
  def humanize_text(ai_response):
42
  try:
43
  humanize_prompt = f"""Please rewrite the following AI response to make it sound more natural, conversational, and human-like.
 
47
  {ai_response}
48
 
49
  Humanized version:"""
 
 
50
  nebius_payload = {
51
+ "model": "deepseek-ai/DeepSeek-R1",
52
  "messages": [{"role": "user", "content": humanize_prompt}],
53
  "max_tokens": 1200,
54
+ "temperature": 0.9,
55
  }
 
56
  headers = {
57
  "Authorization": f"Bearer {NEBIUS_API_KEY}",
58
  "Content-Type": "application/json",
59
  }
60
  response = mcp_supported_call(nebius_payload, NEBIUS_API_URL, headers)
 
61
  if response.status_code != 200:
62
+ return ai_response
 
63
  nebius_response = response.json()
64
  humanized_result = (
65
  nebius_response.get("choices", [{}])[0]
66
  .get("message", {})
67
  .get("content", ai_response)
68
  )
 
 
69
  if "Humanized version:" in humanized_result:
70
+ humanized_result = humanized_result.split("Humanized version:", 1)[-1].strip()
 
 
 
71
  lines = humanized_result.splitlines()
 
72
  filtered_lines = [
73
  line
74
  for line in lines
 
89
  )
90
  )
91
  ]
 
92
  cleaned = "\n".join(filtered_lines).strip()
 
93
  return cleaned if cleaned else humanized_result
 
94
  except Exception as e:
95
+ return ai_response
 
96
 
 
97
def save_conversation(query, ai_response, humanized_response, context_data):
    """Append one timestamped conversation record to conversation_history.txt."""
    stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    record = (
        f"[{stamp}]\n"
        f"Query: {query}\n"
        f"Context: {context_data}\n"
        f"AI Response: {ai_response}\n"
        f"Humanized: {humanized_response}\n"
        f"{'-' * 40}\n"
    )
    with open("conversation_history.txt", "a", encoding="utf-8") as f:
        f.write(record)
103
 
 
104
def clear_history():
    """Truncate the conversation history file and report success."""
    # Opening in "w" mode empties the file; the context manager closes it.
    with open("conversation_history.txt", "w"):
        pass
    return "History cleared."
107
 
 
108
def load_history():
    """Return the saved conversation history, or a notice if none exists yet."""
    try:
        f = open("conversation_history.txt", "r", encoding="utf-8")
    except FileNotFoundError:
        return "No history found."
    with f:
        return f.read()
114
 
 
 
115
  def export_history_to_file(filename="conversation_export.txt"):
 
116
  try:
117
  with (
118
  open("conversation_history.txt", "r", encoding="utf-8") as src,
 
123
  except Exception as e:
124
  return f"Export failed: {e}"
125
 
 
126
  def search_history(keyword):
 
127
  try:
128
  with open("conversation_history.txt", "r", encoding="utf-8") as f:
129
  lines = f.readlines()
 
132
  except FileNotFoundError:
133
  return "No history found."
134
 
 
135
  def delete_last_conversation():
 
136
  try:
137
  with open("conversation_history.txt", "r", encoding="utf-8") as f:
138
  content = f.read().strip().split("-" * 40)
139
  if len(content) > 1:
140
+ content = content[:-1]
141
  with open("conversation_history.txt", "w", encoding="utf-8") as f:
142
  f.write(("-" * 40).join(content).strip())
143
  return "Last conversation deleted."
 
147
  except FileNotFoundError:
148
  return "No history found."
149
 
 
 
150
def gradio_interface(query, context_data, humanize=False, save=False):
    """Bridge between the Gradio UI and the API helpers.

    Returns a 3-tuple: (raw AI response, humanized response or "",
    current conversation history text).
    """
    # Guard clause: nothing to do for a blank query.
    if not query.strip():
        return "Please enter a query.", "", load_history()

    ai_response = call_nebius_api(query, context_data)

    humanized_response = ""
    # Only humanize successful responses; error strings pass through as-is.
    if humanize and not ai_response.startswith("Error:"):
        humanized_response = humanize_text(ai_response)

    if save:
        save_conversation(query, ai_response, humanized_response, context_data)

    return ai_response, humanized_response, load_history()
161
 
 
 
162
  def create_gradio_app():
163
  with gr.Blocks() as demo:
164
  gr.Markdown("# MCP-Powered Chatbot with Nebius API & Text Humanization")
 
209
  search_result = gr.Textbox(
210
  label="Search Results", value="", lines=5, interactive=False
211
  )
 
212
  submit_button.click(
213
  fn=gradio_interface,
214
  inputs=[query_input, context_input, humanize_checkbox, save_checkbox],
 
246
 
247
  return demo
248
 
 
249
if __name__ == "__main__":
    print("Starting Gradio Interface...")
    try:
        demo = create_gradio_app()
        print("Gradio app created successfully")
        launch_options = {
            "server_name": "127.0.0.1",  # bind to localhost only
            "server_port": 7870,
            "share": False,
            "debug": True,
            "show_error": True,
        }
        demo.launch(**launch_options)
    except Exception as e:
        print(f"Error launching Gradio app: {e}")
        import traceback

        traceback.print_exc()