ahfmrptEkd committed
Commit 48ac4f0 · 1 Parent(s): 81917a3

Update app.py

Files changed (6)
  1. .env.example +5 -0
  2. .gitignore +5 -0
  3. agents.py +184 -0
  4. app.py +30 -7
  5. requirements.txt +15 -1
  6. tools.py +672 -0
.env.example ADDED
@@ -0,0 +1,5 @@
+ # Google Gemini API Key
+ GOOGLE_API_KEY="your-api-key"
+
+ # GEMINI_API_KEY for agents.py
+ GEMINI_API_KEY="your-api-key"
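Both keys are read at import time with python-dotenv in agents.py and tools.py; locally they come from this .env file, while on HF Spaces the same names are set as repository secrets. A minimal sketch of how the values are picked up (this mirrors the pattern the modules already use, nothing new is introduced):

```python
import os
from dotenv import load_dotenv  # provided by python-dotenv in requirements.txt

load_dotenv()  # reads .env from the current working directory, if present

gemini_key = os.getenv("GEMINI_API_KEY")   # used by agents.py and google_grounding_search
google_key = os.getenv("GOOGLE_API_KEY")   # used by process_youtube_video in tools.py

if not gemini_key:
    # agents.py raises the same error when the key is missing
    raise ValueError("GEMINI_API_KEY not found in environment variables")
```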
.gitignore ADDED
@@ -0,0 +1,5 @@
+ .env
+ venv/
+ __pycache__/
+ *.pyc
+ .DS_Store
agents.py ADDED
@@ -0,0 +1,184 @@
+ from typing import TypedDict, Annotated
+ from langgraph.graph.message import add_messages
+ from langchain_core.messages import AnyMessage, HumanMessage, AIMessage
+ from langgraph.prebuilt import ToolNode
+ from langgraph.graph import START, StateGraph
+ from langgraph.prebuilt import tools_condition
+ from langchain_google_genai import ChatGoogleGenerativeAI
+ import os
+ from dotenv import load_dotenv
+ from tools import google_grounding_search, execute_python, process_image, download_files_from_api, process_code_file, process_csv, process_pdf, process_excel, process_archive, read_text_file, process_audio, process_youtube_video
+
+ load_dotenv()
+
+ # System prompt for GAIA benchmark
+ SYSTEM_PROMPT = """You are an expert AI assistant designed to solve GAIA benchmark questions. Your primary goal is to provide accurate, concise, and precisely formatted answers.
+ ANSWER FORMAT RULES:
+ - Provide ONLY the final answer. Do NOT include any prefixes like "FINAL ANSWER:", "Answer:", or "The answer is:".
+ - If the answer is a number: Do NOT include commas or units (unless the question explicitly asks for units). Provide only the numerical value.
+ - If the answer is a string: Do NOT include articles (a/an/the) or abbreviations. Digits should be in plain text (e.g., "one", "two").
+ - If the answer is a list: Provide a comma-separated list. Each element in the list must adhere to the above rules for numbers or strings.
+ PROBLEM-SOLVING APPROACH:
+ 1. Analyze the question carefully to understand the core problem and required output format.
+ 2. Devise a comprehensive plan to solve the problem, considering all necessary steps and aiming to minimize tool calls for efficiency.
+ 3. Determine which tools are necessary to gather information, perform calculations, or process data.
+ 4. Execute tools step-by-step, verifying intermediate results.
+ 5. Synthesize information from tool outputs to formulate the final answer.
+ 6. Ensure the final answer strictly adheres to the ANSWER FORMAT RULES.
+ TOOLS AVAILABLE:
+ - google_grounding_search(query: str): Use this for general web searches, current events, or information not available in your training data.
+ - execute_python(code: str): Use this for complex calculations, data manipulation, or running Python scripts.
+ - process_image(image_path: str): Use this to analyze local image files, extract text, or get visual descriptions.
+ - download_files_from_api(task_id: str, file_extension: str = None): Use this ONLY when the question explicitly mentions files, attachments, or uploaded content associated with a task ID.
+ - process_code_file(code_file_path: str): Use this to read and execute local code files (currently supports Python).
+ - process_csv(csv_path: str, operation: str = "summary", params: dict = None): Use this to analyze and extract data from local CSV files.
+ - process_pdf(pdf_path: str): Use this to extract text content from local PDF files.
+ - process_excel(excel_path: str, operation: str = "summary", params: dict = None): Use this to analyze and extract data from local Excel files.
+ - process_archive(archive_path: str, operation: str = "list", extract_to: str = None): Use this to list or extract contents of local .zip archive files.
+ - read_text_file(file_path: str): Use this to read the content of any local text-based file (e.g., .txt, .md, .json).
+ - process_audio(audio_path: str): Use this to transcribe and analyze local audio files.
+ - process_youtube_video(url: str, question: str): Use this ONLY when a YouTube URL is provided in the question to analyze video content.
+ Be precise and methodical in your approach. Your answer will be compared for exact match against the benchmark solution."""
+
+ class GaiaAgent:
+     def __init__(self):
+         """Initialize the GAIA agent with Gemini and tools"""
+
+         # Get API key - works both locally (.env) and on HF Spaces (secrets)
+         self.api_key = os.getenv("GEMINI_API_KEY")
+         if not self.api_key:
+             raise ValueError("GEMINI_API_KEY not found in environment variables")
+
+         # Initialize chat model
+         self.chat = ChatGoogleGenerativeAI(
+             model="gemini-2.5-flash",
+             temperature=0.1,  # Lower temperature for consistent answers
+         )
+
+         # Set up tools
+         self.tools = [
+             google_grounding_search,
+             execute_python,
+             process_image,
+             download_files_from_api,
+             process_code_file,
+             process_csv,
+             process_pdf,
+             process_excel,
+             process_archive,
+             read_text_file,
+             process_audio,
+             process_youtube_video
+         ]
+         self.chat_with_tools = self.chat.bind_tools(self.tools)
+
+         # Build the LangGraph workflow
+         self.agent = self._build_agent()
+
+     def _build_agent(self):
+         """Build the LangGraph agent workflow"""
+
+         # Define agent state
+         class AgentState(TypedDict):
+             messages: Annotated[list[AnyMessage], add_messages]
+
+         def assistant(state: AgentState):
+             """Main assistant node"""
+             return {
+                 "messages": [self.chat_with_tools.invoke(state["messages"])],
+             }
+
+         # Build the graph
+         builder = StateGraph(AgentState)
+
+         # Define nodes
+         builder.add_node("assistant", assistant)
+         builder.add_node("tools", ToolNode(self.tools))
+
+         # Define edges
+         builder.add_edge(START, "assistant")
+         builder.add_conditional_edges(
+             "assistant",
+             tools_condition,  # If tools needed, go to tools; otherwise end
+         )
+         builder.add_edge("tools", "assistant")
+
+         return builder.compile()
+
+     def __call__(self, question: str) -> str:
+         """Main interface for app.py - solve a question and return a clean answer"""
+         return self.solve_question(question)
+
+     def solve_question(self, question: str) -> str:
+         """
+         Solve a GAIA question and return the final answer
+
+         Args:
+             question (str): The GAIA question to solve (may include TASK_ID metadata)
+
+         Returns:
+             str: Clean final answer for exact match scoring
+         """
+         try:
+             # Extract task_id if present in the question format
+             task_id = None
+             actual_question = question
+
+             if question.startswith("TASK_ID:"):
+                 lines = question.split("\n", 2)
+                 if len(lines) >= 3 and lines[1] == "" and lines[2].startswith("QUESTION:"):
+                     task_id = lines[0].replace("TASK_ID:", "").strip()
+                     actual_question = lines[2].replace("QUESTION:", "").strip()
+                     print(f"Extracted task_id: {task_id}")
+
+             # Create enhanced system prompt with task_id context if available
+             system_prompt = SYSTEM_PROMPT
+             if task_id:
+                 system_prompt += f"\n\nIMPORTANT: This question has task_id '{task_id}'. ONLY use the download_files_from_api tool if the question explicitly references files, attachments, or uploaded content (e.g., 'in the image', 'attached file', 'spreadsheet', 'document', 'audio file'). Do not attempt to download files for general knowledge questions."
+
+             # Create initial message with system prompt and actual question
+             messages = [
+                 HumanMessage(content=f"{system_prompt}\n\nQuestion: {actual_question}")
+             ]
+
+             # Run the agent
+             response = self.agent.invoke({"messages": messages})
+
+             # Extract the final answer from the last message
+             final_message = response['messages'][-1]
+             final_answer = final_message.content.strip()
+
+             # Clean up the answer - remove any potential prefixes
+             prefixes_to_remove = [
+                 "FINAL ANSWER:",
+                 "Final Answer:",
+                 "Answer:",
+                 "The answer is:",
+                 "The final answer is:",
+                 "Result:",
+             ]
+
+             for prefix in prefixes_to_remove:
+                 if final_answer.startswith(prefix):
+                     final_answer = final_answer[len(prefix):].strip()
+
+             return final_answer
+
+         except Exception as e:
+             print(f"Error solving question: {e}")
+             return f"Error: Unable to solve question - {str(e)}"
+
+ # For backward compatibility and testing
+ def create_agent():
+     """Factory function to create a GAIA agent"""
+     return GaiaAgent()
+
+ # For direct testing (remove this section before deployment if desired)
+ if __name__ == "__main__":
+     try:
+         agent = GaiaAgent()
+         test_question = "search the web for the 42nd president and their wife's name"
+         result = agent.solve_question(test_question)
+         print(f"Test result: {result}")
+     except Exception as e:
+         print(f"Test failed: {e}")
app.py CHANGED
@@ -4,20 +4,43 @@ import requests
import inspect
import pandas as pd

+ # Import your agent from agents.py
+ from agents import GaiaAgent
+
# (Keep Constants as is)
# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

# --- Basic Agent Definition ---
- # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
+ # ----- THIS IS WHERE YOU PLUG IN YOUR AGENT ------
class BasicAgent:
    def __init__(self):
        print("BasicAgent initialized.")
+         # Initialize your GaiaAgent
+         self.agent = GaiaAgent()
+
    def __call__(self, question: str) -> str:
        print(f"Agent received question (first 50 chars): {question[:50]}...")
-         fixed_answer = "This is a default answer."
-         print(f"Agent returning fixed answer: {fixed_answer}")
-         return fixed_answer
+
+         try:
+             # Add a delay to avoid rate limiting
+             import time
+             print("Waiting 15 seconds to respect rate limits...")
+             time.sleep(15)  # 15 second delay between questions
+
+             # Use your agent's solve_question method or call it directly
+             # Your GaiaAgent has a __call__ method, so we can call it directly
+             answer = self.agent(question)
+
+             print(f"Agent returning answer: {answer}")
+             return str(answer)  # Ensure we return a string
+
+         except Exception as e:
+             print(f"Error in agent execution: {e}")
+             # Return a more graceful error message for rate limiting
+             if "quota" in str(e).lower() or "rate" in str(e).lower():
+                 return "Rate limit exceeded - please try again later"
+             return f"Error: {str(e)}"

def run_and_submit_all( profile: gr.OAuthProfile | None):
    """
@@ -80,7 +103,9 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
            print(f"Skipping item with missing task_id or question: {item}")
            continue
        try:
-             submitted_answer = agent(question_text)
+             # Pass both question and task_id to the agent
+             question_with_metadata = f"TASK_ID: {task_id}\n\nQUESTION: {question_text}"
+             submitted_answer = agent(question_with_metadata)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
@@ -146,11 +171,9 @@ with gr.Blocks() as demo:
    gr.Markdown(
        """
        **Instructions:**
-
        1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
        2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
        3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
-
        ---
        **Disclaimers:**
        Once you click the "submit" button, it can take quite some time (this is the time for the agent to go through all the questions).
requirements.txt CHANGED
@@ -1,2 +1,16 @@
gradio
- requests
+ requests
+ langgraph
+ langchain-core
+ langchain-google-genai
+ python-dotenv
+ pandas
+ google-generativeai
+ langchain_experimental
+ pypdf
+ openai-whisper
+ torch
+ yt_dlp
+ openpyxl
+ xlrd
+ google-genai
tools.py ADDED
@@ -0,0 +1,672 @@
+ from langchain_core.tools import tool
+ import os
+ from dotenv import load_dotenv
+ from langchain_community.tools import DuckDuckGoSearchRun
+ from langchain_experimental.tools import PythonREPLTool
+ from langchain_core.messages import HumanMessage
+ from langchain_google_genai import ChatGoogleGenerativeAI
+ import requests
+ import base64
+ import tempfile
+ import pypdf
+ import pandas
+ import zipfile
+ from pathlib import Path
+ import mimetypes
+ from typing import Optional
+ import whisper
+ import torch
+ import yt_dlp
+ import google.generativeai as genai
+ import time
+
+ load_dotenv()
+
+ vision_llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
+ # Create the underlying REPL tool
+ #_python_repl = PythonREPLTool()
+
+
+ @tool
+ def google_grounding_search(query: str) -> str:
+     """
+     Search for current information using Google's grounded search.
+
+     Use this tool when you need:
+     - Latest/current information (news, events, prices, etc.)
+     - Real-time data that might not be in your training
+     - Recent developments or updates
+     - Current facts to supplement your knowledge
+
+     Args:
+         query: Search query (be specific and focused)
+
+     Returns:
+         Current information from Google search with citations
+
+     Example usage:
+     - google_grounding_search("latest AI news January 2025")
+     - google_grounding_search("current Tesla stock price")
+     - google_grounding_search("Manchester United new signings 2025")
+     """
+     try:
+         # Import the newer Google genai library
+         from google import genai
+         from google.genai import types
+         import os
+
+         # Get API key from environment
+         api_key = os.getenv("GEMINI_API_KEY")
+         if not api_key:
+             return "Error: GEMINI_API_KEY not found in environment variables"
+
+         # Initialize client and grounding tool
+         client = genai.Client(api_key=api_key)
+         grounding_tool = types.Tool(google_search=types.GoogleSearch())
+
+         # Configure for grounding
+         grounding_config = types.GenerateContentConfig(
+             tools=[grounding_tool]
+         )
+
+         #print(f"🔎 Performing grounded search for: {query}")
+
+         # Make grounded search request
+         response = client.models.generate_content(
+             model="gemini-2.0-flash",
+             contents=f"Search for and provide current information about: {query}",
+             config=grounding_config
+         )
+
+         result = response.text.strip()
+
+         if not result:
+             return "No results found from grounded search"
+
+         return f"Current Information (via Google Search):\n{result}"
+
+     except ImportError as e:
+         return f"Error: google-genai library not available. Import error: {str(e)}"
+     except Exception as e:
+         return f"Error performing grounded search: {str(e)}"
+
+ @tool
+ def execute_python(code: str) -> str:
+     """Execute Python code for mathematical calculations, data analysis, and general computation.
+
+     Args:
+         code: Valid Python code to execute
+
+     Returns:
+         The output/result of the executed code
+     """
+     try:
+         # For simple calculations, use eval
+         if all(char in "0123456789+-*/.() " for char in code.strip()):
+             result = eval(code)
+             return str(result)
+
+         # For more complex code, use exec with captured output
+         import io
+         import sys
+         from contextlib import redirect_stdout
+
+         # Capture stdout
+         captured_output = io.StringIO()
+         local_vars = {}
+
+         with redirect_stdout(captured_output):
+             exec(code, {"__builtins__": __builtins__}, local_vars)
+
+         output = captured_output.getvalue().strip()
+
+         # If no output was printed, try to return the last variable value
+         if not output and local_vars:
+             # Get the last defined variable
+             last_var = list(local_vars.values())[-1] if local_vars else None
+             if last_var is not None:
+                 return str(last_var)
+
+         return output if output else "Code executed successfully (no output)"
+
+     except Exception as e:
+         return f"Error executing code: {str(e)}"
+
+ @tool
+ def download_files_from_api(task_id: str, file_extension: str = None) -> str:
+     """Downloads a file (image, PDF, CSV, code, audio, Excel, etc.) associated with a task ID from the API.
+     The file is saved to a temporary location, and its local path is returned.
+
+     Args:
+         task_id: The task ID for which to download the file.
+         file_extension: Optional. The expected file extension (e.g., ".py", ".csv", ".pdf").
+                         If provided, this will be used for the temporary file.
+                         Otherwise, the extension will be inferred from the Content-Type header.
+
+     Returns:
+         The absolute path to the downloaded file, or an error message.
+     """
+     try:
+         api_url = "https://agents-course-unit4-scoring.hf.space"
+         response = requests.get(f"{api_url}/files/{task_id}", timeout=30)
+         response.raise_for_status()
+
+         ext = file_extension
+         if not ext:
+             # Determine file extension from headers or default to .bin
+             content_type = response.headers.get('Content-Type', '')
+             if 'image/jpeg' in content_type:
+                 ext = '.jpg'
+             elif 'image/png' in content_type:
+                 ext = '.png'
+             elif 'application/pdf' in content_type:
+                 ext = '.pdf'
+             elif 'text/csv' in content_type:
+                 ext = '.csv'
+             elif 'text/x-python' in content_type or 'application/x-python-code' in content_type:
+                 ext = '.py'
+             elif 'audio/mpeg' in content_type:
+                 ext = '.mp3'
+             elif 'audio/wav' in content_type:
+                 ext = '.wav'
+             elif 'application/vnd.ms-excel' in content_type:
+                 ext = '.xls'
+             elif 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' in content_type:
+                 ext = '.xlsx'
+             elif 'application/zip' in content_type:
+                 ext = '.zip'
+             elif 'text/plain' in content_type:
+                 ext = '.txt'
+             else:
+                 ext = '.bin'  # Default for unknown types
+
+         # Create a temporary file to save the content
+         with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as temp_file:
+             temp_file.write(response.content)
+             file_path = temp_file.name
+
+         print(f"Downloaded file for task {task_id} to: {file_path}")
+         return file_path
+
+     except requests.exceptions.RequestException as e:
+         return f"Error downloading file from API: {str(e)}"
+     except Exception as e:
+         return f"An unexpected error occurred: {str(e)}"
+
+ @tool
+ def process_image(image_path: str) -> str:
+     """Analyze an image file from a local path - extract any text present and provide visual description.
+     This tool can handle various image formats like PNG, JPEG, GIF, etc.
+
+     Args:
+         image_path: The absolute path to the local image file.
+
+     Returns:
+         Extracted text (if any) and visual description of the image.
+     """
+     try:
+         # Dynamically determine the MIME type
+         import mimetypes
+         mime_type, _ = mimetypes.guess_type(image_path)
+         if mime_type is None:
+             # Default to a common type if detection fails
+             mime_type = "application/octet-stream"
+
+         with open(image_path, "rb") as image_file:
+             image_bytes = image_file.read()
+             image_base64 = base64.b64encode(image_bytes).decode("utf-8")
+
+         # First call: Extract text
+         text_message = [
+             HumanMessage(
+                 content=[
+                     {
+                         "type": "text",
+                         "text": (
+                             "Extract all the text from this image. "
+                             "Return only the extracted text, no explanations. "
+                             "If no text is found, respond with 'No text found'."
+                         ),
+                     },
+                     {
+                         "type": "image_url",
+                         "image_url": {
+                             "url": f"data:{mime_type};base64,{image_base64}"
+                         },
+                     },
+                 ]
+             )
+         ]
+
+         text_response = vision_llm.invoke(text_message)
+         extracted_text = text_response.content.strip()
+
+         # Second call: Get description
+         description_message = [
+             HumanMessage(
+                 content=[
+                     {
+                         "type": "text",
+                         "text": (
+                             "Describe what you see in this image in detail. "
+                             "Be specific about objects, positions, colors, text, numbers, "
+                             "and any other relevant visual information."
+                         ),
+                     },
+                     {
+                         "type": "image_url",
+                         "image_url": {
+                             "url": f"data:{mime_type};base64,{image_base64}"
+                         },
+                     },
+                 ]
+             )
+         ]
+
+         description_response = vision_llm.invoke(description_message)
+         description = description_response.content.strip()
+
+         # Format the combined result
+         result = f"TEXT EXTRACTED:\n{extracted_text}\n\nVISUAL DESCRIPTION:\n{description}"
+
+         return result
+
+     except FileNotFoundError:
+         return f"Error: Image file not found at {image_path}"
+     except Exception as e:
+         return f"Error processing image: {str(e)}"
+
+ @tool
+ def process_pdf(pdf_path: str) -> str:
+     """Extracts all text content from a PDF file.
+     Args:
+         pdf_path: The absolute path to the local PDF file.
+     Returns:
+         A string containing all extracted text from the PDF, or an error message.
+     """
+     try:
+         reader = pypdf.PdfReader(pdf_path)
+         text = ""
+         for page in reader.pages:
+             text += page.extract_text() + "\n"
+         return text if text else "No text found in PDF."
+     except FileNotFoundError:
+         return f"Error: PDF file not found at {pdf_path}"
+     except Exception as e:
+         return f"Error processing PDF: {str(e)}"
+
+ @tool
+ def process_csv(csv_path: str, operation: str = "summary", params: dict = None) -> str:
+     """Processes a CSV file based on the specified operation.
+     Args:
+         csv_path: The absolute path to the local CSV file.
+         operation: The operation to perform. Supported operations:
+             "summary": Returns a summary of the CSV (head, columns, dtypes, shape).
+             "get_column": Returns the content of a specific column. Requires 'column_name' in params.
+             "filter": Filters rows based on a condition. Requires 'column', 'operator', 'value' in params.
+                       Supported operators: "==", "!=", ">", "<", ">=", "<=".
+             "aggregate": Performs aggregation on a column. Requires 'agg_column', 'agg_function' in params.
+                          Optional: 'group_by_column'. Supported functions: "sum", "mean", "count", "min", "max".
+             "describe": Returns descriptive statistics for numerical columns.
+         params: A dictionary of parameters for the chosen operation.
+     Returns:
+         A string containing the result of the operation, or an error message.
+     """
+     if params is None:
+         params = {}
+
+     try:
+         df = pandas.read_csv(csv_path)
+
+         if operation == "summary":
+             summary = f"Shape: {df.shape}\n"
+             summary += f"Columns:\n{df.columns.tolist()}\n"
+             summary += f"Data Types:\n{df.dtypes}\n"
+             summary += f"First 5 rows:\n{df.head().to_string()}"
+             return summary
+
+         elif operation == "get_column":
+             column_name = params.get("column_name")
+             if column_name not in df.columns:
+                 return f"Error: Column '{column_name}' not found."
+             return df[column_name].to_string()
+
+         elif operation == "filter":
+             column = params.get("column")
+             operator = params.get("operator")
+             value = params.get("value")
+
+             if not all([column, operator, value is not None]):
+                 return "Error: 'column', 'operator', and 'value' are required for filter operation."
+             if column not in df.columns:
+                 return f"Error: Column '{column}' not found."
+
+             if operator == "==":
+                 filtered_df = df[df[column] == value]
+             elif operator == "!=":
+                 filtered_df = df[df[column] != value]
+             elif operator == ">":
+                 filtered_df = df[df[column] > value]
+             elif operator == "<":
+                 filtered_df = df[df[column] < value]
+             elif operator == ">=":
+                 filtered_df = df[df[column] >= value]
+             elif operator == "<=":
+                 filtered_df = df[df[column] <= value]
+             else:
+                 return f"Error: Unsupported operator '{operator}'."
+             return filtered_df.to_string()
+
+         elif operation == "aggregate":
+             agg_column = params.get("agg_column")
+             agg_function = params.get("agg_function")
+             group_by_column = params.get("group_by_column")
+
+             if not all([agg_column, agg_function]):
+                 return "Error: 'agg_column' and 'agg_function' are required for aggregate operation."
+             if agg_column not in df.columns:
+                 return f"Error: Column '{agg_column}' not found."
+             if group_by_column and group_by_column not in df.columns:
+                 return f"Error: Group by column '{group_by_column}' not found."
+
+             if agg_function not in ["sum", "mean", "count", "min", "max"]:
+                 return f"Error: Unsupported aggregation function '{agg_function}'."
+
+             if group_by_column:
+                 result = df.groupby(group_by_column)[agg_column].agg(agg_function)
+             else:
+                 result = df[agg_column].agg(agg_function)
+             return str(result)
+
+         elif operation == "describe":
+             return df.describe().to_string()
+
+         else:
+             return f"Error: Unsupported operation '{operation}'."
+
+     except FileNotFoundError:
+         return f"Error: CSV file not found at {csv_path}"
+     except Exception as e:
+         return f"Error processing CSV: {str(e)}"
+
+ @tool
+ def process_code_file(code_file_path: str) -> str:
+     """Reads and executes a code file, returning its output along with the full code.
+     Args:
+         code_file_path: The absolute path to the local code file.
+     Returns:
+         A string containing the full code and the output of the executed code, or an error message.
+     """
+     try:
+         with open(code_file_path, "r") as f:
+             code_content = f.read()
+
+         if code_file_path.endswith(".py"):
+             execution_output = execute_python(code_content)
+             return f"--- FULL CODE ---\n{code_content}--- EXECUTION OUTPUT ---\n{execution_output}"
+         else:
+             return f"Error: Only Python (.py) files are supported for execution. Found: {code_file_path}"
+
+     except FileNotFoundError:
+         return f"Error: Code file not found at {code_file_path}"
+     except Exception as e:
+         return f"Error processing code file: {str(e)}"
+
+ @tool
+ def process_excel(excel_path: str, operation: str = "summary", params: dict = None) -> str:
+     """Processes an Excel file based on the specified operation.
+     Args:
+         excel_path: The absolute path to the local Excel file.
+         operation: The operation to perform. Supported operations:
+             "summary": Returns a summary of the Excel file (sheet names, columns, etc.).
+             "get_sheet": Returns the content of a specific sheet. Requires 'sheet_name' in params.
+
+     Returns:
+         A string containing the result of the operation, or an error message.
+     """
+     if params is None:
+         params = {}
+
+     try:
+         xls = pandas.ExcelFile(excel_path)
+
+         if operation == "summary":
+             sheet_names = xls.sheet_names
+             summary = f"Sheets: {sheet_names}\n"
+             for sheet in sheet_names:
+                 df = pandas.read_excel(xls, sheet_name=sheet)
+                 summary += f"\n--- Sheet: {sheet} ---\n"
+                 summary += f"Shape: {df.shape}\n"
+                 summary += f"Columns: {df.columns.tolist()}\n"
+                 summary += f"First 5 rows:\n{df.head().to_string()}\n"
+             return summary
+
+         elif operation == "get_sheet":
+             sheet_name = params.get("sheet_name")
+             if sheet_name not in xls.sheet_names:
+                 return f"Error: Sheet '{sheet_name}' not found."
+             df = pandas.read_excel(xls, sheet_name=sheet_name)
+             return df.to_string()
+
+         else:
+             return f"Error: Unsupported operation '{operation}'."
+
+     except FileNotFoundError:
+         return f"Error: Excel file not found at {excel_path}"
+     except Exception as e:
+         return f"Error processing Excel file: {str(e)}"
+
+ @tool
+ def process_archive(archive_path: str, operation: str = "list", extract_to: str = None) -> str:
+     """Processes a .zip archive file.
+
+     Args:
+         archive_path: The absolute path to the local .zip file.
+         operation: The operation to perform. Supported operations:
+             "list": Lists the contents of the archive.
+             "extract": Extracts the entire archive. Requires 'extract_to' parameter.
+         extract_to: Optional. The directory to extract the files to.
+                     If not provided, it will create a directory with the same name as the archive.
+
+     Returns:
+         A string containing the result of the operation, or an error message.
+     """
+     try:
+         if not zipfile.is_zipfile(archive_path):
+             return f"Error: File at {archive_path} is not a valid .zip file."
+
+         with zipfile.ZipFile(archive_path, 'r') as zip_ref:
+             if operation == "list":
+                 file_list = zip_ref.namelist()
+                 return f"Files in archive: {file_list}"
+
+             elif operation == "extract":
+                 if extract_to is None:
+                     # Create a directory named after the zip file (without extension)
+                     extract_to, _ = os.path.splitext(archive_path)
+
+                 os.makedirs(extract_to, exist_ok=True)
+                 zip_ref.extractall(extract_to)
+                 return f"Archive extracted successfully to: {extract_to}"
+
+             else:
+                 return f"Error: Unsupported operation '{operation}'."
+
+     except FileNotFoundError:
+         return f"Error: Archive file not found at {archive_path}"
+     except Exception as e:
+         return f"Error processing archive: {str(e)}"
+
+ @tool
+ def read_text_file(file_path: str) -> str:
+     """Reads the entire content of a text file.
+     Args:
+         file_path: The absolute path to the local text file (.txt, .md, .json, etc.).
+     Returns:
+         A string containing the full content of the file, or an error message.
+     """
+     try:
+         with open(file_path, "r", encoding='utf-8') as f:
+             content = f.read()
+         return content
+     except FileNotFoundError:
+         return f"Error: File not found at {file_path}"
+     except Exception as e:
+         return f"Error reading text file: {str(e)}"
+
+
+ # Global model cache to avoid reloading
+ _whisper_model = None
+
+ @tool
+ def process_audio(audio_path: str) -> str:
+     """Analyzes an audio file using local Whisper model for transcription.
+
+     Args:
+         audio_path: The absolute path to the local audio file
+
+     Returns:
+         A transcription and basic analysis of the audio content
+     """
+     global _whisper_model
+
+     try:
+         # Check if file exists
+         if not os.path.exists(audio_path):
+             return f"Error: Audio file not found at {audio_path}"
+
+         # Check file size
+         file_size = os.path.getsize(audio_path)
+         if file_size > 100 * 1024 * 1024:  # 100MB limit
+             return f"Error: Audio file too large ({file_size / (1024*1024):.1f}MB)"
+
+         # Load model once and cache it
+         if _whisper_model is None:
+             try:
+                 _whisper_model = whisper.load_model("base")
+                 print("Whisper model loaded")
+             except Exception as e:
+                 return f"Error loading Whisper model: {str(e)}\nTry: pip install openai-whisper"
+
+         # Transcribe audio
+         result = _whisper_model.transcribe(audio_path)
+         transcription = result["text"].strip()
+         detected_language = result.get("language", "unknown")
+
+         # Basic info
+         word_count = len(transcription.split())
+
+         return f"""AUDIO TRANSCRIPTION:
+ File: {Path(audio_path).name}
+ Size: {file_size / (1024*1024):.1f}MB
+ Language: {detected_language}
+ Words: {word_count}
+ TRANSCRIPT:
+ {transcription}
+ """
+
+     except Exception as e:
+         return f"Error processing audio: {str(e)}"
+
+ @tool
+ def process_youtube_video(url: str, question: str) -> str:
+     """
+     REQUIRED for YouTube video analysis. Downloads and analyzes YouTube videos
+     to answer questions about visual content, count objects, identify details.
+
+     Use this tool WHENEVER you see a YouTube URL in the question.
+     This is the ONLY way to analyze YouTube video content accurately.
+
+     Args:
+         url: YouTube video URL (any youtube.com or youtu.be link)
+         question: The specific question about the video content
+
+     Returns:
+         Detailed analysis of the actual video content
+     """
+     try:
+         # Import and configure the direct Google AI library
+         import google.generativeai as genai
+         genai.configure(api_key=os.environ.get("GOOGLE_API_KEY"))
+
+         # Create temporary directory for video
+         with tempfile.TemporaryDirectory() as temp_dir:
+             temp_path = Path(temp_dir)
+
+             # Configure yt-dlp options
+             ydl_opts = {
+                 'format': 'best[height<=720]',  # Limit quality to save quota
+                 'outtmpl': str(temp_path / '%(title)s.%(ext)s'),
+                 'quiet': True,
+                 'no_warnings': True,
+             }
+
+             print(f"Downloading video from: {url}")
+
+             # Download video
+             with yt_dlp.YoutubeDL(ydl_opts) as ydl:
+                 info = ydl.extract_info(url, download=True)
+                 video_title = info.get('title', 'Unknown')
+                 duration = info.get('duration', 0)
+
+             # Find downloaded file
+             video_files = list(temp_path.glob('*'))
+             if not video_files:
+                 return "Error: Failed to download video file"
+
+             video_file = video_files[0]
+             file_size = video_file.stat().st_size / (1024 * 1024)  # MB
+
+             print(f"Video downloaded: {video_title} ({duration}s, {file_size:.1f}MB)")
+
+             # Check file size limit
+             if file_size > 100:  # 100MB limit for Gemini
+                 return f"Error: Video too large ({file_size:.1f}MB). Maximum size is 100MB."
+
+             # Upload and process with Gemini
+             try:
+                 # Upload video file
+                 print("Uploading video to Gemini...")
+                 video_file_obj = genai.upload_file(str(video_file))
+
+                 # Wait for processing
+                 while video_file_obj.state.name == "PROCESSING":
+                     print("Processing video...")
+                     time.sleep(2)
+                     video_file_obj = genai.get_file(video_file_obj.name)
+
+                 if video_file_obj.state.name == "FAILED":
+                     return "Error: Video processing failed"
+
+                 # Create analysis prompt
+                 analysis_prompt = f"""Analyze this video carefully to answer the following question: {question}
+ Please examine the video content thoroughly and provide a detailed, accurate answer. Pay attention to visual details, timing, and any relevant information that helps answer the question.
+ Video title: {video_title}
+ Duration: {duration} seconds
+ Question: {question}"""
+
+                 # Generate analysis with Gemini 2.0 Flash
+                 model = genai.GenerativeModel('gemini-2.0-flash')
+                 response = model.generate_content([analysis_prompt, video_file_obj])
+
+                 # Clean up uploaded file
+                 try:
+                     genai.delete_file(video_file_obj.name)
+                 except:
+                     pass
+
+                 return f"""VIDEO ANALYSIS:
+ Title: {video_title}
+ URL: {url}
+ Duration: {duration} seconds
+ Size: {file_size:.1f}MB
+ QUESTION: {question}
+ ANSWER: {response.text}"""
+
+             except Exception as processing_error:
+                 return f"Error processing video with Gemini: {str(processing_error)}"
+
+     except ImportError:
+         return "Error: google-generativeai library not installed. Run: pip install google-generativeai"
+     except Exception as e:
+         return f"Error downloading or processing video: {str(e)}"