Raghoottam committed
Commit 1226c19 · verified · Parent: 04b834e

Upload 6 files

Files changed (6)
  1. README.md +8 -9
  2. agent.py +1 -1
  3. app.py +183 -312
  4. gitattributes +35 -0
  5. metadata.jsonl +0 -0
  6. requirements.txt +19 -4
README.md CHANGED
@@ -1,16 +1,15 @@
  ---
- title: GAIA Leaderboard
- emoji: 🦾
- colorFrom: yellow
  colorTo: indigo
  sdk: gradio
  app_file: app.py
- pinned: true
- license: apache-2.0
  hf_oauth: true
- failure_strategy: rollback
- tags:
- - leaderboard
  ---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

  ---
+ title: Template Final Assignment
+ emoji: 🕵🏻‍♂️
+ colorFrom: indigo
  colorTo: indigo
  sdk: gradio
+ sdk_version: 5.25.2
  app_file: app.py
+ pinned: false
  hf_oauth: true
+ # optional, default duration is 8 hours/480 minutes. Max duration is 30 days/43200 minutes.
+ hf_oauth_expiration_minutes: 480
  ---

+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
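
A note on the new front matter: hf_oauth: true together with hf_oauth_expiration_minutes is what lets the Gradio app below receive a gr.OAuthProfile once a user signs in through gr.LoginButton(). A minimal standalone sketch of that pattern (an illustration, not code from this commit):

    import gradio as gr

    def whoami(profile: gr.OAuthProfile | None) -> str:
        # On a Space with hf_oauth enabled, Gradio injects the profile
        # automatically for any event handler that declares this parameter;
        # it must NOT be listed in the handler's inputs.
        if profile is None:
            return "Please log in with the button above."
        return f"Logged in as {profile.username}"

    with gr.Blocks() as demo:
        gr.LoginButton()
        status = gr.Textbox(label="Status")
        gr.Button("Check login").click(fn=whoami, outputs=status)

    demo.launch()

This is exactly the mechanism app.py below relies on: run_and_submit_all declares a gr.OAuthProfile | None parameter and the click handler passes only outputs.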
agent.py CHANGED
@@ -207,4 +207,4 @@ def build_graph(provider: str = "groq"):
      builder.add_conditional_edges("assistant", tools_condition)
      builder.add_edge("tools", "assistant")
-
-     return builder.compile()

      builder.add_conditional_edges("assistant", tools_condition)
      builder.add_edge("tools", "assistant")
+
+     return builder.compile()
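
Only the tail of build_graph is visible in this hunk (the change is whitespace-only). For orientation, below is a minimal sketch of the kind of LangGraph graph these four lines typically close out; the tool set and the model name are placeholder assumptions, not the contents of the rest of agent.py:

    from langchain_core.tools import tool
    from langchain_groq import ChatGroq
    from langgraph.graph import StateGraph, MessagesState, START
    from langgraph.prebuilt import ToolNode, tools_condition

    @tool
    def multiply(a: int, b: int) -> int:
        """Multiply two integers."""
        return a * b

    tools = [multiply]  # placeholder; the real agent.py defines its own tools

    def build_graph(provider: str = "groq"):
        llm = ChatGroq(model="llama-3.3-70b-versatile")  # assumed model choice
        llm_with_tools = llm.bind_tools(tools)

        def assistant(state: MessagesState):
            # Single LLM step; tools_condition routes to the "tools" node
            # whenever the model requests a tool call, otherwise the graph ends.
            return {"messages": [llm_with_tools.invoke(state["messages"])]}

        builder = StateGraph(MessagesState)
        builder.add_node("assistant", assistant)
        builder.add_node("tools", ToolNode(tools))
        builder.add_edge(START, "assistant")
        builder.add_conditional_edges("assistant", tools_condition)
        builder.add_edge("tools", "assistant")

        return builder.compile()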
app.py CHANGED
@@ -1,334 +1,205 @@
  import os
- import json
- import datetime
- import requests
- from email.utils import parseaddr
-
  import gradio as gr
  import pandas as pd
- import numpy as np
-
- from datasets import load_dataset, VerificationMode
- from apscheduler.schedulers.background import BackgroundScheduler
- from huggingface_hub import HfApi
-
- # InfoStrings
- from scorer import question_scorer
- from content import format_error, format_warning, format_log, TITLE, INTRODUCTION_TEXT, SUBMISSION_TEXT, CITATION_BUTTON_LABEL, CITATION_BUTTON_TEXT, model_hyperlink
-
- TOKEN = os.environ.get("TOKEN", None)
-
- OWNER = "gaia-benchmark"
- DATA_DATASET = f"{OWNER}/GAIA"
- INTERNAL_DATA_DATASET = f"{OWNER}/GAIA_internal"
- SUBMISSION_DATASET = f"{OWNER}/submissions_internal"
- SUBMISSION_DATASET_PUBLIC = f"{OWNER}/submissions_public"
- CONTACT_DATASET = f"{OWNER}/contact_info"
- RESULTS_DATASET = f"{OWNER}/results_public"
- LEADERBOARD_PATH = f"{OWNER}/leaderboard"
- api = HfApi()
-
- YEAR_VERSION = "2023"
- ref_scores_len = {"validation": 165, "test": 301}
- ref_level_len = {"validation": {1: 53, 2: 86, 3: 26}, "test": {1: 93, 2: 159, 3: 49}}
-
- os.makedirs("scored", exist_ok=True)
-
- # Should be False on spaces and True outside
- LOCAL_DEBUG = False  # os.environ.get("system", "") != "spaces"
-
- # Display the results
- eval_results = load_dataset(RESULTS_DATASET, YEAR_VERSION, token=TOKEN, download_mode="force_redownload", verification_mode=VerificationMode.NO_CHECKS, trust_remote_code=True)
- contact_infos = load_dataset(CONTACT_DATASET, YEAR_VERSION, token=TOKEN, download_mode="force_redownload", verification_mode=VerificationMode.NO_CHECKS, trust_remote_code=True)
-
- def get_dataframe_from_results(eval_results, split):
-     local_df = eval_results[split]
-     local_df = local_df.map(lambda row: {"model": model_hyperlink(row["url"], row["model"])})
-     local_df = local_df.remove_columns(["system_prompt", "url"])
-     local_df = local_df.rename_column("model", "Agent name")
-     local_df = local_df.rename_column("model_family", "Model family")
-     local_df = local_df.rename_column("score", "Average score (%)")
-     for i in [1, 2, 3]:
-         local_df = local_df.rename_column(f"score_level{i}", f"Level {i} score (%)")
-     local_df = local_df.rename_column("date", "Submission date")
-     df = pd.DataFrame(local_df)
-     df = df.sort_values(by=["Average score (%)"], ascending=False)
-
-     numeric_cols = [c for c in local_df.column_names if "score" in c]
-     df[numeric_cols] = df[numeric_cols].multiply(100).round(decimals=2)
-     # df = df.style.format("{:.2%}", subset=numeric_cols)
-
-     return df
-
- # eval_dataframe_val = get_dataframe_from_results(eval_results=eval_results, split="validation")
- eval_dataframe_test = get_dataframe_from_results(eval_results=eval_results, split="test")
-
- # Gold answers
- gold_results = {}
- gold_dataset = load_dataset(INTERNAL_DATA_DATASET, f"{YEAR_VERSION}_all", token=TOKEN, trust_remote_code=True)
- gold_results = {split: {row["task_id"]: row for row in gold_dataset[split]} for split in ["test", "validation"]}
-
- def restart_space():
-     api.restart_space(repo_id=LEADERBOARD_PATH, token=TOKEN)
-
- TYPES = ["markdown", "number", "number", "number", "number", "str", "str", "str"]
-
- def add_new_eval(
-     # val_or_test: str,
-     model: str,
-     model_family: str,
-     system_prompt: str,
-     url: str,
-     path_to_file: str,
-     organisation: str,
-     mail: str,
-     profile: gr.OAuthProfile,
- ):
-     val_or_test = "test"
      try:
-         # Was the profile created less than 2 months ago?
-         user_data = requests.get(f"https://huggingface.co/api/users/{profile.username}/overview")
-         creation_date = json.loads(user_data.content)["createdAt"]
-         if datetime.datetime.now() - datetime.datetime.strptime(creation_date, '%Y-%m-%dT%H:%M:%S.%fZ') < datetime.timedelta(days=60):
-             return format_error("This account is not authorized to submit on GAIA.")
-
-         contact_infos = load_dataset(CONTACT_DATASET, YEAR_VERSION, token=TOKEN, download_mode="force_redownload", verification_mode=VerificationMode.NO_CHECKS, trust_remote_code=True)
-         user_submission_dates = sorted(row["date"] for row in contact_infos[val_or_test] if row["username"] == profile.username)
-         if len(user_submission_dates) > 0 and user_submission_dates[-1] == datetime.datetime.today().strftime('%Y-%m-%d'):
-             return format_error("You already submitted once today, please try again tomorrow.")
-
-         is_validation = val_or_test == "validation"
-         # Very basic email parsing
-         _, parsed_mail = parseaddr(mail)
-         if not "@" in parsed_mail:
-             return format_warning("Please provide a valid email address.")
-
-         print("Adding new eval")
-
-         # Check if the model/org combination already exists and print a warning message if yes
-         if model.lower() in set([m.lower() for m in eval_results[val_or_test]["model"]]) and organisation.lower() in set([o.lower() for o in eval_results[val_or_test]["organisation"]]):
-             return format_warning("This model has already been submitted.")
-
-         if path_to_file is None:
-             return format_warning("Please attach a file.")
-
-         # SAVE UNSCORED SUBMISSION
-         if LOCAL_DEBUG:
-             print("mock uploaded submission")
-         else:
-             api.upload_file(
-                 repo_id=SUBMISSION_DATASET,
-                 path_or_fileobj=path_to_file.name,
-                 path_in_repo=f"{organisation}/{model}/{YEAR_VERSION}_{val_or_test}_raw_{datetime.datetime.today()}.jsonl",
-                 repo_type="dataset",
-                 token=TOKEN
-             )
-
-         # SAVE CONTACT
-         contact_info = {
-             "model": model,
-             "model_family": model_family,
-             "url": url,
-             "organisation": organisation,
-             "username": profile.username,
-             "mail": mail,
-             "date": datetime.datetime.today().strftime('%Y-%m-%d')
-         }
-         contact_infos[val_or_test] = contact_infos[val_or_test].add_item(contact_info)
-         if LOCAL_DEBUG:
-             print("mock uploaded contact info")
-         else:
-             contact_infos.push_to_hub(CONTACT_DATASET, config_name=YEAR_VERSION, token=TOKEN)
-
-         # SCORE SUBMISSION
-         file_path = path_to_file.name
-         scores = {"all": 0, 1: 0, 2: 0, 3: 0}
-         num_questions = {"all": 0, 1: 0, 2: 0, 3: 0}
-         task_ids = []
-         with open(f"scored/{organisation}_{model}.jsonl", "w") as scored_file:
-             with open(file_path, 'r') as f:
-                 for ix, line in enumerate(f):
-                     try:
-                         task = json.loads(line)
-                     except Exception:
-                         return format_error(f"Line {ix} is incorrectly formatted. Please fix it and resubmit your file.")
-
-                     if "model_answer" not in task:
-                         return format_error(f"Line {ix} contains no model_answer key. Please fix it and resubmit your file.")
-                     answer = task["model_answer"]
-                     task_id = task["task_id"]
-                     try:
-                         level = int(gold_results[val_or_test][task_id]["Level"])
-                     except KeyError:
-                         return format_error(f"{task_id} not found in split {val_or_test}. Are you sure you submitted the correct file?")
-
-                     score = question_scorer(task['model_answer'], gold_results[val_or_test][task_id]["Final answer"])
-
-                     scored_file.write(
-                         json.dumps({
-                             "id": task_id,
-                             "model_answer": answer,
-                             "score": score,
-                             "level": level
-                         }) + "\n"
-                     )
-                     task_ids.append(task_id)
-
-                     scores["all"] += score
-                     scores[level] += score
-                     num_questions["all"] += 1
-                     num_questions[level] += 1
-
-         # Check if there's any duplicate in the submission
-         if len(task_ids) != len(set(task_ids)):
-             return format_error("There are duplicates in your submission. Please check your file and resubmit it.")
-
-         if any([num_questions[level] != ref_level_len[val_or_test][level] for level in [1, 2, 3]]):
-             return format_error(f"Your submission has {num_questions[1]} questions for level 1, {num_questions[2]} for level 2, and {num_questions[3]} for level 3, but it should have {ref_level_len[val_or_test][1]}, {ref_level_len[val_or_test][2]}, and {ref_level_len[val_or_test][3]} respectively. Please check your submission.")
-
-         # SAVE SCORED SUBMISSION
-         if LOCAL_DEBUG:
-             print("mock uploaded scored submission")
-         else:
-             api.upload_file(
-                 repo_id=SUBMISSION_DATASET,
-                 path_or_fileobj=f"scored/{organisation}_{model}.jsonl",
-                 path_in_repo=f"{organisation}/{model}/{YEAR_VERSION}_{val_or_test}_scored_{datetime.datetime.today()}.jsonl",
-                 repo_type="dataset",
-                 token=TOKEN
-             )
-
-         # Save scored file
-         if is_validation:
-             api.upload_file(
-                 repo_id=SUBMISSION_DATASET_PUBLIC,
-                 path_or_fileobj=f"scored/{organisation}_{model}.jsonl",
-                 path_in_repo=f"{organisation}/{model}/{YEAR_VERSION}_{val_or_test}_scored_{datetime.datetime.today()}.jsonl",
-                 repo_type="dataset",
-                 token=TOKEN
-             )
-
-         # SAVE TO LEADERBOARD DATA
-         eval_entry = {
-             "model": model,
-             "model_family": model_family,
-             "system_prompt": system_prompt,
-             "url": url,
-             "organisation": organisation,
-             "score": scores["all"] / ref_scores_len[val_or_test],
-             "score_level1": scores[1] / num_questions[1],
-             "score_level2": scores[2] / num_questions[2],
-             "score_level3": scores[3] / num_questions[3],
-             "date": datetime.datetime.today().strftime('%Y-%m-%d')
-         }
-         if num_questions[1] + num_questions[2] + num_questions[3] != ref_scores_len[val_or_test]:
-             return format_error(f"Your submission has {len(scores['all'])} questions for the {val_or_test} set, but it should have {ref_scores_len[val_or_test]}. Please check your submission.")
-         # Catching spam submissions of 100%
-         if all((eval_entry[k] == 1 for k in ["score_level1", "score_level2", "score_level3"])):
-             return format_error(f"There was a problem with your submission. Please open a discussion.")
-
-         # Testing for duplicates - to see if we want to add something like it, as it would allow people to try to see the content of other submissions
-         # eval_entry_no_date = {k: v for k, v in eval_entry if k != "date"}
-         # columns_no_date = [c for c in eval_results[val_or_test].column_names if c != "date"]
-         # if eval_entry_no_date in eval_results[val_or_test].select_columns(columns_no_date):
-         #     return format_error(f"Your submission is an exact duplicate of an existing submission.")
-
-         eval_results[val_or_test] = eval_results[val_or_test].add_item(eval_entry)
-         print(eval_results)
-         if LOCAL_DEBUG:
-             print("mock uploaded results to lb")
-         else:
-             eval_results.push_to_hub(RESULTS_DATASET, config_name=YEAR_VERSION, token=TOKEN)
-
-         return format_log(f"Model {model} submitted by {organisation} successfully.\nPlease wait a few hours and refresh the leaderboard to see your score displayed.")
      except Exception as e:
-         print(e)
-         return format_error(f"An error occurred, please open a discussion and indicate at what time you encountered the error.\n")
-
- def refresh():
-     eval_results = load_dataset(RESULTS_DATASET, YEAR_VERSION, token=TOKEN, download_mode="force_redownload", verification_mode=VerificationMode.NO_CHECKS, trust_remote_code=True)
-     # eval_dataframe_val = get_dataframe_from_results(eval_results=eval_results, split="validation")
-     eval_dataframe_test = get_dataframe_from_results(eval_results=eval_results, split="test")
-     return eval_dataframe_test
-
- def upload_file(files):
-     file_paths = [file.name for file in files]
-     return file_paths
-
- demo = gr.Blocks()
- with demo:
-     gr.HTML(TITLE)
-     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
-
-     with gr.Row():
-         with gr.Accordion("📙 Citation", open=False):
-             citation_button = gr.Textbox(
-                 value=CITATION_BUTTON_TEXT,
-                 label=CITATION_BUTTON_LABEL,
-                 elem_id="citation-button",
-             )  # .style(show_copy_button=True)
-
-     gr.Markdown("Results: Test")
-     leaderboard_table_test = gr.components.Dataframe(
-         value=eval_dataframe_test, datatype=TYPES, interactive=False,
-         column_widths=["20%"]
-     )
-     # with gr.Tab("Results: Validation"):
-     #     leaderboard_table_val = gr.components.Dataframe(
-     #         value=eval_dataframe_val, datatype=TYPES, interactive=False,
-     #         column_widths=["20%"]
-     #     )
-
-     refresh_button = gr.Button("Refresh")
-     refresh_button.click(
-         refresh,
-         inputs=[],
-         outputs=[
-             # leaderboard_table_val,
-             leaderboard_table_test,
-         ],
      )
-     with gr.Accordion("Submit a new model for evaluation"):
-         with gr.Row():
-             gr.Markdown(SUBMISSION_TEXT, elem_classes="markdown-text")
-         with gr.Row():
-             with gr.Column():
-                 # level_of_test = gr.Radio(["test"], value="test", label="Split")
-                 model_name_textbox = gr.Textbox(label="Agent name")
-                 model_family_textbox = gr.Textbox(label="Model family")
-                 system_prompt_textbox = gr.Textbox(label="System prompt example")
-                 url_textbox = gr.Textbox(label="Url to model information")
-             with gr.Column():
-                 organisation = gr.Textbox(label="Organisation")
-                 mail = gr.Textbox(label="Contact email (will be stored privately, & used if there is an issue with your submission)")
-                 file_output = gr.File()
-
-         with gr.Row():
-             gr.LoginButton()
-             submit_button = gr.Button("Submit Eval On Test")
-         submission_result = gr.Markdown()
-         submit_button.click(
-             add_new_eval,
-             [
-                 # level_of_test,
-                 model_name_textbox,
-                 model_family_textbox,
-                 system_prompt_textbox,
-                 url_textbox,
-                 file_output,
-                 organisation,
-                 mail
-             ],
-             submission_result,
-         )
-
- scheduler = BackgroundScheduler()
- scheduler.add_job(restart_space, "interval", seconds=3600)
- scheduler.start()
- demo.launch(debug=True)
  import os
  import gradio as gr
+ import requests
+ import inspect
  import pandas as pd
+ from langchain_core.messages import HumanMessage
+ from agent import build_graph
+
+ # (Keep Constants as is)
+ # --- Constants ---
+ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
+
+ # --- Basic Agent Definition ---
+ # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
+ HF_TOKEN = os.getenv("HF_TOKEN")
+
+ class BasicAgent:
+     def __init__(self):
+         print("BasicAgent initialized.")
+         self.graph = build_graph()
+
+     def __call__(self, question: str) -> str:
+         print(f"Agent received question (first 50 chars): {question[:50]}...")
+         # Wrap the question in a HumanMessage from langchain_core
+         messages = [HumanMessage(content=question)]
+         messages = self.graph.invoke({"messages": messages})
+         answer = messages['messages'][-1].content
+         return answer[14:]
+
+ def run_and_submit_all(profile: gr.OAuthProfile | None):
+     """
+     Fetches all questions, runs the BasicAgent on them, submits all answers,
+     and displays the results.
+     """
+     # --- Determine HF Space Runtime URL and Repo URL ---
+     space_id = os.getenv("SPACE_ID")  # Get the SPACE_ID for sending a link to the code
+
+     if profile:
+         username = f"{profile.username}"
+         print(f"User logged in: {username}")
+     else:
+         print("User not logged in.")
+         return "Please log in to Hugging Face with the button.", None
+
+     api_url = DEFAULT_API_URL
+     questions_url = f"{api_url}/questions"
+     submit_url = f"{api_url}/submit"
+
+     # 1. Instantiate Agent (modify this part to create your agent)
      try:
+         agent = BasicAgent()
      except Exception as e:
+         print(f"Error instantiating agent: {e}")
+         return f"Error initializing agent: {e}", None
+     # When the app runs as a Hugging Face Space, this link points to your codebase (useful for others, so please keep it public)
+     agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
+     print(agent_code)
+
+     # 2. Fetch Questions
+     print(f"Fetching questions from: {questions_url}")
+     try:
+         response = requests.get(questions_url, timeout=15)
+         response.raise_for_status()
+         questions_data = response.json()
+         if not questions_data:
+             print("Fetched questions list is empty.")
+             return "Fetched questions list is empty or invalid format.", None
+         print(f"Fetched {len(questions_data)} questions.")
+     except requests.exceptions.RequestException as e:
+         print(f"Error fetching questions: {e}")
+         return f"Error fetching questions: {e}", None
+     except requests.exceptions.JSONDecodeError as e:
+         print(f"Error decoding JSON response from questions endpoint: {e}")
+         print(f"Response text: {response.text[:500]}")
+         return f"Error decoding server response for questions: {e}", None
+     except Exception as e:
+         print(f"An unexpected error occurred fetching questions: {e}")
+         return f"An unexpected error occurred fetching questions: {e}", None
+
+     # 3. Run your Agent
+     results_log = []
+     answers_payload = []
+     print(f"Running agent on {len(questions_data)} questions...")
+     for item in questions_data:
+         task_id = item.get("task_id")
+         question_text = item.get("question")
+         if not task_id or question_text is None:
+             print(f"Skipping item with missing task_id or question: {item}")
+             continue
+         try:
+             submitted_answer = agent(question_text)
+             answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
+             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
+         except Exception as e:
+             print(f"Error running agent on task {task_id}: {e}")
+             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
+
+     if not answers_payload:
+         print("Agent did not produce any answers to submit.")
+         return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
+
+     # 4. Prepare Submission
+     submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
+     status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
+     print(status_update)
+
+     # 5. Submit
+     print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
+     try:
+         response = requests.post(submit_url, json=submission_data, timeout=60)
+         response.raise_for_status()
+         result_data = response.json()
+         final_status = (
+             f"Submission Successful!\n"
+             f"User: {result_data.get('username')}\n"
+             f"Overall Score: {result_data.get('score', 'N/A')}% "
+             f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
+             f"Message: {result_data.get('message', 'No message received.')}"
+         )
+         print("Submission successful.")
+         results_df = pd.DataFrame(results_log)
+         return final_status, results_df
+     except requests.exceptions.HTTPError as e:
+         error_detail = f"Server responded with status {e.response.status_code}."
+         try:
+             error_json = e.response.json()
+             error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
+         except requests.exceptions.JSONDecodeError:
+             error_detail += f" Response: {e.response.text[:500]}"
+         status_message = f"Submission Failed: {error_detail}"
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+     except requests.exceptions.Timeout:
+         status_message = "Submission Failed: The request timed out."
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+     except requests.exceptions.RequestException as e:
+         status_message = f"Submission Failed: Network error - {e}"
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+     except Exception as e:
+         status_message = f"An unexpected error occurred during submission: {e}"
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+
+
+ # --- Build Gradio Interface using Blocks ---
+ with gr.Blocks() as demo:
+     gr.Markdown("# Basic Agent Evaluation Runner")
+     gr.Markdown(
+         """
+         **Instructions:**
+         1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc.
+         2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
+         3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
+         ---
+         **Disclaimers:**
+         Once you click the submit button, it can take quite some time (this is the time the agent needs to go through all the questions).
+         This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance, to work around the submit button's long delay, one solution is to cache the answers and submit them in a separate action, or even to answer the questions asynchronously.
+         """
+     )
+
+     gr.LoginButton()
+
+     run_button = gr.Button("Run Evaluation & Submit All Answers")
+
+     status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
+     # Removed max_rows=10 from DataFrame constructor
+     results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
+
+     run_button.click(
+         fn=run_and_submit_all,
+         outputs=[status_output, results_table]
      )
+
+ if __name__ == "__main__":
+     print("\n" + "-"*30 + " App Starting " + "-"*30)
+     # Check for SPACE_HOST and SPACE_ID at startup for information
+     space_host_startup = os.getenv("SPACE_HOST")
+     space_id_startup = os.getenv("SPACE_ID")  # Get SPACE_ID at startup
+
+     if space_host_startup:
+         print(f"✅ SPACE_HOST found: {space_host_startup}")
+         print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
+     else:
+         print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
+
+     if space_id_startup:  # Print repo URLs if SPACE_ID is found
+         print(f"✅ SPACE_ID found: {space_id_startup}")
+         print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
+         print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
+     else:
+         print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
+
+     print("-"*(60 + len(" App Starting ")) + "\n")
+
+     print("Launching Gradio Interface for Basic Agent Evaluation...")
+     demo.launch(debug=True, share=False)
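
One fragile spot in the new BasicAgent is return answer[14:], which unconditionally drops the first 14 characters of the model's reply. Presumably this strips a "FINAL ANSWER: " prefix enforced by the system prompt (that string is exactly 14 characters long, but the prompt itself is not part of this commit, so this is an inference). Any reply that lacks the prefix gets mangled, and a short one comes back empty. A more defensive variant, as a sketch:

    # Strip the expected prefix only when it is actually present.
    # "FINAL ANSWER: " is an assumption about the system prompt's format.
    FINAL_ANSWER_PREFIX = "FINAL ANSWER: "

    def clean_answer(raw: str) -> str:
        # str.removeprefix (Python 3.9+) is a no-op when the prefix is absent,
        # so replies that do not follow the format pass through unchanged.
        return raw.removeprefix(FINAL_ANSWER_PREFIX).strip()

    assert clean_answer("FINAL ANSWER: 42") == "42"
    assert clean_answer("42") == "42"   # "42"[14:] would have returned ""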
gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
metadata.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
requirements.txt CHANGED
@@ -1,5 +1,20 @@
- datasets
  gradio
- huggingface-hub
- numpy
- APScheduler

  gradio
+ requests
+ langchain
+ langchain-community
+ langchain-core
+ langchain-google-genai
+ langchain-huggingface
+ langchain-groq
+ langchain-tavily
+ langchain-chroma
+ langgraph
+ huggingface_hub
+ sentence-transformers
+ arxiv
+ pymupdf
+ wikipedia
+ pgvector
+ python-dotenv
+ protobuf==3.20.*
+ chromadb
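
The protobuf==3.20.* pin is presumably the usual workaround for dependencies that ship generated code incompatible with protobuf 4.x (an assumption; the commit does not state the reason). A quick check that the resolved environment honors the pin:

    # Sanity-check the installed protobuf version against the 3.20.* pin.
    import google.protobuf

    version = google.protobuf.__version__
    assert version.startswith("3.20."), f"unexpected protobuf version: {version}"
    print(f"protobuf {version} OK")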