Raghoottam committed
Commit e1b6936 · verified · 1 Parent(s): 94e215a

Update app.py

Files changed (1)
  1. app.py +185 -310
app.py CHANGED
@@ -1,334 +1,209 @@
 
  import os
- import json
- import datetime
- import requests
- from email.utils import parseaddr
-
  import gradio as gr
  import pandas as pd
- import numpy as np
-
- from datasets import load_dataset, VerificationMode
- from apscheduler.schedulers.background import BackgroundScheduler
- from huggingface_hub import HfApi
-
- # InfoStrings
- from scorer import question_scorer
- from content import format_error, format_warning, format_log, TITLE, INTRODUCTION_TEXT, SUBMISSION_TEXT, CITATION_BUTTON_LABEL, CITATION_BUTTON_TEXT, model_hyperlink

- TOKEN = os.environ.get("TOKEN", None)

- OWNER="gaia-benchmark"
- DATA_DATASET = f"{OWNER}/GAIA"
- INTERNAL_DATA_DATASET = f"{OWNER}/GAIA_internal"
- SUBMISSION_DATASET = f"{OWNER}/submissions_internal"
- SUBMISSION_DATASET_PUBLIC = f"{OWNER}/submissions_public"
- CONTACT_DATASET = f"{OWNER}/contact_info"
- RESULTS_DATASET = f"{OWNER}/results_public"
- LEADERBOARD_PATH = f"{OWNER}/leaderboard"
- api = HfApi()

- YEAR_VERSION = "2023"
- ref_scores_len = {"validation": 165, "test": 301}
- ref_level_len = {"validation": {1: 53, 2: 86, 3: 26}, "test": {1: 93, 2: 159, 3: 49}}

- os.makedirs("scored", exist_ok=True)

- # Should be False on spaces and True outside
- LOCAL_DEBUG = False #os.environ.get("system", "") != "spaces"

- # Display the results
- eval_results = load_dataset(RESULTS_DATASET, YEAR_VERSION, token=TOKEN, download_mode="force_redownload", verification_mode=VerificationMode.NO_CHECKS, trust_remote_code=True)
- contact_infos = load_dataset(CONTACT_DATASET, YEAR_VERSION, token=TOKEN, download_mode="force_redownload", verification_mode=VerificationMode.NO_CHECKS, trust_remote_code=True)
- def get_dataframe_from_results(eval_results, split):
-     local_df = eval_results[split]
-     local_df = local_df.map(lambda row: {"model": model_hyperlink(row["url"], row["model"])})
-     local_df = local_df.remove_columns(["system_prompt", "url"])
-     local_df = local_df.rename_column("model", "Agent name")
-     local_df = local_df.rename_column("model_family", "Model family")
-     local_df = local_df.rename_column("score", "Average score (%)")
-     for i in [1, 2, 3]:
-         local_df = local_df.rename_column(f"score_level{i}", f"Level {i} score (%)")
-     local_df = local_df.rename_column("date", "Submission date")
-     df = pd.DataFrame(local_df)
-     df = df.sort_values(by=["Average score (%)"], ascending=False)

-     numeric_cols = [c for c in local_df.column_names if "score" in c]
-     df[numeric_cols] = df[numeric_cols].multiply(100).round(decimals=2)
-     #df = df.style.format("{:.2%}", subset=numeric_cols)

-     return df

- #eval_dataframe_val = get_dataframe_from_results(eval_results=eval_results, split="validation")
- eval_dataframe_test = get_dataframe_from_results(eval_results=eval_results, split="test")

- # Gold answers
- gold_results = {}
- gold_dataset = load_dataset(INTERNAL_DATA_DATASET, f"{YEAR_VERSION}_all", token=TOKEN, trust_remote_code=True)
- gold_results = {split: {row["task_id"]: row for row in gold_dataset[split]} for split in ["test", "validation"]}


- def restart_space():
-     api.restart_space(repo_id=LEADERBOARD_PATH, token=TOKEN)

- TYPES = ["markdown", "number", "number", "number", "number", "str", "str", "str"]
-
- def add_new_eval(
-     #val_or_test: str,
-     model: str,
-     model_family: str,
-     system_prompt: str,
-     url: str,
-     path_to_file: str,
-     organisation: str,
-     mail: str,
-     profile: gr.OAuthProfile,
- ):
-     val_or_test = "test"
      try:
-         # Was the profile created less than 2 month ago?
-         user_data = requests.get(f"https://huggingface.co/api/users/{profile.username}/overview")
-         creation_date = json.loads(user_data.content)["createdAt"]
-         if datetime.datetime.now() - datetime.datetime.strptime(creation_date, '%Y-%m-%dT%H:%M:%S.%fZ') < datetime.timedelta(days=60):
-             return format_error("This account is not authorized to submit on GAIA.")
-
-
-         contact_infos = load_dataset(CONTACT_DATASET, YEAR_VERSION, token=TOKEN, download_mode="force_redownload", verification_mode=VerificationMode.NO_CHECKS, trust_remote_code=True)
-         user_submission_dates = sorted(row["date"] for row in contact_infos[val_or_test] if row["username"] == profile.username)
-         if len(user_submission_dates) > 0 and user_submission_dates[-1] == datetime.datetime.today().strftime('%Y-%m-%d'):
-             return format_error("You already submitted once today, please try again tomorrow.")
-
-
-         is_validation = val_or_test == "validation"
-         # Very basic email parsing
-         _, parsed_mail = parseaddr(mail)
-         if not "@" in parsed_mail:
-             return format_warning("Please provide a valid email adress.")
-
-         print("Adding new eval")
-
-         # Check if the combination model/org already exists and prints a warning message if yes
-         if model.lower() in set([m.lower() for m in eval_results[val_or_test]["model"]]) and organisation.lower() in set([o.lower() for o in eval_results[val_or_test]["organisation"]]):
-             return format_warning("This model has been already submitted.")
-
-         if path_to_file is None:
-             return format_warning("Please attach a file.")
-
-         # SAVE UNSCORED SUBMISSION
-         if LOCAL_DEBUG:
-             print("mock uploaded submission")
-         else:
-             api.upload_file(
-                 repo_id=SUBMISSION_DATASET,
-                 path_or_fileobj=path_to_file.name,
-                 path_in_repo=f"{organisation}/{model}/{YEAR_VERSION}_{val_or_test}_raw_{datetime.datetime.today()}.jsonl",
-                 repo_type="dataset",
-                 token=TOKEN
-             )
-
-         # SAVE CONTACT
-         contact_info = {
-             "model": model,
-             "model_family": model_family,
-             "url": url,
-             "organisation": organisation,
-             "username": profile.username,
-             "mail": mail,
-             "date": datetime.datetime.today().strftime('%Y-%m-%d')
-         }
-         contact_infos[val_or_test]= contact_infos[val_or_test].add_item(contact_info)
-         if LOCAL_DEBUG:
-             print("mock uploaded contact info")
-         else:
-             contact_infos.push_to_hub(CONTACT_DATASET, config_name = YEAR_VERSION, token=TOKEN)
-
-         # SCORE SUBMISSION
-         file_path = path_to_file.name
-         scores = {"all": 0, 1: 0, 2: 0, 3: 0}
-         num_questions = {"all": 0, 1: 0, 2: 0, 3: 0}
-         task_ids = []
-         with open(f"scored/{organisation}_{model}.jsonl", "w") as scored_file:
-             with open(file_path, 'r') as f:
-                 for ix, line in enumerate(f):
-                     try:
-                         task = json.loads(line)
-                     except Exception:
-                         return format_error(f"Line {ix} is incorrectly formatted. Please fix it and resubmit your file.")
-
-                     if "model_answer" not in task:
-                         return format_error(f"Line {ix} contains no model_answer key. Please fix it and resubmit your file.")
-                     answer = task["model_answer"]
-                     task_id = task["task_id"]
-                     try:
-                         level = int(gold_results[val_or_test][task_id]["Level"])
-                     except KeyError:
-                         return format_error(f"{task_id} not found in split {val_or_test}. Are you sure you submitted the correct file?")
-
-                     score = question_scorer(task['model_answer'], gold_results[val_or_test][task_id]["Final answer"])
-
-                     scored_file.write(
-                         json.dumps({
-                             "id": task_id,
-                             "model_answer": answer,
-                             "score": score,
-                             "level": level
-                         }) + "\n"
-                     )
-                     task_ids.append(task_id)
-
-                     scores["all"] += score
-                     scores[level] += score
-                     num_questions["all"] += 1
-                     num_questions[level] += 1
-
-         # Check if there's any duplicate in the submission
-         if len(task_ids) != len(set(task_ids)):
-             return format_error("There are duplicates in your submission. Please check your file and resubmit it.")
-
-         if any([num_questions[level] != ref_level_len[val_or_test][level] for level in [1, 2, 3]]):
-             return format_error(f"Your submission has {num_questions[1]} questions for level 1, {num_questions[2]} for level 2, and {num_questions[3]} for level 3, but it should have {ref_level_len[val_or_test][1]}, {ref_level_len[val_or_test][2]}, and {ref_level_len[val_or_test][3]} respectively. Please check your submission.")
-
-         # SAVE SCORED SUBMISSION
-         if LOCAL_DEBUG:
-             print("mock uploaded scored submission")
-         else:
-             api.upload_file(
-                 repo_id=SUBMISSION_DATASET,
-                 path_or_fileobj=f"scored/{organisation}_{model}.jsonl",
-                 path_in_repo=f"{organisation}/{model}/{YEAR_VERSION}_{val_or_test}_scored_{datetime.datetime.today()}.jsonl",
-                 repo_type="dataset",
-                 token=TOKEN
-             )
-
-         # Save scored file
-         if is_validation:
-             api.upload_file(
-                 repo_id=SUBMISSION_DATASET_PUBLIC,
-                 path_or_fileobj=f"scored/{organisation}_{model}.jsonl",
-                 path_in_repo=f"{organisation}/{model}/{YEAR_VERSION}_{val_or_test}_scored_{datetime.datetime.today()}.jsonl",
-                 repo_type="dataset",
-                 token=TOKEN
-             )
-
-         # SAVE TO LEADERBOARD DATA
-         eval_entry = {
-             "model": model,
-             "model_family": model_family,
-             "system_prompt": system_prompt,
-             "url": url,
-             "organisation": organisation,
-             "score": scores["all"]/ref_scores_len[val_or_test],
-             "score_level1": scores[1]/num_questions[1],
-             "score_level2": scores[2]/num_questions[2],
-             "score_level3": scores[3]/num_questions[3],
-             "date": datetime.datetime.today().strftime('%Y-%m-%d')
-         }
-         if num_questions[1] + num_questions[2] + num_questions[3] != ref_scores_len[val_or_test]:
-             return format_error(f"Your submission has {len(scores['all'])} questions for the {val_or_test} set, but it should have {ref_scores_len[val_or_test]}. Please check your submission.")
-         # Catching spam submissions of 100%
-         if all((eval_entry[k] == 1 for k in ["score_level1", "score_level2", "score_level3"])):
-             return format_error(f"There was a problem with your submission. Please open a discussion.")
-
-         # Testing for duplicates - to see if we want to add something like it as it would allow people to try to see the content of other submissions
-         #eval_entry_no_date = {k: v for k, v in eval_entry if k != "date"}
-         #columns_no_date = [c for c in eval_results[val_or_test].column_names if c != "date"]
-         #if eval_entry_no_date in eval_results[val_or_test].select_columns(columns_no_date):
-         #    return format_error(f"Your submission is an exact duplicate from an existing submission.")
-
-         eval_results[val_or_test] = eval_results[val_or_test].add_item(eval_entry)
-         print(eval_results)
-         if LOCAL_DEBUG:
-             print("mock uploaded results to lb")
-         else:
-             eval_results.push_to_hub(RESULTS_DATASET, config_name = YEAR_VERSION, token=TOKEN)
-
-
-         return format_log(f"Model {model} submitted by {organisation} successfully.\nPlease wait a few hours and refresh the leaderboard to see your score displayed.")
      except Exception as e:
-         print(e)
-         return format_error(f"An error occurred, please open a discussion and indicate at what time you encountered the error.\n")
-
-
- def refresh():
-     eval_results = load_dataset(RESULTS_DATASET, YEAR_VERSION, token=TOKEN, download_mode="force_redownload", verification_mode=VerificationMode.NO_CHECKS, trust_remote_code=True)
-     #eval_dataframe_val = get_dataframe_from_results(eval_results=eval_results, split="validation")
-     eval_dataframe_test = get_dataframe_from_results(eval_results=eval_results, split="test")
-     return eval_dataframe_test
-
- def upload_file(files):
-     file_paths = [file.name for file in files]
-     return file_paths
-

- demo = gr.Blocks()
- with demo:
-     gr.HTML(TITLE)
-     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

-     with gr.Row():
-         with gr.Accordion("📙 Citation", open=False):
-             citation_button = gr.Textbox(
-                 value=CITATION_BUTTON_TEXT,
-                 label=CITATION_BUTTON_LABEL,
-                 elem_id="citation-button",
-             ) #.style(show_copy_button=True)

-     gr.Markdown("Results: Test")
-     leaderboard_table_test = gr.components.Dataframe(
-         value=eval_dataframe_test, datatype=TYPES, interactive=False,
-         column_widths=["20%"]
-     )
-     #with gr.Tab("Results: Validation"):
-     #    leaderboard_table_val = gr.components.Dataframe(
-     #        value=eval_dataframe_val, datatype=TYPES, interactive=False,
-     #        column_widths=["20%"]
-     #    )

-     refresh_button = gr.Button("Refresh")
-     refresh_button.click(
-         refresh,
-         inputs=[],
-         outputs=[
-             #leaderboard_table_val,
-             leaderboard_table_test,
-         ],
      )
-     with gr.Accordion("Submit a new model for evaluation"):
-         with gr.Row():
-             gr.Markdown(SUBMISSION_TEXT, elem_classes="markdown-text")
-         with gr.Row():
-             with gr.Column():
-                 #level_of_test = gr.Radio(["test"], value="test", label="Split")
-                 model_name_textbox = gr.Textbox(label="Agent name")
-                 model_family_textbox = gr.Textbox(label="Model family")
-                 system_prompt_textbox = gr.Textbox(label="System prompt example")
-                 url_textbox = gr.Textbox(label="Url to model information")
-             with gr.Column():
-                 organisation = gr.Textbox(label="Organisation")
-                 mail = gr.Textbox(label="Contact email (will be stored privately, & used if there is an issue with your submission)")
-                 file_output = gr.File()
-
-
-         with gr.Row():
-             gr.LoginButton()
-             submit_button = gr.Button("Submit Eval On Test")
-         submission_result = gr.Markdown()
-         submit_button.click(
-             add_new_eval,
-             [
-                 #level_of_test,
-                 model_name_textbox,
-                 model_family_textbox,
-                 system_prompt_textbox,
-                 url_textbox,
-                 file_output,
-                 organisation,
-                 mail
-             ],
-             submission_result,
-         )

- scheduler = BackgroundScheduler()
- scheduler.add_job(restart_space, "interval", seconds=3600)
- scheduler.start()
- demo.launch(debug=True)
+ """Basic Agent Evaluation Runner"""
  import os
+ import inspect
  import gradio as gr
+ import requests
  import pandas as pd
+ from langchain_core.messages import HumanMessage
+ from agent import build_graph


+ # (Keep Constants as is)
+ # --- Constants ---
+ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

+ # --- Basic Agent Definition ---
+ # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------


+ class BasicAgent:
+     """A langgraph agent."""
+     def __init__(self):
+         print("BasicAgent initialized.")
+         self.graph = build_graph()

+     def __call__(self, question: str) -> str:
+         print(f"Agent received question (first 50 chars): {question[:50]}...")
+         messages = [HumanMessage(content=question)]
+         result = self.graph.invoke({"messages": messages})
+         answer = result['messages'][-1].content
+         return answer  # no [14:] slicing needed any more!


+ def run_and_submit_all(profile: gr.OAuthProfile | None):
+     """
+     Fetches all questions, runs the BasicAgent on them, submits all answers,
+     and displays the results.
+     """
+     # --- Determine HF Space Runtime URL and Repo URL ---
+     space_id = os.getenv("SPACE_ID")  # Get the SPACE_ID for sending a link to the code

+     if profile:
+         username = f"{profile.username}"
+         print(f"User logged in: {username}")
+     else:
+         print("User not logged in.")
+         return "Please log in to Hugging Face with the button.", None

+     api_url = DEFAULT_API_URL
+     questions_url = f"{api_url}/questions"
+     submit_url = f"{api_url}/submit"

+     # 1. Instantiate Agent (modify this part to create your agent)
      try:
+         agent = BasicAgent()
      except Exception as e:
+         print(f"Error instantiating agent: {e}")
+         return f"Error initializing agent: {e}", None
+     # In the case of an app running as a Hugging Face Space, this link points toward your codebase (useful for others, so please keep it public)
+     agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
+     print(agent_code)
+
+     # 2. Fetch Questions
+     print(f"Fetching questions from: {questions_url}")
+     try:
+         response = requests.get(questions_url, timeout=15)
+         response.raise_for_status()
+         questions_data = response.json()
+         if not questions_data:
+             print("Fetched questions list is empty.")
+             return "Fetched questions list is empty or invalid format.", None
+         print(f"Fetched {len(questions_data)} questions.")
+     except requests.exceptions.RequestException as e:
+         print(f"Error fetching questions: {e}")
+         return f"Error fetching questions: {e}", None
+     except requests.exceptions.JSONDecodeError as e:
+         print(f"Error decoding JSON response from questions endpoint: {e}")
+         print(f"Response text: {response.text[:500]}")
+         return f"Error decoding server response for questions: {e}", None
+     except Exception as e:
+         print(f"An unexpected error occurred fetching questions: {e}")
+         return f"An unexpected error occurred fetching questions: {e}", None
+
+     # 3. Run your Agent
+     results_log = []
+     answers_payload = []
+     print(f"Running agent on {len(questions_data)} questions...")
+     for item in questions_data:
+         task_id = item.get("task_id")
+         question_text = item.get("question")
+         if not task_id or question_text is None:
+             print(f"Skipping item with missing task_id or question: {item}")
+             continue
+         try:
+             submitted_answer = agent(question_text)
+             answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
+             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
+         except Exception as e:
+             print(f"Error running agent on task {task_id}: {e}")
+             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
+
+     if not answers_payload:
+         print("Agent did not produce any answers to submit.")
+         return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
+
+     # 4. Prepare Submission
+     submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
+     status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
+     print(status_update)
+
+     # 5. Submit
+     print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
+     try:
+         response = requests.post(submit_url, json=submission_data, timeout=60)
+         response.raise_for_status()
+         result_data = response.json()
+         final_status = (
+             f"Submission Successful!\n"
+             f"User: {result_data.get('username')}\n"
+             f"Overall Score: {result_data.get('score', 'N/A')}% "
+             f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
+             f"Message: {result_data.get('message', 'No message received.')}"
+         )
+         print("Submission successful.")
+         results_df = pd.DataFrame(results_log)
+         return final_status, results_df
+     except requests.exceptions.HTTPError as e:
+         error_detail = f"Server responded with status {e.response.status_code}."
+         try:
+             error_json = e.response.json()
+             error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
+         except requests.exceptions.JSONDecodeError:
+             error_detail += f" Response: {e.response.text[:500]}"
+         status_message = f"Submission Failed: {error_detail}"
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+     except requests.exceptions.Timeout:
+         status_message = "Submission Failed: The request timed out."
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+     except requests.exceptions.RequestException as e:
+         status_message = f"Submission Failed: Network error - {e}"
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+     except Exception as e:
+         status_message = f"An unexpected error occurred during submission: {e}"
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+
+
+ # --- Build Gradio Interface using Blocks ---
+ with gr.Blocks() as demo:
+     gr.Markdown("# Basic Agent Evaluation Runner")
+     gr.Markdown(
+         """
+         **Instructions:**
+
+         1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc.
+         2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
+         3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
+
+         ---
+         **Disclaimers:**
+         Once you click the submit button, it can take quite some time (this is the time the agent needs to go through all the questions).
+         This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance, to deal with the slow submit step, one solution could be to cache the answers and submit them in a separate action, or even to answer the questions asynchronously.
+         """
+     )

+     gr.LoginButton()

+     run_button = gr.Button("Run Evaluation & Submit All Answers")

+     status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
+     # Removed max_rows=10 from DataFrame constructor
+     results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

+     run_button.click(
+         fn=run_and_submit_all,
+         outputs=[status_output, results_table]
      )

+ if __name__ == "__main__":
+     print("\n" + "-"*30 + " App Starting " + "-"*30)
+     # Check for SPACE_HOST and SPACE_ID at startup for information
+     space_host_startup = os.getenv("SPACE_HOST")
+     space_id_startup = os.getenv("SPACE_ID")  # Get SPACE_ID at startup
+
+     if space_host_startup:
+         print(f"✅ SPACE_HOST found: {space_host_startup}")
+         print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
+     else:
+         print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
+
+     if space_id_startup:  # Print repo URLs if SPACE_ID is found
+         print(f"✅ SPACE_ID found: {space_id_startup}")
+         print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
+         print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
+     else:
+         print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
+
+     print("-"*(60 + len(" App Starting ")) + "\n")
+
+     print("Launching Gradio Interface for Basic Agent Evaluation...")
+     demo.launch(debug=True, share=False)
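
For readers who want to poke at the new agent outside the Gradio app, a minimal local smoke test could look like the sketch below. It only uses what the committed file already imports (build_graph from agent.py and HumanMessage), and simply mirrors what BasicAgent.__call__ does; the sample question is made up for illustration and is not part of the commit.

# Illustrative sketch, not part of this commit: a quick local check of the agent wiring.
from langchain_core.messages import HumanMessage
from agent import build_graph  # same helper the committed app.py imports

graph = build_graph()
# Any text works for a smoke test; this question is made up.
result = graph.invoke({"messages": [HumanMessage(content="What is the capital of France?")]})
print(result["messages"][-1].content)  # the final message content is what BasicAgent returns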
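
The Disclaimers in the new app suggest caching answers and submitting them in a separate action, or answering the questions asynchronously, but the commit does not implement this. A minimal sketch of that idea follows, assuming the same /submit payload shape and scoring URL used in app.py; the helper names (answer_all_async, submit_cached) and the cache file are illustrative, not part of the commit.

# Illustrative sketch, not part of this commit: decouple answering from submitting.
import asyncio
import json

import requests

DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"  # same constant as app.py


async def answer_all_async(agent, questions_data, max_concurrency: int = 4, cache_path: str = "answers.json"):
    """Run the synchronous agent in worker threads, then cache the answers to disk."""
    semaphore = asyncio.Semaphore(max_concurrency)

    async def answer_one(item):
        async with semaphore:
            # asyncio.to_thread keeps the event loop responsive while the agent works.
            answer = await asyncio.to_thread(agent, item["question"])
            return {"task_id": item["task_id"], "submitted_answer": answer}

    answers = await asyncio.gather(*(answer_one(item) for item in questions_data))
    with open(cache_path, "w") as f:
        json.dump(list(answers), f)
    return list(answers)


def submit_cached(username: str, agent_code: str, cache_path: str = "answers.json"):
    """Submit previously cached answers as a separate action."""
    with open(cache_path) as f:
        answers = json.load(f)
    payload = {"username": username, "agent_code": agent_code, "answers": answers}
    response = requests.post(f"{DEFAULT_API_URL}/submit", json=payload, timeout=60)
    response.raise_for_status()
    return response.json()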