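# Gradio app for a Hugging Face leaderboard Space: it syncs evaluation data
# from the Hub, renders the leaderboard, and accepts model submissions.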
import gradio as gr
from apscheduler.schedulers.background import BackgroundScheduler
from gradio_leaderboard import ColumnFilter, Leaderboard, SelectColumns
from huggingface_hub import snapshot_download
from src.about import CITATION_BUTTON_LABEL, CITATION_BUTTON_TEXT
from src.about import EVALUATION_QUEUE_TEXT, INTRODUCTION_TEXT, LLM_BENCHMARKS_TEXT
from src.about import TITLE
from src.display.css_html_js import custom_css
from src.display.utils import BENCHMARK_COLS, COLS, EVAL_COLS, EVAL_TYPES
from src.display.utils import AutoEvalColumn, ModelType, Precision, WeightType, fields
from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID
from src.envs import RESULTS_REPO, TOKEN
from src.populate import get_evaluation_queue_df, get_leaderboard_df
from src.submission.submit import add_new_eval
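

# Restart this Space via the Hub API (used to recover when a download fails).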
def restart_space():
API.restart_space(repo_id=REPO_ID)


### Space initialisation
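# Mirror the evaluation-requests dataset locally; on failure, restart the Space.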
try:
print(EVAL_REQUESTS_PATH)
snapshot_download(
repo_id=QUEUE_REPO,
local_dir=EVAL_REQUESTS_PATH,
repo_type="dataset",
tqdm_class=None,
etag_timeout=30,
token=TOKEN,
)
except Exception:
restart_space()
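
# Likewise mirror the evaluation-results dataset.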
try:
print(EVAL_RESULTS_PATH)
snapshot_download(
repo_id=RESULTS_REPO,
local_dir=EVAL_RESULTS_PATH,
repo_type="dataset",
tqdm_class=None,
etag_timeout=30,
token=TOKEN,
)
except Exception:
restart_space()
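
# Build the leaderboard dataframe and split the eval queue into
# finished, running, and pending entries.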
LEADERBOARD_DF = get_leaderboard_df(
EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS
)

(
finished_eval_queue_df,
running_eval_queue_df,
pending_eval_queue_df,
) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
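

# Configure the Leaderboard component: default/locked/hidden columns,
# search fields, and the model-type/precision/size/availability filters.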
def init_leaderboard(dataframe):
if dataframe is None or dataframe.empty:
raise ValueError("Leaderboard DataFrame is empty or None.")
return Leaderboard(
value=dataframe,
datatype=[c.type for c in fields(AutoEvalColumn)],
select_columns=SelectColumns(
default_selection=[
c.name for c in fields(AutoEvalColumn) if c.displayed_by_default
],
cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
label="Select Columns to Display:",
),
search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
filter_columns=[
ColumnFilter(
AutoEvalColumn.model_type.name,
type="checkboxgroup",
label="Model types",
),
ColumnFilter(
AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"
),
ColumnFilter(
AutoEvalColumn.params.name,
type="slider",
min=0.01,
max=150,
label="Select the number of parameters (B)",
),
ColumnFilter(
AutoEvalColumn.still_on_hub.name,
type="boolean",
label="Deleted/incomplete",
default=True,
),
],
bool_checkboxgroup_label="Hide models",
interactive=False,
)
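

# Assemble the Gradio UI: leaderboard tab, about page, submission form, and citation box.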
demo = gr.Blocks(css=custom_css)
with demo:
gr.HTML(TITLE)
gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
with gr.Tabs(elem_classes="tab-buttons") as tabs:
with gr.TabItem("π
LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
leaderboard = init_leaderboard(LEADERBOARD_DF)
with gr.TabItem("π About", elem_id="llm-benchmark-tab-table", id=2):
gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
with gr.TabItem("π Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
with gr.Column():
with gr.Row():
gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
with gr.Column():
with gr.Accordion(
f"β
Finished Evaluations ({len(finished_eval_queue_df)})",
open=False,
):
with gr.Row():
finished_eval_table = gr.components.Dataframe(
value=finished_eval_queue_df,
headers=EVAL_COLS,
datatype=EVAL_TYPES,
row_count=5,
)

                    with gr.Accordion(
                        f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
open=False,
):
with gr.Row():
running_eval_table = gr.components.Dataframe(
value=running_eval_queue_df,
headers=EVAL_COLS,
datatype=EVAL_TYPES,
row_count=5,
)

                    with gr.Accordion(
                        f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
open=False,
):
with gr.Row():
pending_eval_table = gr.components.Dataframe(
value=pending_eval_queue_df,
headers=EVAL_COLS,
datatype=EVAL_TYPES,
row_count=5,
)
with gr.Row():
gr.Markdown(
"# βοΈβ¨ Submit your model here!", elem_classes="markdown-text"
)
with gr.Row():
with gr.Column():
model_name_textbox = gr.Textbox(label="Model name")
revision_name_textbox = gr.Textbox(
label="Revision commit", placeholder="main"
)
model_type = gr.Dropdown(
choices=[
t.to_str(" : ") for t in ModelType if t != ModelType.Unknown
],
label="Model type",
multiselect=False,
value=None,
interactive=True,
)
with gr.Column():
precision = gr.Dropdown(
choices=[
i.value.name for i in Precision if i != Precision.Unknown
],
label="Precision",
multiselect=False,
value="float16",
interactive=True,
)
weight_type = gr.Dropdown(
choices=[i.value.name for i in WeightType],
label="Weights type",
multiselect=False,
value="Original",
interactive=True,
)
base_model_name_textbox = gr.Textbox(
label="Base model (for delta or adapter weights)"
)
submit_button = gr.Button("Submit Eval")
submission_result = gr.Markdown()
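            # Send the form values to add_new_eval and show its status message.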
submit_button.click(
add_new_eval,
[
model_name_textbox,
base_model_name_textbox,
revision_name_textbox,
precision,
weight_type,
model_type,
],
submission_result,
)

    with gr.Row():
        with gr.Accordion("📙 Citation", open=False):
citation_button = gr.Textbox(
value=CITATION_BUTTON_TEXT,
label=CITATION_BUTTON_LABEL,
lines=20,
elem_id="citation-button",
show_copy_button=True,
)
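

# Relaunch the Space every 30 minutes (1800 s) so the evaluation data stays fresh.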
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)
scheduler.start()
demo.queue(default_concurrency_limit=40).launch()