import datetime
import os.path
import re
import sys
import uuid
import gradio as gr
import pandas as pd
import torch
from config import APP_CONFIG
from data_repository import REPOSITORY_INSTANCE, ModelScoringResult, ModelAdminRating, save_uploaded_tensor
from designs_submission_validations import validate_github_link, validate_user_designs
from domain_constants import SCORE_NAMES_MAP, EVAL_SCORE_NAMES
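# Make the bundled Bike-Bench-Internal sources importable before the bikebench imports below.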
internal_path = os.path.join(os.path.dirname(__file__), "Bike-Bench-Internal", "src")
if internal_path not in sys.path:
sys.path.insert(0, internal_path)
from bikebench.benchmarking.benchmarking_utils import evaluate_designs
from bikebench.data_loading import data_loading
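# The training data's columns define the feature dimension D expected of every submitted design tensor.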
COLUMNS = list(data_loading.load_bike_bench_train().columns)
def compute_scores(
user_gen_designs: torch.Tensor,
eval_count: int,
conditional: bool,
masked: bool,
gradient_free: bool,
) -> ModelScoringResult:
expected_shape = (10_000, len(COLUMNS))
if tuple(user_gen_designs.shape) != expected_shape:
raise ValueError(f"Expected tensor of shape {expected_shape}, got {tuple(user_gen_designs.shape)}")
designs = user_gen_designs.float()
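    # Score without tracking gradients; only the main per-metric scores are used,
    # the remaining outputs of evaluate_designs are discarded.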
with torch.no_grad():
main_scores, _, _ = evaluate_designs(designs)
return ModelScoringResult(
uuid=str(uuid.uuid4()),
model_name="",
paper_link="",
github_link="",
submission_time=datetime.datetime.now(),
eval_count=eval_count,
conditional=conditional,
masked=masked,
gradient_free=gradient_free,
design_quality=main_scores[EVAL_SCORE_NAMES["design_quality"]],
diversity_dpp=main_scores[EVAL_SCORE_NAMES["diversity_dpp"]],
mean_novelty=main_scores[EVAL_SCORE_NAMES["mean_novelty"]],
sim_to_data_mmd=main_scores[EVAL_SCORE_NAMES["sim_to_data_mmd"]],
mean_violations=main_scores[EVAL_SCORE_NAMES["mean_violations"]],
binary_validity=main_scores[EVAL_SCORE_NAMES["binary_validity"]],
)
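# Illustrative only (not called by the app): a tensor of the expected shape can be
# scored directly, e.g.
#   dummy = torch.rand(10_000, len(COLUMNS))
#   result = compute_scores(dummy, eval_count=0, conditional=False, masked=False, gradient_free=True)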
def process_generated_designs(
model_name: str,
github_link: str,
paper_link: str,
eval_count_str: str,
conditional: bool,
masked: bool,
gradient_free: bool,
file: str,
):
try:
# Required fields
if not model_name or model_name.strip() == "":
raise ValueError("Model name is required.")
if not eval_count_str or eval_count_str.strip() == "":
raise ValueError("Eval count is required.")
# GitHub link is optional; only validate non-empty strings
github_link = (github_link or "").strip()
if github_link:
validate_github_link(github_link)
# Eval count: must be an integer
try:
eval_count = int(eval_count_str)
except ValueError:
raise ValueError(f"Eval count must be an integer, got: {eval_count_str!r}")
if not file:
raise ValueError("Please upload a `.pt` or `.pth` file containing the designs tensor.")
        # weights_only=True restricts unpickling to tensors and plain containers,
        # which is safer for user-uploaded files and sufficient here.
        tensor = torch.load(file, map_location="cpu", weights_only=True)
if not isinstance(tensor, torch.Tensor):
raise ValueError(
f"Expected a torch.Tensor in {os.path.basename(file)}, got {type(tensor)}"
)
# Shape check happens inside compute_scores
scores = compute_scores(tensor, eval_count, conditional, masked, gradient_free)
scores.paper_link = paper_link
scores.model_name = model_name
scores.github_link = github_link # may be empty string
REPOSITORY_INSTANCE.add_model_score(scores)
save_uploaded_tensor(scores.uuid, file)
return f"✅ File uploaded successfully. Assigned model UUID: {scores.uuid}"
except ValueError as e:
# User-facing, actionable errors
return f"⚠️ Upload error: {e}"
except Exception as e:
# Log unexpected errors for debugging
print("[process_generated_designs] Unexpected error:", repr(e))
return (
"❌ An unexpected error occurred while processing your file. "
"Please ensure it is a `.pt`/`.pth` tensor of shape `(10000, D)` "
"and try again."
)
def add_rating(model_uuid: str, admin_verified: bool):
REPOSITORY_INSTANCE.add_state_model(ModelAdminRating(
uuid=model_uuid,
admin_verified=bool(admin_verified),
update_time=datetime.datetime.now(),
))
if admin_verified:
return f"Model {model_uuid} marked as Admin-verified."
else:
return f"Model {model_uuid} marked as NOT Admin-verified."
def build_filter_dict(
require_conditional,
require_masked,
require_gradient_free,
max_eval_choice,
require_github,
require_paper,
require_admin_verified,
min_design_quality_str,
max_constraint_violation_str,
max_mmd_str,
min_novelty_str,
min_binary_validity_str,
max_dpp_str,
) -> dict:
"""
Build a compact, semantically named filter dict that we'll encode into a
human-readable filter ID.
NOTE: The *_str arguments may arrive as str/float/int from Gradio.
We normalize everything to strings here so they don't get silently cast
or reformatted (e.g., 0.75 → 0, 1e-3 → 0.001).
"""
def _normalize(v) -> str:
if v is None:
return ""
if isinstance(v, str):
return v.strip()
# For numbers (int/float) or anything else, just string-ify
return str(v).strip()
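    # For the app's default view (Admin-verified only, eval count ≤ 1e6) this returns
    # {"cond": False, "mask": False, "gfree": False, "maxeval": "≤ 1e6", "gh": False,
    #  "paper": False, "verified": True, "dq": "", "cv": "", "mmd": "", "nov": "", "bin": "", "dpp": ""}.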
return {
"cond": bool(require_conditional),
"mask": bool(require_masked),
"gfree": bool(require_gradient_free),
"maxeval": _normalize(max_eval_choice), # enum; keep as string
"gh": bool(require_github),
"paper": bool(require_paper),
"verified": bool(require_admin_verified),
"dq": _normalize(min_design_quality_str),
"cv": _normalize(max_constraint_violation_str),
"mmd": _normalize(max_mmd_str),
"nov": _normalize(min_novelty_str),
"bin": _normalize(min_binary_validity_str),
"dpp": _normalize(max_dpp_str),
}
def encode_filters_to_id(filters: dict) -> str:
"""
Encode filters into a compact, human-readable string like:
'cond=1.maxeval=1e4.gh=1.dq=0.7'
Rules:
- Booleans: include only when True, as key=1
- maxeval: short codes (nol, 1, 1e2, 1e4, 1e6, 1e8); omitted for 'No limit'
- Numeric thresholds: included only when non-empty
"""
parts = []
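    # Example: the app's default filters encode to "verified=1.maxeval=1e6"
    # (False booleans and empty thresholds are simply omitted).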
# --- booleans: only include when True ---
def add_bool(key: str):
if filters.get(key):
parts.append(f"{key}=1")
add_bool("cond")
add_bool("mask")
add_bool("gfree")
add_bool("gh")
add_bool("paper")
add_bool("verified")
# --- maxeval (enum) ---
maxeval = filters.get("maxeval", "No limit")
maxeval_short_map = {
"No limit": "nol",
"≤ 1": "1",
"≤ 1e2": "1e2",
"≤ 1e4": "1e4",
"≤ 1e6": "1e6",
"≤ 1e8": "1e8",
}
short = maxeval_short_map.get(maxeval, "nol")
if short != "nol":
parts.append(f"maxeval={short}")
# --- performance thresholds: only if non-empty ---
for key in ["dq", "cv", "mmd", "nov", "bin", "dpp"]:
val = (filters.get(key) or "").strip()
if val != "":
parts.append(f"{key}={val}")
return ".".join(parts)
def decode_filters_from_id(filter_id: str) -> dict:
"""
Decode a human-readable ID like 'cond=1.maxeval=1e4.dq=0.7'
back into a filter dict with keys:
    cond, mask, gfree, maxeval, gh, paper, verified, dq, cv, mmd, nov, bin, dpp
If parsing fails, returns {}.
"""
if not filter_id:
return {}
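    # Example: "verified=1.maxeval=1e6" decodes to {"verified": True, "maxeval": "≤ 1e6"};
    # keys absent from the ID are simply left out of the returned dict.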
filters: dict = {}
try:
        # Split only on dots that begin a new "key=" token, so decimal values
        # such as "dq=0.7" survive the round trip intact.
        tokens = re.split(r"\.(?=[a-z]+=)", filter_id)
for token in tokens:
token = token.strip()
if not token or "=" not in token:
continue
key, val = token.split("=", 1)
key = key.strip()
val = val.strip()
if not key:
continue
# Boolean flags
if key in {"cond", "mask", "gfree", "gh", "paper", "verified"}:
filters[key] = val.lower() in {"1", "true", "t", "yes", "y"}
continue
# maxeval enum
if key == "maxeval":
short_to_full = {
"nol": "No limit",
"1": "≤ 1",
"1e2": "≤ 1e2",
"1e4": "≤ 1e4",
"1e6": "≤ 1e6",
"1e8": "≤ 1e8",
}
filters["maxeval"] = short_to_full.get(val, "No limit")
continue
# thresholds
if key in {"dq", "cv", "mmd", "nov", "bin", "dpp"}:
filters[key] = val
continue
return filters
except Exception:
# If anything weird happens, fall back to "no filters"
return {}
def filter_leaderboard(
# Training filters
require_conditional,
require_masked,
require_gradient_free,
max_eval_choice,
# Reproducibility filters
require_github,
require_paper,
require_admin_verified,
# Performance filters (as strings, parsed to floats if provided)
min_design_quality_str,
max_constraint_violation_str,
max_mmd_str,
min_novelty_str,
min_binary_validity_str,
max_dpp_str,
) -> pd.DataFrame:
df = REPOSITORY_INSTANCE.get_data_to_display().copy()
# ------------------------
# Training filters
# ------------------------
cond_col = SCORE_NAMES_MAP["conditional"] # "Cond?"
mask_col = SCORE_NAMES_MAP["masked"] # "Masked?"
gfree_col = SCORE_NAMES_MAP["gradient_free"] # "Grad-\nfree?"
if require_conditional and cond_col in df.columns:
df = df[df[cond_col].astype(bool)]
if require_masked and mask_col in df.columns:
df = df[df[mask_col].astype(bool)]
if require_gradient_free and gfree_col in df.columns:
df = df[df[gfree_col].astype(bool)]
# Eval count filter
eval_col = SCORE_NAMES_MAP["eval_count"] # "Eval.\ncount"
if eval_col in df.columns:
df["Eval count_numeric"] = pd.to_numeric(df[eval_col], errors="coerce")
df["Eval count_numeric"] = df["Eval count_numeric"].fillna(float("inf"))
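        # Missing or non-numeric eval counts become +inf, so any finite threshold below excludes them.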
max_eval_map = {
"No limit": None,
"≤ 1": 1,
"≤ 1e2": 100,
"≤ 1e4": 10_000,
"≤ 1e6": 1_000_000,
"≤ 1e8": 100_000_000,
}
threshold = max_eval_map.get(max_eval_choice, None)
if threshold is not None:
df = df[df["Eval count_numeric"] <= threshold]
df = df.drop(columns=["Eval count_numeric"], errors="ignore")
# ------------------------
# Reproducibility filters
# ------------------------
if require_github and "GitHub\nLink" in df.columns:
df = df[df["GitHub\nLink"].notna() & (df["GitHub\nLink"].str.strip() != "")]
if require_paper and "Paper\nLink" in df.columns:
df = df[df["Paper\nLink"].notna() & (df["Paper\nLink"].str.strip() != "")]
if require_admin_verified and "Verified?" in df.columns:
df = df[df["Verified?"].astype(bool)]
# ------------------------
# Performance filters
# ------------------------
def _to_float_or_none(s):
# s may be str/float/int; normalize to string first
if s is None:
return None
s = str(s).strip()
if s == "":
return None
try:
return float(s)
except ValueError:
return None
min_dq = _to_float_or_none(min_design_quality_str)
max_cv = _to_float_or_none(max_constraint_violation_str)
max_mmd = _to_float_or_none(max_mmd_str)
min_nov = _to_float_or_none(min_novelty_str)
min_bin = _to_float_or_none(min_binary_validity_str)
max_dpp = _to_float_or_none(max_dpp_str)
dq_col = SCORE_NAMES_MAP["design_quality"] # "Design\nQuality ↑"
cv_col = SCORE_NAMES_MAP["mean_violations"] # "Constraint\nViolation ↓"
mmd_col = SCORE_NAMES_MAP["sim_to_data_mmd"] # "Similarity\nto Data ↓"
nov_col = SCORE_NAMES_MAP["mean_novelty"] # "Novelty ↑"
bin_col = SCORE_NAMES_MAP["binary_validity"] # "Binary\nValidity ↑"
dpp_col = SCORE_NAMES_MAP["diversity_dpp"] # "Diversity ↓"
if dq_col in df.columns and min_dq is not None:
df = df[pd.to_numeric(df[dq_col], errors="coerce") >= min_dq]
if cv_col in df.columns and max_cv is not None:
df = df[pd.to_numeric(df[cv_col], errors="coerce") <= max_cv]
if mmd_col in df.columns and max_mmd is not None:
df = df[pd.to_numeric(df[mmd_col], errors="coerce") <= max_mmd]
if nov_col in df.columns and min_nov is not None:
df = df[pd.to_numeric(df[nov_col], errors="coerce") >= min_nov]
if bin_col in df.columns and min_bin is not None:
df = df[pd.to_numeric(df[bin_col], errors="coerce") >= min_bin]
if dpp_col in df.columns and max_dpp is not None:
df = df[pd.to_numeric(df[dpp_col], errors="coerce") <= max_dpp]
# ------------------------
# Final sorting
# ------------------------
if dq_col in df.columns:
df = df.sort_values(dq_col, ascending=False)
return df
def filter_leaderboard_with_id(
require_conditional: bool,
require_masked: bool,
require_gradient_free: bool,
max_eval_choice: str,
require_github: bool,
require_paper: bool,
require_admin_verified: bool,
min_design_quality_str: str,
max_constraint_violation_str: str,
max_mmd_str: str,
min_novelty_str: str,
min_binary_validity_str: str,
max_dpp_str: str,
visible_columns: list[str] | None,
):
filters = build_filter_dict(
require_conditional,
require_masked,
require_gradient_free,
max_eval_choice,
require_github,
require_paper,
require_admin_verified,
min_design_quality_str,
max_constraint_violation_str,
max_mmd_str,
min_novelty_str,
min_binary_validity_str,
max_dpp_str,
)
filter_id = encode_filters_to_id(filters)
df = filter_leaderboard(
require_conditional,
require_masked,
require_gradient_free,
max_eval_choice,
require_github,
require_paper,
require_admin_verified,
min_design_quality_str,
max_constraint_violation_str,
max_mmd_str,
min_novelty_str,
min_binary_validity_str,
max_dpp_str,
)
# Decide which columns to show, preserving df.columns order
if visible_columns:
display_cols = [c for c in df.columns if c in visible_columns]
else:
display_cols = list(df.columns)
styled_df = style_leaderboard_df(df, display_cols)
column_widths = compute_leaderboard_column_widths(display_cols)
# Return an update object for the Dataframe, plus the filter_id
return gr.update(value=styled_df, column_widths=column_widths), filter_id
def load_filters_from_id(filter_id: str, visible_columns: list[str] | None = None):
"""
Given a filter ID, decode it, map back to widget values,
apply filters, and return all widget values + the leaderboard DF.
"""
filters = decode_filters_from_id(filter_id)
def _get_bool(key: str, default: bool = False) -> bool:
val = filters.get(key, default)
if isinstance(val, str):
return val.lower() in ("1", "true", "t", "yes", "y")
return bool(val)
def _get_str(key: str, default: str = "") -> str:
val = filters.get(key, default)
if val is None:
return default
return str(val)
# Training filters
require_conditional = _get_bool("cond", False)
require_masked = _get_bool("mask", False)
require_gradient_free = _get_bool("gfree", False)
max_eval_choice = _get_str("maxeval", "No limit")
allowed_max_eval_choices = ["No limit", "≤ 1", "≤ 1e2", "≤ 1e4", "≤ 1e6", "≤ 1e8"]
if max_eval_choice not in allowed_max_eval_choices:
max_eval_choice = "No limit"
# Reproducibility filters
require_github = _get_bool("gh", False)
require_paper = _get_bool("paper", False)
require_admin_verified = _get_bool("verified", False)
# Performance filters (strings)
min_design_quality_str = _get_str("dq", "")
max_constraint_violation_str = _get_str("cv", "")
max_mmd_str = _get_str("mmd", "")
min_novelty_str = _get_str("nov", "")
min_binary_validity_str = _get_str("bin", "")
max_dpp_str = _get_str("dpp", "")
# Apply filters
df = filter_leaderboard(
require_conditional,
require_masked,
require_gradient_free,
max_eval_choice,
require_github,
require_paper,
require_admin_verified,
min_design_quality_str,
max_constraint_violation_str,
max_mmd_str,
min_novelty_str,
min_binary_validity_str,
max_dpp_str,
)
styled_df = style_leaderboard_df(df, visible_columns)
return (
require_conditional,
require_masked,
require_gradient_free,
max_eval_choice,
require_github,
require_paper,
require_admin_verified,
min_design_quality_str,
max_constraint_violation_str,
max_mmd_str,
min_novelty_str,
min_binary_validity_str,
max_dpp_str,
styled_df,
)
def get_display_column_order(df: pd.DataFrame) -> list[str]:
"""
Return columns in the exact order we want to display in the table.
Currently this just preserves df.columns, but you can later
customize (e.g., move 'Model Name' first, etc.).
"""
return list(df.columns)
DEFAULT_VISIBLE_COLUMNS = [
"Model\nName",
"Design\nQuality ↑",
"Constraint\nViolation ↓",
"Similarity\nto Data ↓",
"Diversity ↓",
]
def init_filters_from_id(filter_id: str):
filter_id = (filter_id or "").strip()
if filter_id:
# When filters are in the URL or manually pasted, use them + default visible columns
results = load_filters_from_id(filter_id, DEFAULT_VISIBLE_COLUMNS)
*filter_vals, styled_df = results
return (*filter_vals, styled_df, filter_id)
# ---- No filter ID in URL → our custom defaults ----
# Defaults:
# - require_conditional = False
# - require_masked = False
# - require_gradient_free = False
# - max_eval_choice = "≤ 1e6"
# - require_github = False
# - require_paper = False
# - require_admin_verified = True
# - performance thresholds = "" (no constraints)
require_conditional = False
require_masked = False
require_gradient_free = False
max_eval_choice = "≤ 1e6"
require_github = False
require_paper = False
require_admin_verified = True
min_design_quality_str = ""
max_constraint_violation_str = ""
max_mmd_str = ""
min_novelty_str = ""
min_binary_validity_str = ""
max_dpp_str = ""
# Apply those defaults to the leaderboard
df = filter_leaderboard(
require_conditional=require_conditional,
require_masked=require_masked,
require_gradient_free=require_gradient_free,
max_eval_choice=max_eval_choice,
require_github=require_github,
require_paper=require_paper,
require_admin_verified=require_admin_verified,
min_design_quality_str=min_design_quality_str,
max_constraint_violation_str=max_constraint_violation_str,
max_mmd_str=max_mmd_str,
min_novelty_str=min_novelty_str,
min_binary_validity_str=min_binary_validity_str,
max_dpp_str=max_dpp_str,
)
styled_df = style_leaderboard_df(df, DEFAULT_VISIBLE_COLUMNS)
# Build the corresponding default filter ID: "verified=1.maxeval=1e6"
default_filters = build_filter_dict(
require_conditional,
require_masked,
require_gradient_free,
max_eval_choice,
require_github,
require_paper,
require_admin_verified,
min_design_quality_str,
max_constraint_violation_str,
max_mmd_str,
min_novelty_str,
min_binary_validity_str,
max_dpp_str,
)
default_filter_id = encode_filters_to_id(default_filters)
return (
require_conditional,
require_masked,
require_gradient_free,
max_eval_choice,
require_github,
require_paper,
require_admin_verified,
min_design_quality_str,
max_constraint_violation_str,
max_mmd_str,
min_novelty_str,
min_binary_validity_str,
max_dpp_str,
styled_df,
default_filter_id, # <-- shows "verified=1.maxeval=1e6" on first load
)
def style_leaderboard_df(df: pd.DataFrame, visible_columns: list[str] | None = None):
# Optionally subset columns, but preserve the original df column order.
if visible_columns:
# Keep columns in the order they appear in df, just drop the ones not selected
cols = [c for c in df.columns if c in visible_columns]
if cols:
df = df[cols]
numeric_cols = df.select_dtypes(include="number").columns
if len(numeric_cols) == 0:
return df
fmt: dict[str, str] = {}
eval_col_name = SCORE_NAMES_MAP["eval_count"] # "Eval.\ncount"
for col in numeric_cols:
if col == eval_col_name:
fmt[col] = "{:.0f}"
else:
fmt[col] = "{:.5f}"
return df.style.format(fmt)
def compute_leaderboard_column_widths(columns: list[str]) -> list[int]:
"""
Return a list of pixel widths aligned with the given columns.
"""
widths_by_name = {
# Core identifiers
"Model\nName": 250,
"Submission\nDate": 110,
"Model\nUUID": 60,
"Verified?": 100,
"Verified\nDate": 110,
# Main scores
"Design\nQuality ↑": 100,
"Constraint\nViolation ↓": 110,
"Similarity\nto Data ↓": 110,
"Novelty ↑": 100,
"Binary\nValidity ↑": 110,
"Diversity ↓": 110,
# Training / config metadata
"Eval.\ncount": 90,
"Cond?": 60,
"Masked?": 80,
"Grad-\nfree?": 80,
# Links
"GitHub\nLink": 70,
"Paper\nLink": 70,
}
default_width = 130
return [widths_by_name.get(col, default_width) for col in columns]
def build_approval_app():
pass
def build_app():
with gr.Blocks() as gradio_app:
gr.HTML(
"""
"""
)
gr.HTML(
"""
"""
)
with gr.Tab("BikeBench Leaderboard"):
gr.Markdown("## BikeBench Leaderboard")
with gr.Row():
with gr.Column(scale=1):
# ------------------------------------
# Filter ID & actions (top section)
# ------------------------------------
apply_filters_btn = gr.Button("Apply filters")
# ------------------------------------
# Training filters
# ------------------------------------
gr.Markdown("### Training filters")
require_conditional = gr.Checkbox(label="Conditional?", value=False)
require_masked = gr.Checkbox(label="Masked?", value=False)
                    require_gradient_free = gr.Checkbox(label="Grad-free?", value=False)
max_eval_dropdown = gr.Dropdown(
label="Max eval count",
choices=["No limit", "≤ 1", "≤ 1e2", "≤ 1e4", "≤ 1e6", "≤ 1e8"],
value="≤ 1e6",
)
# ------------------------------------
# Reproducibility filters
# ------------------------------------
gr.Markdown("### Reproducibility filters")
require_github = gr.Checkbox(label="Has GitHub link?", value=False)
require_paper = gr.Checkbox(label="Has paper link?", value=False)
                    require_admin_verified = gr.Checkbox(label="Admin-verified?", value=True)
# ------------------------------------
# Performance filters
# ------------------------------------
gr.Markdown("### Performance filters")
min_design_quality_in = gr.Textbox(label="Min Design Quality (HV)", type="text")
max_constraint_violation_in = gr.Textbox(label="Max Constraint Violation", type="text")
max_mmd_in = gr.Textbox(label="Max Sim. to Data (MMD)", type="text")
min_novelty_in = gr.Textbox(label="Min Novelty", type="text")
min_binary_validity_in = gr.Textbox(label="Min Binary Validity", type="text")
max_dpp_in = gr.Textbox(label="Max Diversity (DPP)", type="text")
gr.Markdown("### Filter ID")
filter_id_box = gr.Textbox(
label="Filter ID (copy/paste to share or load)",
value="",
)
load_filters_btn = gr.Button("Load from ID")
with gr.Column(scale=8):
initial_df = REPOSITORY_INSTANCE.get_data_to_display()
ordered_cols = get_display_column_order(initial_df)
initial_df = initial_df[ordered_cols]
# Keep only defaults that actually exist in the df
default_visible = [c for c in DEFAULT_VISIBLE_COLUMNS if c in ordered_cols]
# Fallback: if for some reason default_visible is empty, show everything
display_cols = default_visible or ordered_cols
visible_columns = gr.CheckboxGroup(
label="Columns to display",
choices=ordered_cols,
value=display_cols,
)
leaderboard_df = gr.Dataframe(
value=style_leaderboard_df(initial_df, display_cols),
label="Leaderboard",
interactive=False,
max_height=600,
wrap=False,
pinned_columns=0,
show_row_numbers=False,
elem_id="leaderboard_df",
)
gr.Markdown(
"""
**Project links:**
• [BikeBench GitHub repository](https://github.com/Lyleregenwetter/Bikebench)
• [BikeBench paper (arXiv)](https://arxiv.org/pdf/2508.00830)
**Citation (BibTeX):**
```bibtex
                    @inproceedings{regenwetter2025bike,
                      title={BikeBench: A Bicycle Design Benchmark for Generative Models with Objectives and Constraints},
                      author={Regenwetter, Lyle and Obaideh, Yazan Abu and Chiotti, Fabien and Lykourentzou, Ioanna and Ahmed, Faez},
                      booktitle={Advances in Neural Information Processing Systems},
                      year={2025}
                    }
```
"""
)
# ------------------------------------------------------
# Apply filters: Python → filtered DF + filter ID
# ------------------------------------------------------
apply_filters_btn.click(
fn=filter_leaderboard_with_id,
inputs=[
# Training
require_conditional,
require_masked,
require_gradient_free,
max_eval_dropdown,
# Reproducibility
require_github,
require_paper,
require_admin_verified,
# Performance
min_design_quality_in,
max_constraint_violation_in,
max_mmd_in,
min_novelty_in,
min_binary_validity_in,
max_dpp_in,
                    # Columns
visible_columns,
],
outputs=[leaderboard_df, filter_id_box],
)
# Load from ID: set filters + reload the same way
load_filters_btn.click(
fn=load_filters_from_id,
inputs=[filter_id_box, visible_columns],
outputs=[
# Training
require_conditional,
require_masked,
require_gradient_free,
max_eval_dropdown,
# Reproducibility
require_github,
require_paper,
require_admin_verified,
# Performance
min_design_quality_in,
max_constraint_violation_in,
max_mmd_in,
min_novelty_in,
min_binary_validity_in,
max_dpp_in,
# Table
leaderboard_df,
],
)
visible_columns.change(
fn=filter_leaderboard_with_id,
inputs=[
# Training
require_conditional,
require_masked,
require_gradient_free,
max_eval_dropdown,
# Reproducibility
require_github,
require_paper,
require_admin_verified,
# Performance
min_design_quality_in,
max_constraint_violation_in,
max_mmd_in,
min_novelty_in,
min_binary_validity_in,
max_dpp_in,
# Columns
visible_columns,
],
outputs=[leaderboard_df, filter_id_box],
)
# ------------------------------------------------------
            # On page load: initialize all widgets, the leaderboard DF, and the
            # filter ID box (the box starts empty, so init_filters_from_id's
            # default branch applies)
# ------------------------------------------------------
gradio_app.load(
fn=init_filters_from_id,
inputs=[filter_id_box], # initial value is "", so you'll hit the default branch
outputs=[
# Training
require_conditional,
require_masked,
require_gradient_free,
max_eval_dropdown,
# Reproducibility
require_github,
require_paper,
require_admin_verified,
# Performance
min_design_quality_in,
max_constraint_violation_in,
max_mmd_in,
min_novelty_in,
min_binary_validity_in,
max_dpp_in,
# Table + Filter ID
leaderboard_df,
filter_id_box,
],
)
# ----------------------------------------------------------
# Upload tab
# ----------------------------------------------------------
with gr.Tab("Upload File"):
gr.Markdown(
"Upload a `.pt` or `.pth` tensor with shape `(10000, D)` and specify model attributes.\n\n"
"**Required fields:** Model Name, Eval count, and designs tensor file."
)
model_name_in = gr.Textbox(label="Model Name (required)")
            github_link_in = gr.Textbox(label="GitHub Link (optional, but required for official verification)")
paper_link_in = gr.Textbox(label="Paper link (optional)")
eval_count_in = gr.Textbox(label="Eval count (integer, required)")
conditional_in = gr.Checkbox(label="Conditional?")
masked_in = gr.Checkbox(label="Masked?")
gradient_free_in = gr.Checkbox(label="Gradient-free?")
file_in = gr.File(
label="Upload designs tensor (.pt or .pth)",
file_types=[".pt", ".pth"],
type="filepath",
)
submit_btn = gr.Button("Submit")
output_box = gr.Textbox(label="Result", interactive=False)
submit_btn.click(
fn=process_generated_designs,
inputs=[
model_name_in,
github_link_in,
paper_link_in,
eval_count_in,
conditional_in,
masked_in,
gradient_free_in,
file_in,
],
outputs=output_box,
)
# ----------------------------------------------------------
# Ratings tab
# ----------------------------------------------------------
if not APP_CONFIG.production:
with gr.Tab("Add rating"):
rating_uuid_in = gr.Textbox(label="Model UUID")
rating_verified_in = gr.Checkbox(label="Admin-verified?", value=True)
rating_out = gr.Textbox(label="Result", interactive=False)
gr.Button("Save rating").click(
fn=add_rating,
inputs=[rating_uuid_in, rating_verified_in],
outputs=rating_out,
)
return gradio_app
if __name__ == "__main__":
build_app().launch(debug=(not APP_CONFIG.production))