TTimur committed commit c097229 · 0 parent(s)

Initial commit

Files changed (49):
  1. .DS_Store +0 -0
  2. .gitattributes +35 -0
  3. .gitignore +15 -0
  4. .pre-commit-config.yaml +53 -0
  5. Makefile +13 -0
  6. README.md +36 -0
  7. README_github.md +154 -0
  8. app.py +348 -0
  9. kyrgyz_results/results_Qwen__Qwen2.5-0.5B-Instruct_few_shot.json +30 -0
  10. kyrgyz_results/results_Qwen__Qwen2.5-0.5B-Instruct_zero_shot.json +30 -0
  11. kyrgyz_results/results_Qwen__Qwen2.5-1.5B-Instruct_few_shot.json +30 -0
  12. kyrgyz_results/results_Qwen__Qwen2.5-1.5B-Instruct_zero_shot.json +30 -0
  13. kyrgyz_results/results_Qwen__Qwen2.5-3B-Instruct_few_shot.json +30 -0
  14. kyrgyz_results/results_Qwen__Qwen2.5-3B-Instruct_zero_shot.json +30 -0
  15. kyrgyz_results/results_Qwen__Qwen2.5-7B-Instruct_few_shot.json +30 -0
  16. kyrgyz_results/results_Qwen__Qwen2.5-7B-Instruct_zero_shot.json +30 -0
  17. kyrgyz_results/results_Qwen__Qwen3-0.6B_few_shot.json +30 -0
  18. kyrgyz_results/results_Qwen__Qwen3-0.6B_zero_shot.json +30 -0
  19. kyrgyz_results/results_Qwen__Qwen3-1.7B_few_shot.json +30 -0
  20. kyrgyz_results/results_Qwen__Qwen3-1.7B_zero_shot.json +30 -0
  21. kyrgyz_results/results_Qwen__Qwen3-4B_few_shot.json +30 -0
  22. kyrgyz_results/results_Qwen__Qwen3-4B_zero_shot.json +30 -0
  23. kyrgyz_results/results_Qwen__Qwen3-8B_few_shot.json +30 -0
  24. kyrgyz_results/results_Qwen__Qwen3-8B_zero_shot.json +30 -0
  25. kyrgyz_results/results_google__gemma-3-1b-it_few_shot.json +30 -0
  26. kyrgyz_results/results_google__gemma-3-1b-it_zero_shot.json +30 -0
  27. kyrgyz_results/results_google__gemma-3-270m_few_shot.json +30 -0
  28. kyrgyz_results/results_google__gemma-3-270m_zero_shot.json +30 -0
  29. kyrgyz_results/results_google__gemma-3-4b-it_few_shot.json +30 -0
  30. kyrgyz_results/results_google__gemma-3-4b-it_zero_shot.json +30 -0
  31. kyrgyz_results/results_meta-llama__Llama-3.1-8B-Instruct_few_shot.json +30 -0
  32. kyrgyz_results/results_meta-llama__Llama-3.1-8B-Instruct_zero_shot.json +30 -0
  33. kyrgyz_results/results_meta-llama__Llama-3.2-1B-Instruct_few_shot.json +30 -0
  34. kyrgyz_results/results_meta-llama__Llama-3.2-1B-Instruct_zero_shot.json +30 -0
  35. kyrgyz_results/results_meta-llama__Llama-3.2-3B-Instruct_few_shot.json +30 -0
  36. kyrgyz_results/results_meta-llama__Llama-3.2-3B-Instruct_zero_shot.json +30 -0
  37. pyproject.toml +13 -0
  38. requirements.txt +15 -0
  39. scripts/create_request_file.py +107 -0
  40. scripts/generate_kyrgyz_results_json.py +117 -0
  41. src/display/about.py +103 -0
  42. src/display/css_html_js.py +111 -0
  43. src/display/formatting.py +36 -0
  44. src/display/utils.py +135 -0
  45. src/envs.py +20 -0
  46. src/leaderboard/read_evals.py +195 -0
  47. src/populate.py +56 -0
  48. src/submission/check_validity.py +103 -0
  49. src/submission/submit.py +113 -0
.DS_Store ADDED
Binary file (6.15 kB).
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ scale-hf-logo.png filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,15 @@
+ auto_evals/
+ venv/
+ __pycache__/
+ .env
+ .ipynb_checkpoints
+ *ipynb
+ .vscode/
+
+ gpt_4_evals/
+ human_evals/
+ eval-queue/
+ eval-results/
+ auto_evals/
+
+ src/assets/model_counts.html
.pre-commit-config.yaml ADDED
@@ -0,0 +1,53 @@
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ default_language_version:
+   python: python3
+
+ ci:
+   autofix_prs: true
+   autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions'
+   autoupdate_schedule: quarterly
+
+ repos:
+   - repo: https://github.com/pre-commit/pre-commit-hooks
+     rev: v4.3.0
+     hooks:
+       - id: check-yaml
+       - id: check-case-conflict
+       - id: detect-private-key
+       - id: check-added-large-files
+         args: ['--maxkb=1000']
+       - id: requirements-txt-fixer
+       - id: end-of-file-fixer
+       - id: trailing-whitespace
+
+   - repo: https://github.com/PyCQA/isort
+     rev: 5.12.0
+     hooks:
+       - id: isort
+         name: Format imports
+
+   - repo: https://github.com/psf/black
+     rev: 22.12.0
+     hooks:
+       - id: black
+         name: Format code
+         additional_dependencies: ['click==8.0.2']
+
+   - repo: https://github.com/charliermarsh/ruff-pre-commit
+     # Ruff version.
+     rev: 'v0.0.267'
+     hooks:
+       - id: ruff
Makefile ADDED
@@ -0,0 +1,13 @@
+ .PHONY: style quality
+
+
+ style:
+ 	python -m black --line-length 119 .
+ 	python -m isort .
+ 	ruff check --fix .
+
+
+ quality:
+ 	python -m black --check --line-length 119 .
+ 	python -m isort --check-only .
+ 	ruff check .
README.md ADDED
@@ -0,0 +1,36 @@
+ ---
+ title: OpenLLM Turkish leaderboard v0.2
+ emoji: 🥇
+ colorFrom: green
+ colorTo: indigo
+ sdk: gradio
+ sdk_version: 4.36.1
+ app_file: app.py
+ pinned: true
+ license: apache-2.0
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
+ Most of the variables to change for a default leaderboard are in `src/envs.py` (replace the paths with your leaderboard's) and `src/display/about.py`.
+
+ Results files should have the following format:
+ ```
+ {
+     "config": {
+         "model_dtype": "torch.float16", # or torch.bfloat16 or 8bit or 4bit
+         "model_name": "path of the model on the hub: org/model",
+         "model_sha": "revision on the hub",
+     },
+     "results": {
+         "task_name": {
+             "metric_name": score,
+         },
+         "task_name2": {
+             "metric_name": score,
+         }
+     }
+ }
+ ```
+
+ Request files are created automatically by this tool.
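
As a quick sanity check of the schema above, here is a minimal sketch that writes one such results file and re-validates it; the model name, task names, scores, and the `results_demo.json` file name are illustrative placeholders:

```python
import json

# Minimal example of the results-file schema documented above;
# model name, task names, and scores are placeholders.
payload = {
    "config": {
        "model_dtype": "torch.float16",
        "model_name": "org/model",
        "model_sha": "main",
    },
    "results": {
        "task_name": {"metric_name": 0.5},
        "task_name2": {"metric_name": 0.5},
    },
}

with open("results_demo.json", "w") as f:
    json.dump(payload, f, ensure_ascii=False, indent=2)

# Re-read and check that the two required top-level keys are present.
with open("results_demo.json") as f:
    loaded = json.load(f)
assert {"config", "results"} <= loaded.keys()
```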
README_github.md ADDED
@@ -0,0 +1,154 @@
+ # Kyrgyz LLM Evaluation Dataset
+
+ Welcome to the **KyrgyzLLM-Bench - Kyrgyz LLM Evaluation Dataset**, your one-stop solution for evaluating Large Language Models (LLMs) in Kyrgyz. This toolkit helps you measure model performance across diverse domains and question types specific to the Kyrgyz language, so your models can be more accurate, robust, and helpful for Kyrgyz-speaking users. Whether you're a researcher, developer, or practitioner, this dataset is tailored to help your Kyrgyz-capable LLM thrive.
+
+ [![Paper](https://img.shields.io/badge/IEEE%20Xplore-Paper-blue)](https://ieeexplore.ieee.org/document/11206960)
+ [![Model](https://img.shields.io/badge/HuggingFace-Hub-yellow)](https://huggingface.co/TTimur)
+
+
+ ![Kyrgyz LLM Evaluation in Tech Landscape](Kyrgyz%20LLM%20Evaluation%20in%20Tech%20Landscape.png "Kyrgyz LLM Evaluation")
+
+
+ Quick facts:
+ - Language Support: Kyrgyz (ky)
+ - Audience: Researchers, developers, and the Kyrgyz NLP community
+ - Native Kyrgyz-language datasets:
+   • MMLU
+   • Reading comprehension
+ - Translated benchmarks:
+   • Commonsense reasoning & understanding: HellaSwag and WinoGrande
+   • Reading comprehension: BoolQ
+   • Mathematics: GSM8K
+   • Robustness & factuality: TruthfulQA
+ - Tooling: First-class support with Lighteval (see `lighteval/` scripts) and LM_harness (see `lm_harness/` scripts)
+
+ ## 🔍 What's Inside?
+
+ KyrgyzLLM-Bench is a comprehensive suite purpose-built to evaluate LLMs’ deep understanding and reasoning in **Kyrgyz**. It combines natively authored benchmarks with carefully translated and post-edited international tasks to provide broad and culturally grounded coverage.
+
+ - **Language**: Kyrgyz (ky)
+ - **Components**:
+   - KyrgyzMMLU (native, multiple-choice, 7,977 questions)
+   - KyrgyzRC (native, reading comprehension, 400 questions)
+   - Translated benchmarks: HellaSwag, WinoGrande, BoolQ, GSM8K, TruthfulQA (manually post-edited)
+
+ ### 🧠 Diverse and Deep Evaluation Domains
+
+ KyrgyzLLM-Bench spans foundational sciences, humanities, and applied domains relevant to the Kyrgyz national curriculum and public knowledge.
+
+ #### KyrgyzMMLU (native, multiple-choice)
+ - Total: 7,977 questions written by curriculum experts
+ - Subjects and counts:
+   - Math: 1,169
+   - Physics: 1,228
+   - Geography: 640
+   - Biology: 1,550
+   - Kyrgyz Language: 360
+   - Kyrgyz Literature: 1,169
+   - Kyrgyz History: 440
+   - Medicine: 216
+   - Chemistry: 1,205
+
+ #### KyrgyzRC (native, reading comprehension)
+ - Total: 400 multiple-choice questions (4 options, 1 correct)
+ - Sources: Kyrgyz Wikipedia, national news, literature, and school-style math word problems
+ - Skills evaluated: factual understanding, inference, vocabulary-in-context, multi-sentence reasoning
+
+ #### Translated Benchmarks (with manual post-editing)
+ - Commonsense reasoning: HellaSwag, WinoGrande
+ - Reading comprehension: BoolQ
+ - Mathematics: GSM8K
+ - Robustness/factuality: TruthfulQA
+
+ Translation pipeline: dual-model machine translation (Claude 4 Sonnet, Gemini 2.5 Flash), ensemble comparison, expert post-editing, and quality checks (incl. back-translation sampling); a sketch of the back-translation check follows below.
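
As referenced above, a minimal sketch of the back-translation sampling step. The `translate` function is a hypothetical stand-in for whichever MT backend is used (it is not part of this repo); only the standard-library `difflib` similarity check is concrete here, and the 0.6 threshold and sample size of 50 are illustrative choices:

```python
import difflib
import random

def translate(text: str, src: str, tgt: str) -> str:
    """Hypothetical MT wrapper; plug in the actual translation backend."""
    raise NotImplementedError

def back_translation_sample(english_items, sample_size=50, threshold=0.6, seed=0):
    """Round-trip a random sample en -> ky -> en and flag low-similarity
    items for expert review."""
    random.seed(seed)
    flagged = []
    for text in random.sample(english_items, min(sample_size, len(english_items))):
        kyrgyz = translate(text, src="en", tgt="ky")
        round_trip = translate(kyrgyz, src="ky", tgt="en")
        score = difflib.SequenceMatcher(None, text.lower(), round_trip.lower()).ratio()
        if score < threshold:
            flagged.append((text, kyrgyz, score))
    return flagged
```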
+
+ ## ⚡ Turbocharge Your Evaluations with Lighteval 🚀
+ If you want to evaluate models with Lighteval, please see `README_lighteval.md`: all installation steps, supported Kyrgyz tasks, example commands (HF and local), and leaderboard task files are documented there.
+
+ - Guide: [README_lighteval.md](./README_lighteval.md)
+
+ ## 📊 Results
+
+ Below are the benchmark results for **Kyrgyz** in both zero-shot and few-shot settings.
+ Higher scores indicate better performance (accuracy for most tasks, QEM for GSM8K). The **Average** column is the unweighted mean of the seven task scores; a sketch that recomputes it from the JSON files in `kyrgyz_results/` follows after the tables.
+
+ ---
+
+ ### 🏔️ Kyrgyz Zero-Shot Evaluation Results
+
+ | Model | KyrgyzMMLU | KyrgyzRC | WinoGrande | BoolQ | HellaSwag | GSM8K | TruthfulQA | **Average** |
+ | :-------------------- | :--------: | :------: | :--------: | :---: | :-------: | :---: | :--------: | :---------: |
+ | **Qwen** | | | | | | | | |
+ | Qwen2.5-0.5B-Instruct | 27.4 | 53.2 | 51.5 | 37.9 | 14.6 | 0.7 | 33.5 | 31.3 |
+ | Qwen2.5-1.5B-Instruct | 27.9 | 60.5 | 50.1 | 38.6 | 22.9 | 0.7 | 32.5 | 33.3 |
+ | Qwen2.5-3B-Instruct | 28.6 | 66.0 | 50.5 | 59.4 | 22.0 | 0.7 | 34.2 | 37.3 |
+ | Qwen2.5-7B-Instruct | 31.5 | 70.0 | 48.7 | 56.3 | 10.0 | 1.1 | 34.1 | 36.0 |
+ | Qwen3-0.6B | 26.0 | 61.8 | 49.8 | 38.0 | 11.1 | 0.7 | 29.9 | 31.0 |
+ | Qwen3-1.7B | 27.9 | 61.8 | 48.9 | 40.4 | 24.6 | 0.7 | 29.6 | 33.4 |
+ | Qwen3-4B | 30.3 | 68.2 | 49.0 | 38.3 | 24.5 | 0.7 | 32.9 | 34.8 |
+ | Qwen3-8B | 32.1 | 71.8 | 51.0 | 39.2 | 24.6 | 0.7 | 34.7 | 36.3 |
+ | **Gemma** | | | | | | | | |
+ | gemma-3-1b-it | 26.7 | 58.2 | 50.0 | 37.9 | 24.4 | 0.7 | 34.0 | 33.1 |
+ | gemma-3-270m | 27.5 | 56.8 | 48.3 | 37.9 | 17.4 | 0.7 | 34.7 | 31.9 |
+ | gemma-3-4b-it | 30.3 | 70.2 | 50.6 | 58.3 | 24.6 | 0.7 | 34.7 | **38.5** |
+ | **Meta-Llama** | | | | | | | | |
+ | Llama-3.1-8B-Instruct | 31.0 | 75.2 | 50.6 | 50.3 | 26.6 | 0.7 | 33.7 | 38.3 |
+ | Llama-3.2-1B-Instruct | 26.3 | 58.2 | 49.4 | 38.3 | 0.2 | 0.7 | 30.1 | 29.0 |
+ | Llama-3.2-3B-Instruct | 27.8 | 64.2 | 49.1 | 43.1 | 24.5 | 0.7 | 31.5 | 34.4 |
+
+
+ *Zero-shot evaluation results on Kyrgyz benchmarks (%). The metric is accuracy, except for GSM8K which uses QEM. Higher is better.*
+
+ ---
+
+ ### 🏔️ Kyrgyz Few-Shot Evaluation Results
+
+ | Model | KyrgyzMMLU | KyrgyzRC | WinoGrande | BoolQ | HellaSwag | GSM8K | TruthfulQA | **Average** |
+ | :-------------------- | :--------: | :------: | :--------: | :---: | :-------: | :---: | :--------: | :---------: |
+ | **Qwen** | | | | | | | | |
+ | Qwen2.5-0.5B-Instruct | 25.4 | 54.0 | 49.7 | 61.0 | 25.9 | 2.2 | 33.4 | 35.9 |
+ | Qwen2.5-1.5B-Instruct | 28.7 | 67.5 | 50.1 | 58.0 | 26.5 | 6.1 | 32.9 | 38.5 |
+ | Qwen2.5-3B-Instruct | 34.0 | 73.2 | 51.3 | 57.4 | 23.7 | 9.5 | 34.4 | 40.5 |
+ | Qwen2.5-7B-Instruct | 38.5 | 74.8 | 50.4 | 64.6 | 17.8 | 32.1 | 36.2 | 44.9 |
+ | Qwen3-0.6B | 26.8 | 59.5 | 50.1 | 60.1 | 26.4 | 4.3 | 30.0 | 36.8 |
+ | Qwen3-1.7B | 30.8 | 71.2 | 48.6 | 62.0 | 25.2 | 18.5 | 30.3 | 41.0 |
+ | Qwen3-4B | 38.5 | 77.2 | 48.1 | 74.0 | 24.7 | 51.5 | 32.5 | 49.4 |
+ | Qwen3-8B | 44.5 | 81.8 | 50.6 | 76.9 | 26.4 | 60.0 | 35.8 | **53.7** |
+ | **Gemma** | | | | | | | | |
+ | gemma-3-1b-it | 26.5 | 38.0 | 48.9 | 62.8 | 23.5 | 3.2 | 31.3 | 33.5 |
+ | gemma-3-270m | 27.0 | 53.2 | 48.7 | 61.5 | 27.6 | 1.4 | 36.6 | 36.6 |
+ | gemma-3-4b-it | 29.5 | 25.0 | 49.6 | 62.1 | 24.6 | 0.0 | 50.0 | 34.5 |
+ | **Meta-Llama** | | | | | | | | |
+ | Llama-3.1-8B-Instruct | 38.1 | 80.5 | 51.6 | 75.5 | 21.9 | 37.0 | 34.4 | 48.5 |
+ | Llama-3.2-1B-Instruct | 26.1 | 45.8 | 49.7 | 62.0 | 25.8 | 2.7 | 30.3 | 34.7 |
+ | Llama-3.2-3B-Instruct | 29.4 | 64.8 | 48.9 | 62.3 | 25.3 | 12.9 | 32.9 | 39.6 |
+
+
+ *Few-shot evaluation results on Kyrgyz benchmarks (%). All tasks are 5-shot, except for HellaSwag (10-shot). The metric is accuracy, except for GSM8K which uses QEM. Higher is better.*
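
As noted above, the **Average** column is the unweighted mean of the seven task scores, so it can be recomputed directly from the files in `kyrgyz_results/`, where the scores are stored as fractions (the tables show them ×100):

```python
import glob
import json

# Recompute each model's average from the zero-shot result files;
# swap "zero_shot" for "few_shot" to check the second table.
for path in sorted(glob.glob("kyrgyz_results/results_*_zero_shot.json")):
    with open(path) as f:
        data = json.load(f)
    scores = [task["metric_name"] for task in data["results"].values()]
    average = 100.0 * sum(scores) / len(scores)
    print(f"{data['config']['model_name']}: {average:.1f}")
```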
+
+
+ ## 💡 Contributions Welcome!
+
+ Have ideas, bug fixes, or want to add a custom task? We'd love for you to be part of the journey! Contributions help grow and enhance the capabilities of **KyrgyzLLM-Bench**.
+
+ ## 📜 Citation
+
+ Thanks for using **KyrgyzLLM-Bench**, where language models meet Kyrgyz precision and creativity! Let's build smarter models together. 🚀
+
+ If you find this dataset useful in your research, please cite it as follows:
+
+ ```bibtex
+ @article{KyrgyzLLM-Bench,
+   title={Bridging the Gap in Less-Resourced Languages: Building a Benchmark for Kyrgyz Language Models},
+   author={Timur Turatali and Aida Turdubaeva and Islam Zhenishbekov and Zhoomart Suranbaev and Anton Alekseev and Rustem Izmailov},
+   year={2025},
+   url={https://huggingface.co/datasets/TTimur/kyrgyzMMLU,
+        https://huggingface.co/datasets/TTimur/kyrgyzRC,
+        https://huggingface.co/datasets/TTimur/winogrande_kg,
+        https://huggingface.co/datasets/TTimur/boolq_kg,
+        https://huggingface.co/datasets/TTimur/truthfulqa_kg,
+        https://huggingface.co/datasets/TTimur/gsm8k_kg,
+        https://huggingface.co/datasets/TTimur/hellaswag_kg}
+ }
+ ```
app.py ADDED
@@ -0,0 +1,348 @@
+ import gradio as gr
+ import pandas as pd
+ from apscheduler.schedulers.background import BackgroundScheduler
+ from huggingface_hub import snapshot_download
+ import os
+ os.environ['CURL_CA_BUNDLE'] = ''
+
+ from src.display.about import (
+     CITATION_BUTTON_LABEL,
+     CITATION_BUTTON_TEXT,
+     EVALUATION_QUEUE_TEXT,
+     INTRODUCTION_TEXT,
+     LLM_BENCHMARKS_TEXT,
+     TITLE,
+ )
+ from src.display.css_html_js import custom_css
+ from src.display.utils import (
+     BENCHMARK_COLS,
+     COLS,
+     EVAL_COLS,
+     EVAL_TYPES,
+     NUMERIC_INTERVALS,
+     TYPES,
+     AutoEvalColumn,
+     ModelType,
+     fields,
+     WeightType,
+     Precision
+ )
+ from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, TOKEN, QUEUE_REPO, REPO_ID, RESULTS_REPO
+ from src.populate import get_evaluation_queue_df, get_leaderboard_df
+ from src.submission.submit import add_new_eval
+
+
+ def restart_space():
+     API.restart_space(repo_id=REPO_ID, token=TOKEN)
+
+ try:
+     print(EVAL_REQUESTS_PATH)
+     snapshot_download(
+         repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30
+     )
+ except Exception:
+     restart_space()
+ try:
+     print(EVAL_RESULTS_PATH)
+     snapshot_download(
+         repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30
+     )
+ except Exception:
+     restart_space()
+
+
+ raw_data, original_df = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
+ leaderboard_df = original_df.copy()
+
+ (
+     finished_eval_queue_df,
+     running_eval_queue_df,
+     pending_eval_queue_df,
+ ) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
+
+
+ # Searching and filtering
+ def update_table(
+     hidden_df: pd.DataFrame,
+     columns: list,
+     type_query: list,
+     precision_query: str,
+     size_query: list,
+     show_deleted: bool,
+     query: str,
+ ):
+     filtered_df = filter_models(hidden_df, type_query, size_query, precision_query, show_deleted)
+     filtered_df = filter_queries(query, filtered_df)
+     df = select_columns(filtered_df, columns)
+     return df
+
+
+ def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame:
+     return df[(df[AutoEvalColumn.dummy.name].str.contains(query, case=False))]
+
+
+ def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
+     always_here_cols = [
+         AutoEvalColumn.model_type_symbol.name,
+         AutoEvalColumn.model.name,
+     ]
+     # We use COLS to maintain sorting
+     filtered_df = df[
+         always_here_cols + [c for c in COLS if c in df.columns and c in columns] + [AutoEvalColumn.dummy.name]
+     ]
+     return filtered_df
+
+
+ def filter_queries(query: str, filtered_df: pd.DataFrame) -> pd.DataFrame:
+     final_df = []
+     if query != "":
+         queries = [q.strip() for q in query.split(";")]
+         for _q in queries:
+             _q = _q.strip()
+             if _q != "":
+                 temp_filtered_df = search_table(filtered_df, _q)
+                 if len(temp_filtered_df) > 0:
+                     final_df.append(temp_filtered_df)
+         if len(final_df) > 0:
+             filtered_df = pd.concat(final_df)
+             filtered_df = filtered_df.drop_duplicates(
+                 subset=[AutoEvalColumn.model.name, AutoEvalColumn.precision.name, AutoEvalColumn.revision.name]
+             )
+
+     return filtered_df
+
+
+ def filter_models(
+     df: pd.DataFrame, type_query: list, size_query: list, precision_query: list, show_deleted: bool
+ ) -> pd.DataFrame:
+     # Show all models
+     if show_deleted:
+         filtered_df = df
+     else:  # Show only still on the hub models
+         filtered_df = df[df[AutoEvalColumn.still_on_hub.name] == True]
+
+     type_emoji = [t[0] for t in type_query]
+     filtered_df = filtered_df.loc[df[AutoEvalColumn.model_type_symbol.name].isin(type_emoji)]
+     filtered_df = filtered_df.loc[df[AutoEvalColumn.precision.name].isin(precision_query + ["None"])]
+
+     numeric_interval = pd.IntervalIndex(sorted([NUMERIC_INTERVALS[s] for s in size_query]))
+     params_column = pd.to_numeric(df[AutoEvalColumn.params.name], errors="coerce")
+     mask = params_column.apply(lambda x: any(numeric_interval.contains(x)))
+     filtered_df = filtered_df.loc[mask]
+
+     return filtered_df
+
+
+ demo = gr.Blocks(css=custom_css)
+ with demo:
+     gr.HTML(TITLE)
+     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
+
+     with gr.Tabs(elem_classes="tab-buttons") as tabs:
+         with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
+             with gr.Row():
+                 with gr.Column():
+                     with gr.Row():
+                         search_bar = gr.Textbox(
+                             placeholder=" 🔍 Search for your model (separate multiple queries with `;`) and press ENTER...",
+                             show_label=False,
+                             elem_id="search-bar",
+                         )
+                     with gr.Row():
+                         shown_columns = gr.CheckboxGroup(
+                             choices=[
+                                 c.name
+                                 for c in fields(AutoEvalColumn)
+                                 if not c.hidden and not c.never_hidden and not c.dummy
+                             ],
+                             value=[
+                                 c.name
+                                 for c in fields(AutoEvalColumn)
+                                 if c.displayed_by_default and not c.hidden and not c.never_hidden
+                             ],
+                             label="Select columns to show",
+                             elem_id="column-select",
+                             interactive=True,
+                         )
+                     with gr.Row():
+                         deleted_models_visibility = gr.Checkbox(
+                             value=False, label="Show gated/private/deleted models", interactive=True
+                         )
+                 with gr.Column(min_width=320):
+                     # with gr.Box(elem_id="box-filter"):
+                     filter_columns_type = gr.CheckboxGroup(
+                         label="Model types",
+                         choices=[t.to_str() for t in ModelType],
+                         value=[t.to_str() for t in ModelType],
+                         interactive=True,
+                         elem_id="filter-columns-type",
+                     )
+                     filter_columns_precision = gr.CheckboxGroup(
+                         label="Precision",
+                         choices=[i.value.name for i in Precision],
+                         value=[i.value.name for i in Precision],
+                         interactive=True,
+                         elem_id="filter-columns-precision",
+                     )
+                     filter_columns_size = gr.CheckboxGroup(
+                         label="Model sizes (in billions of parameters)",
+                         choices=list(NUMERIC_INTERVALS.keys()),
+                         value=list(NUMERIC_INTERVALS.keys()),
+                         interactive=True,
+                         elem_id="filter-columns-size",
+                     )
+
+             leaderboard_table = gr.components.Dataframe(
+                 value=leaderboard_df[
+                     [c.name for c in fields(AutoEvalColumn) if c.never_hidden]
+                     + shown_columns.value
+                     + [AutoEvalColumn.dummy.name]
+                 ],
+                 headers=[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + shown_columns.value,
+                 datatype=TYPES,
+                 elem_id="leaderboard-table",
+                 interactive=False,
+                 visible=True,
+                 column_widths=["2%", "33%"]
+             )
+
+             # Dummy leaderboard for handling the case when the user uses backspace key
+             hidden_leaderboard_table_for_search = gr.components.Dataframe(
+                 value=original_df[COLS],
+                 headers=COLS,
+                 datatype=TYPES,
+                 visible=False,
+             )
+             search_bar.submit(
+                 update_table,
+                 [
+                     hidden_leaderboard_table_for_search,
+                     shown_columns,
+                     filter_columns_type,
+                     filter_columns_precision,
+                     filter_columns_size,
+                     deleted_models_visibility,
+                     search_bar,
+                 ],
+                 leaderboard_table,
+             )
+             for selector in [shown_columns, filter_columns_type, filter_columns_precision, filter_columns_size, deleted_models_visibility]:
+                 selector.change(
+                     update_table,
+                     [
+                         hidden_leaderboard_table_for_search,
+                         shown_columns,
+                         filter_columns_type,
+                         filter_columns_precision,
+                         filter_columns_size,
+                         deleted_models_visibility,
+                         search_bar,
+                     ],
+                     leaderboard_table,
+                     queue=True,
+                 )
+
+         with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
+             gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
+
+         with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
+             with gr.Column():
+                 with gr.Row():
+                     gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
+
+                 with gr.Column():
+                     with gr.Accordion(
+                         f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
+                         open=False,
+                     ):
+                         with gr.Row():
+                             finished_eval_table = gr.components.Dataframe(
+                                 value=finished_eval_queue_df,
+                                 headers=EVAL_COLS,
+                                 datatype=EVAL_TYPES,
+                                 row_count=5,
+                             )
+                     with gr.Accordion(
+                         f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
+                         open=False,
+                     ):
+                         with gr.Row():
+                             running_eval_table = gr.components.Dataframe(
+                                 value=running_eval_queue_df,
+                                 headers=EVAL_COLS,
+                                 datatype=EVAL_TYPES,
+                                 row_count=5,
+                             )
+
+                     with gr.Accordion(
+                         f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
+                         open=False,
+                     ):
+                         with gr.Row():
+                             pending_eval_table = gr.components.Dataframe(
+                                 value=pending_eval_queue_df,
+                                 headers=EVAL_COLS,
+                                 datatype=EVAL_TYPES,
+                                 row_count=5,
+                             )
+             with gr.Row():
+                 gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
+
+             with gr.Row():
+                 with gr.Column():
+                     model_name_textbox = gr.Textbox(label="Model name")
+                     revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
+                     model_type = gr.Dropdown(
+                         choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
+                         label="Model type",
+                         multiselect=False,
+                         value=None,
+                         interactive=True,
+                     )
+
+                 with gr.Column():
+                     precision = gr.Dropdown(
+                         choices=[i.value.name for i in Precision if i != Precision.Unknown],
+                         label="Precision",
+                         multiselect=False,
+                         value="float16",
+                         interactive=True,
+                     )
+                     weight_type = gr.Dropdown(
+                         choices=[i.value.name for i in WeightType],
+                         label="Weights type",
+                         multiselect=False,
+                         value="Original",
+                         interactive=True,
+                     )
+                     base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
+
+             submit_button = gr.Button("Submit Eval")
+             submission_result = gr.Markdown()
+             submit_button.click(
+                 add_new_eval,
+                 [
+                     model_name_textbox,
+                     base_model_name_textbox,
+                     revision_name_textbox,
+                     precision,
+                     weight_type,
+                     model_type,
+                 ],
+                 submission_result,
+             )
+
+     with gr.Row():
+         with gr.Accordion("📙 Citation", open=False):
+             citation_button = gr.Textbox(
+                 value=CITATION_BUTTON_TEXT,
+                 label=CITATION_BUTTON_LABEL,
+                 lines=20,
+                 elem_id="citation-button",
+                 show_copy_button=True,
+             )
+
+ scheduler = BackgroundScheduler()
+ scheduler.add_job(restart_space, "interval", seconds=1800)
+ scheduler.start()
+ demo.queue(default_concurrency_limit=40).launch()
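
One detail worth calling out in `app.py` above: the search bar accepts several queries separated by `;`, each matched case-insensitively, with the hits concatenated and de-duplicated (see `filter_queries`/`search_table`). A standalone toy reproduction of that behavior, with a plain `model` column standing in for the hidden `AutoEvalColumn.dummy` column:

```python
import pandas as pd

df = pd.DataFrame({"model": ["Qwen/Qwen3-8B", "google/gemma-3-4b-it", "meta-llama/Llama-3.1-8B-Instruct"]})

def search(df: pd.DataFrame, query: str) -> pd.DataFrame:
    # Split on ";", match each part case-insensitively, concat, de-duplicate.
    parts = [q.strip() for q in query.split(";") if q.strip()]
    hits = [df[df["model"].str.contains(q, case=False)] for q in parts]
    return pd.concat(hits).drop_duplicates() if hits else df

print(search(df, "qwen; llama"))  # returns the Qwen and Llama rows
```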
kyrgyz_results/results_Qwen__Qwen2.5-0.5B-Instruct_few_shot.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "config": {
+     "model_name": "Qwen/Qwen2.5-0.5B-Instruct",
+     "model_dtype": "float16",
+     "model_sha": "main"
+   },
+   "results": {
+     "KyrgyzMMLU": {
+       "metric_name": 0.254
+     },
+     "KyrgyzRC": {
+       "metric_name": 0.54
+     },
+     "WinoGrande": {
+       "metric_name": 0.49700000000000005
+     },
+     "BoolQ": {
+       "metric_name": 0.61
+     },
+     "HellaSwag": {
+       "metric_name": 0.259
+     },
+     "GSM8K": {
+       "metric_name": 0.022000000000000002
+     },
+     "TruthfulQA": {
+       "metric_name": 0.33399999999999996
+     }
+   }
+ }
kyrgyz_results/results_Qwen__Qwen2.5-0.5B-Instruct_zero_shot.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "config": {
+     "model_name": "Qwen/Qwen2.5-0.5B-Instruct",
+     "model_dtype": "float16",
+     "model_sha": "main"
+   },
+   "results": {
+     "KyrgyzMMLU": {
+       "metric_name": 0.27399999999999997
+     },
+     "KyrgyzRC": {
+       "metric_name": 0.532
+     },
+     "WinoGrande": {
+       "metric_name": 0.515
+     },
+     "BoolQ": {
+       "metric_name": 0.379
+     },
+     "HellaSwag": {
+       "metric_name": 0.146
+     },
+     "GSM8K": {
+       "metric_name": 0.006999999999999999
+     },
+     "TruthfulQA": {
+       "metric_name": 0.335
+     }
+   }
+ }
kyrgyz_results/results_Qwen__Qwen2.5-1.5B-Instruct_few_shot.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "config": {
+     "model_name": "Qwen/Qwen2.5-1.5B-Instruct",
+     "model_dtype": "float16",
+     "model_sha": "main"
+   },
+   "results": {
+     "KyrgyzMMLU": {
+       "metric_name": 0.287
+     },
+     "KyrgyzRC": {
+       "metric_name": 0.675
+     },
+     "WinoGrande": {
+       "metric_name": 0.501
+     },
+     "BoolQ": {
+       "metric_name": 0.58
+     },
+     "HellaSwag": {
+       "metric_name": 0.265
+     },
+     "GSM8K": {
+       "metric_name": 0.061
+     },
+     "TruthfulQA": {
+       "metric_name": 0.32899999999999996
+     }
+   }
+ }
kyrgyz_results/results_Qwen__Qwen2.5-1.5B-Instruct_zero_shot.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "config": {
+     "model_name": "Qwen/Qwen2.5-1.5B-Instruct",
+     "model_dtype": "float16",
+     "model_sha": "main"
+   },
+   "results": {
+     "KyrgyzMMLU": {
+       "metric_name": 0.27899999999999997
+     },
+     "KyrgyzRC": {
+       "metric_name": 0.605
+     },
+     "WinoGrande": {
+       "metric_name": 0.501
+     },
+     "BoolQ": {
+       "metric_name": 0.386
+     },
+     "HellaSwag": {
+       "metric_name": 0.22899999999999998
+     },
+     "GSM8K": {
+       "metric_name": 0.006999999999999999
+     },
+     "TruthfulQA": {
+       "metric_name": 0.325
+     }
+   }
+ }
kyrgyz_results/results_Qwen__Qwen2.5-3B-Instruct_few_shot.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "config": {
+     "model_name": "Qwen/Qwen2.5-3B-Instruct",
+     "model_dtype": "float16",
+     "model_sha": "main"
+   },
+   "results": {
+     "KyrgyzMMLU": {
+       "metric_name": 0.34
+     },
+     "KyrgyzRC": {
+       "metric_name": 0.732
+     },
+     "WinoGrande": {
+       "metric_name": 0.513
+     },
+     "BoolQ": {
+       "metric_name": 0.574
+     },
+     "HellaSwag": {
+       "metric_name": 0.237
+     },
+     "GSM8K": {
+       "metric_name": 0.095
+     },
+     "TruthfulQA": {
+       "metric_name": 0.344
+     }
+   }
+ }
kyrgyz_results/results_Qwen__Qwen2.5-3B-Instruct_zero_shot.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "config": {
+     "model_name": "Qwen/Qwen2.5-3B-Instruct",
+     "model_dtype": "float16",
+     "model_sha": "main"
+   },
+   "results": {
+     "KyrgyzMMLU": {
+       "metric_name": 0.28600000000000003
+     },
+     "KyrgyzRC": {
+       "metric_name": 0.66
+     },
+     "WinoGrande": {
+       "metric_name": 0.505
+     },
+     "BoolQ": {
+       "metric_name": 0.594
+     },
+     "HellaSwag": {
+       "metric_name": 0.22
+     },
+     "GSM8K": {
+       "metric_name": 0.006999999999999999
+     },
+     "TruthfulQA": {
+       "metric_name": 0.342
+     }
+   }
+ }
kyrgyz_results/results_Qwen__Qwen2.5-7B-Instruct_few_shot.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "config": {
+     "model_name": "Qwen/Qwen2.5-7B-Instruct",
+     "model_dtype": "float16",
+     "model_sha": "main"
+   },
+   "results": {
+     "KyrgyzMMLU": {
+       "metric_name": 0.385
+     },
+     "KyrgyzRC": {
+       "metric_name": 0.748
+     },
+     "WinoGrande": {
+       "metric_name": 0.504
+     },
+     "BoolQ": {
+       "metric_name": 0.6459999999999999
+     },
+     "HellaSwag": {
+       "metric_name": 0.17800000000000002
+     },
+     "GSM8K": {
+       "metric_name": 0.321
+     },
+     "TruthfulQA": {
+       "metric_name": 0.36200000000000004
+     }
+   }
+ }
kyrgyz_results/results_Qwen__Qwen2.5-7B-Instruct_zero_shot.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "config": {
+     "model_name": "Qwen/Qwen2.5-7B-Instruct",
+     "model_dtype": "float16",
+     "model_sha": "main"
+   },
+   "results": {
+     "KyrgyzMMLU": {
+       "metric_name": 0.315
+     },
+     "KyrgyzRC": {
+       "metric_name": 0.7
+     },
+     "WinoGrande": {
+       "metric_name": 0.48700000000000004
+     },
+     "BoolQ": {
+       "metric_name": 0.563
+     },
+     "HellaSwag": {
+       "metric_name": 0.1
+     },
+     "GSM8K": {
+       "metric_name": 0.011000000000000001
+     },
+     "TruthfulQA": {
+       "metric_name": 0.341
+     }
+   }
+ }
kyrgyz_results/results_Qwen__Qwen3-0.6B_few_shot.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "config": {
+     "model_name": "Qwen/Qwen3-0.6B",
+     "model_dtype": "float16",
+     "model_sha": "main"
+   },
+   "results": {
+     "KyrgyzMMLU": {
+       "metric_name": 0.268
+     },
+     "KyrgyzRC": {
+       "metric_name": 0.595
+     },
+     "WinoGrande": {
+       "metric_name": 0.501
+     },
+     "BoolQ": {
+       "metric_name": 0.601
+     },
+     "HellaSwag": {
+       "metric_name": 0.264
+     },
+     "GSM8K": {
+       "metric_name": 0.043
+     },
+     "TruthfulQA": {
+       "metric_name": 0.3
+     }
+   }
+ }
kyrgyz_results/results_Qwen__Qwen3-0.6B_zero_shot.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "config": {
+     "model_name": "Qwen/Qwen3-0.6B",
+     "model_dtype": "float16",
+     "model_sha": "main"
+   },
+   "results": {
+     "KyrgyzMMLU": {
+       "metric_name": 0.26
+     },
+     "KyrgyzRC": {
+       "metric_name": 0.618
+     },
+     "WinoGrande": {
+       "metric_name": 0.498
+     },
+     "BoolQ": {
+       "metric_name": 0.38
+     },
+     "HellaSwag": {
+       "metric_name": 0.111
+     },
+     "GSM8K": {
+       "metric_name": 0.006999999999999999
+     },
+     "TruthfulQA": {
+       "metric_name": 0.299
+     }
+   }
+ }
kyrgyz_results/results_Qwen__Qwen3-1.7B_few_shot.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "config": {
+     "model_name": "Qwen/Qwen3-1.7B",
+     "model_dtype": "float16",
+     "model_sha": "main"
+   },
+   "results": {
+     "KyrgyzMMLU": {
+       "metric_name": 0.308
+     },
+     "KyrgyzRC": {
+       "metric_name": 0.7120000000000001
+     },
+     "WinoGrande": {
+       "metric_name": 0.486
+     },
+     "BoolQ": {
+       "metric_name": 0.62
+     },
+     "HellaSwag": {
+       "metric_name": 0.252
+     },
+     "GSM8K": {
+       "metric_name": 0.185
+     },
+     "TruthfulQA": {
+       "metric_name": 0.303
+     }
+   }
+ }
kyrgyz_results/results_Qwen__Qwen3-1.7B_zero_shot.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "config": {
+     "model_name": "Qwen/Qwen3-1.7B",
+     "model_dtype": "float16",
+     "model_sha": "main"
+   },
+   "results": {
+     "KyrgyzMMLU": {
+       "metric_name": 0.27899999999999997
+     },
+     "KyrgyzRC": {
+       "metric_name": 0.618
+     },
+     "WinoGrande": {
+       "metric_name": 0.489
+     },
+     "BoolQ": {
+       "metric_name": 0.40399999999999997
+     },
+     "HellaSwag": {
+       "metric_name": 0.24600000000000002
+     },
+     "GSM8K": {
+       "metric_name": 0.006999999999999999
+     },
+     "TruthfulQA": {
+       "metric_name": 0.29600000000000004
+     }
+   }
+ }
kyrgyz_results/results_Qwen__Qwen3-4B_few_shot.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "config": {
+     "model_name": "Qwen/Qwen3-4B",
+     "model_dtype": "float16",
+     "model_sha": "main"
+   },
+   "results": {
+     "KyrgyzMMLU": {
+       "metric_name": 0.385
+     },
+     "KyrgyzRC": {
+       "metric_name": 0.772
+     },
+     "WinoGrande": {
+       "metric_name": 0.48100000000000004
+     },
+     "BoolQ": {
+       "metric_name": 0.74
+     },
+     "HellaSwag": {
+       "metric_name": 0.247
+     },
+     "GSM8K": {
+       "metric_name": 0.515
+     },
+     "TruthfulQA": {
+       "metric_name": 0.325
+     }
+   }
+ }
kyrgyz_results/results_Qwen__Qwen3-4B_zero_shot.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "config": {
+     "model_name": "Qwen/Qwen3-4B",
+     "model_dtype": "float16",
+     "model_sha": "main"
+   },
+   "results": {
+     "KyrgyzMMLU": {
+       "metric_name": 0.303
+     },
+     "KyrgyzRC": {
+       "metric_name": 0.682
+     },
+     "WinoGrande": {
+       "metric_name": 0.49
+     },
+     "BoolQ": {
+       "metric_name": 0.38299999999999995
+     },
+     "HellaSwag": {
+       "metric_name": 0.245
+     },
+     "GSM8K": {
+       "metric_name": 0.006999999999999999
+     },
+     "TruthfulQA": {
+       "metric_name": 0.32899999999999996
+     }
+   }
+ }
kyrgyz_results/results_Qwen__Qwen3-8B_few_shot.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "config": {
+     "model_name": "Qwen/Qwen3-8B",
+     "model_dtype": "float16",
+     "model_sha": "main"
+   },
+   "results": {
+     "KyrgyzMMLU": {
+       "metric_name": 0.445
+     },
+     "KyrgyzRC": {
+       "metric_name": 0.818
+     },
+     "WinoGrande": {
+       "metric_name": 0.506
+     },
+     "BoolQ": {
+       "metric_name": 0.769
+     },
+     "HellaSwag": {
+       "metric_name": 0.264
+     },
+     "GSM8K": {
+       "metric_name": 0.6
+     },
+     "TruthfulQA": {
+       "metric_name": 0.358
+     }
+   }
+ }
kyrgyz_results/results_Qwen__Qwen3-8B_zero_shot.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "config": {
+     "model_name": "Qwen/Qwen3-8B",
+     "model_dtype": "float16",
+     "model_sha": "main"
+   },
+   "results": {
+     "KyrgyzMMLU": {
+       "metric_name": 0.321
+     },
+     "KyrgyzRC": {
+       "metric_name": 0.718
+     },
+     "WinoGrande": {
+       "metric_name": 0.51
+     },
+     "BoolQ": {
+       "metric_name": 0.392
+     },
+     "HellaSwag": {
+       "metric_name": 0.24600000000000002
+     },
+     "GSM8K": {
+       "metric_name": 0.006999999999999999
+     },
+     "TruthfulQA": {
+       "metric_name": 0.34700000000000003
+     }
+   }
+ }
kyrgyz_results/results_google__gemma-3-1b-it_few_shot.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "config": {
+     "model_name": "google/gemma-3-1b-it",
+     "model_dtype": "float16",
+     "model_sha": "main"
+   },
+   "results": {
+     "KyrgyzMMLU": {
+       "metric_name": 0.265
+     },
+     "KyrgyzRC": {
+       "metric_name": 0.38
+     },
+     "WinoGrande": {
+       "metric_name": 0.489
+     },
+     "BoolQ": {
+       "metric_name": 0.628
+     },
+     "HellaSwag": {
+       "metric_name": 0.235
+     },
+     "GSM8K": {
+       "metric_name": 0.032
+     },
+     "TruthfulQA": {
+       "metric_name": 0.313
+     }
+   }
+ }
kyrgyz_results/results_google__gemma-3-1b-it_zero_shot.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "config": {
+     "model_name": "google/gemma-3-1b-it",
+     "model_dtype": "float16",
+     "model_sha": "main"
+   },
+   "results": {
+     "KyrgyzMMLU": {
+       "metric_name": 0.267
+     },
+     "KyrgyzRC": {
+       "metric_name": 0.5820000000000001
+     },
+     "WinoGrande": {
+       "metric_name": 0.5
+     },
+     "BoolQ": {
+       "metric_name": 0.379
+     },
+     "HellaSwag": {
+       "metric_name": 0.244
+     },
+     "GSM8K": {
+       "metric_name": 0.006999999999999999
+     },
+     "TruthfulQA": {
+       "metric_name": 0.34
+     }
+   }
+ }
kyrgyz_results/results_google__gemma-3-270m_few_shot.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "config": {
+     "model_name": "google/gemma-3-270m",
+     "model_dtype": "float16",
+     "model_sha": "main"
+   },
+   "results": {
+     "KyrgyzMMLU": {
+       "metric_name": 0.27
+     },
+     "KyrgyzRC": {
+       "metric_name": 0.532
+     },
+     "WinoGrande": {
+       "metric_name": 0.48700000000000004
+     },
+     "BoolQ": {
+       "metric_name": 0.615
+     },
+     "HellaSwag": {
+       "metric_name": 0.276
+     },
+     "GSM8K": {
+       "metric_name": 0.013999999999999999
+     },
+     "TruthfulQA": {
+       "metric_name": 0.366
+     }
+   }
+ }
kyrgyz_results/results_google__gemma-3-270m_zero_shot.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "config": {
+     "model_name": "google/gemma-3-270m",
+     "model_dtype": "float16",
+     "model_sha": "main"
+   },
+   "results": {
+     "KyrgyzMMLU": {
+       "metric_name": 0.275
+     },
+     "KyrgyzRC": {
+       "metric_name": 0.568
+     },
+     "WinoGrande": {
+       "metric_name": 0.483
+     },
+     "BoolQ": {
+       "metric_name": 0.379
+     },
+     "HellaSwag": {
+       "metric_name": 0.174
+     },
+     "GSM8K": {
+       "metric_name": 0.006999999999999999
+     },
+     "TruthfulQA": {
+       "metric_name": 0.34700000000000003
+     }
+   }
+ }
kyrgyz_results/results_google__gemma-3-4b-it_few_shot.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "config": {
+     "model_name": "google/gemma-3-4b-it",
+     "model_dtype": "float16",
+     "model_sha": "main"
+   },
+   "results": {
+     "KyrgyzMMLU": {
+       "metric_name": 0.295
+     },
+     "KyrgyzRC": {
+       "metric_name": 0.25
+     },
+     "WinoGrande": {
+       "metric_name": 0.496
+     },
+     "BoolQ": {
+       "metric_name": 0.621
+     },
+     "HellaSwag": {
+       "metric_name": 0.24600000000000002
+     },
+     "GSM8K": {
+       "metric_name": 0.0
+     },
+     "TruthfulQA": {
+       "metric_name": 0.5
+     }
+   }
+ }
kyrgyz_results/results_google__gemma-3-4b-it_zero_shot.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "config": {
+     "model_name": "google/gemma-3-4b-it",
+     "model_dtype": "float16",
+     "model_sha": "main"
+   },
+   "results": {
+     "KyrgyzMMLU": {
+       "metric_name": 0.303
+     },
+     "KyrgyzRC": {
+       "metric_name": 0.7020000000000001
+     },
+     "WinoGrande": {
+       "metric_name": 0.506
+     },
+     "BoolQ": {
+       "metric_name": 0.583
+     },
+     "HellaSwag": {
+       "metric_name": 0.24600000000000002
+     },
+     "GSM8K": {
+       "metric_name": 0.006999999999999999
+     },
+     "TruthfulQA": {
+       "metric_name": 0.34700000000000003
+     }
+   }
+ }
kyrgyz_results/results_meta-llama__Llama-3.1-8B-Instruct_few_shot.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "config": {
+     "model_name": "meta-llama/Llama-3.1-8B-Instruct",
+     "model_dtype": "float16",
+     "model_sha": "main"
+   },
+   "results": {
+     "KyrgyzMMLU": {
+       "metric_name": 0.381
+     },
+     "KyrgyzRC": {
+       "metric_name": 0.805
+     },
+     "WinoGrande": {
+       "metric_name": 0.516
+     },
+     "BoolQ": {
+       "metric_name": 0.755
+     },
+     "HellaSwag": {
+       "metric_name": 0.21899999999999997
+     },
+     "GSM8K": {
+       "metric_name": 0.37
+     },
+     "TruthfulQA": {
+       "metric_name": 0.344
+     }
+   }
+ }
kyrgyz_results/results_meta-llama__Llama-3.1-8B-Instruct_zero_shot.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "config": {
+     "model_name": "meta-llama/Llama-3.1-8B-Instruct",
+     "model_dtype": "float16",
+     "model_sha": "main"
+   },
+   "results": {
+     "KyrgyzMMLU": {
+       "metric_name": 0.31
+     },
+     "KyrgyzRC": {
+       "metric_name": 0.752
+     },
+     "WinoGrande": {
+       "metric_name": 0.506
+     },
+     "BoolQ": {
+       "metric_name": 0.503
+     },
+     "HellaSwag": {
+       "metric_name": 0.266
+     },
+     "GSM8K": {
+       "metric_name": 0.006999999999999999
+     },
+     "TruthfulQA": {
+       "metric_name": 0.337
+     }
+   }
+ }
kyrgyz_results/results_meta-llama__Llama-3.2-1B-Instruct_few_shot.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "config": {
+     "model_name": "meta-llama/Llama-3.2-1B-Instruct",
+     "model_dtype": "float16",
+     "model_sha": "main"
+   },
+   "results": {
+     "KyrgyzMMLU": {
+       "metric_name": 0.261
+     },
+     "KyrgyzRC": {
+       "metric_name": 0.45799999999999996
+     },
+     "WinoGrande": {
+       "metric_name": 0.49700000000000005
+     },
+     "BoolQ": {
+       "metric_name": 0.62
+     },
+     "HellaSwag": {
+       "metric_name": 0.258
+     },
+     "GSM8K": {
+       "metric_name": 0.027000000000000003
+     },
+     "TruthfulQA": {
+       "metric_name": 0.303
+     }
+   }
+ }
kyrgyz_results/results_meta-llama__Llama-3.2-1B-Instruct_zero_shot.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "config": {
+     "model_name": "meta-llama/Llama-3.2-1B-Instruct",
+     "model_dtype": "float16",
+     "model_sha": "main"
+   },
+   "results": {
+     "KyrgyzMMLU": {
+       "metric_name": 0.263
+     },
+     "KyrgyzRC": {
+       "metric_name": 0.5820000000000001
+     },
+     "WinoGrande": {
+       "metric_name": 0.494
+     },
+     "BoolQ": {
+       "metric_name": 0.38299999999999995
+     },
+     "HellaSwag": {
+       "metric_name": 0.002
+     },
+     "GSM8K": {
+       "metric_name": 0.006999999999999999
+     },
+     "TruthfulQA": {
+       "metric_name": 0.301
+     }
+   }
+ }
kyrgyz_results/results_meta-llama__Llama-3.2-3B-Instruct_few_shot.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "config": {
+     "model_name": "meta-llama/Llama-3.2-3B-Instruct",
+     "model_dtype": "float16",
+     "model_sha": "main"
+   },
+   "results": {
+     "KyrgyzMMLU": {
+       "metric_name": 0.294
+     },
+     "KyrgyzRC": {
+       "metric_name": 0.648
+     },
+     "WinoGrande": {
+       "metric_name": 0.489
+     },
+     "BoolQ": {
+       "metric_name": 0.623
+     },
+     "HellaSwag": {
+       "metric_name": 0.253
+     },
+     "GSM8K": {
+       "metric_name": 0.129
+     },
+     "TruthfulQA": {
+       "metric_name": 0.32899999999999996
+     }
+   }
+ }
kyrgyz_results/results_meta-llama__Llama-3.2-3B-Instruct_zero_shot.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "config": {
+     "model_name": "meta-llama/Llama-3.2-3B-Instruct",
+     "model_dtype": "float16",
+     "model_sha": "main"
+   },
+   "results": {
+     "KyrgyzMMLU": {
+       "metric_name": 0.278
+     },
+     "KyrgyzRC": {
+       "metric_name": 0.642
+     },
+     "WinoGrande": {
+       "metric_name": 0.491
+     },
+     "BoolQ": {
+       "metric_name": 0.431
+     },
+     "HellaSwag": {
+       "metric_name": 0.245
+     },
+     "GSM8K": {
+       "metric_name": 0.006999999999999999
+     },
+     "TruthfulQA": {
+       "metric_name": 0.315
+     }
+   }
+ }
pyproject.toml ADDED
@@ -0,0 +1,13 @@
+ [tool.ruff]
+ # Enable pycodestyle (`E`) and Pyflakes (`F`) codes by default.
+ select = ["E", "F"]
+ ignore = ["E501"]  # line too long (black is taking care of this)
+ line-length = 119
+ fixable = ["A", "B", "C", "D", "E", "F", "G", "I", "N", "Q", "S", "T", "W", "ANN", "ARG", "BLE", "COM", "DJ", "DTZ", "EM", "ERA", "EXE", "FBT", "ICN", "INP", "ISC", "NPY", "PD", "PGH", "PIE", "PL", "PT", "PTH", "PYI", "RET", "RSE", "RUF", "SIM", "SLF", "TCH", "TID", "TRY", "UP", "YTT"]
+
+ [tool.isort]
+ profile = "black"
+ line_length = 119
+
+ [tool.black]
+ line-length = 119
requirements.txt ADDED
@@ -0,0 +1,15 @@
+ APScheduler==3.10.1
+ black==23.11.0
+ click==8.1.3
+ datasets==2.14.5
+ gradio==4.43.0
+ gradio-client==1.3.0
+ huggingface-hub>=0.18.0
+ matplotlib==3.7.1
+ numpy==1.24.2
+ pandas==2.0.0
+ python-dateutil==2.8.2
+ requests
+ tqdm==4.65.0
+ transformers==4.35.2
+ tokenizers>=0.15.0
scripts/create_request_file.py ADDED
@@ -0,0 +1,107 @@
+ import json
+ import os
+ import pprint
+ import re
+ from datetime import datetime, timezone
+
+ import click
+ from colorama import Fore
+ from huggingface_hub import HfApi, snapshot_download
+
+ EVAL_REQUESTS_PATH = "eval-queue"
+ QUEUE_REPO = "open-llm-leaderboard/requests"
+
+ precisions = ("float16", "bfloat16", "8bit (LLM.int8)", "4bit (QLoRA / FP4)", "GPTQ")
+ model_types = ("pretrained", "fine-tuned", "RL-tuned", "instruction-tuned")
+ weight_types = ("Original", "Delta", "Adapter")
+
+
+ def get_model_size(model_info, precision: str):
+     size_pattern = re.compile(r"(\d\.)?\d+(b|m)")
+     try:
+         model_size = round(model_info.safetensors["total"] / 1e9, 3)
+     except (AttributeError, TypeError):
+         try:
+             size_match = re.search(size_pattern, model_info.modelId.lower())
+             model_size = size_match.group(0)
+             model_size = round(float(model_size[:-1]) if model_size[-1] == "b" else float(model_size[:-1]) / 1e3, 3)
+         except AttributeError:
+             return 0  # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py
+
+     size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.modelId.lower()) else 1
+     model_size = size_factor * model_size
+     return model_size
+
+
+ def main():
+     api = HfApi()
+     current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+     snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH, repo_type="dataset")
+
+     model_name = click.prompt("Enter model name")
+     revision = click.prompt("Enter revision", default="main")
+     precision = click.prompt("Enter precision", default="float16", type=click.Choice(precisions))
+     model_type = click.prompt("Enter model type", type=click.Choice(model_types))
+     weight_type = click.prompt("Enter weight type", default="Original", type=click.Choice(weight_types))
+     base_model = click.prompt("Enter base model", default="")
+     status = click.prompt("Enter status", default="FINISHED")
+
+     try:
+         model_info = api.model_info(repo_id=model_name, revision=revision)
+     except Exception as e:
+         print(f"{Fore.RED}Could not find model info for {model_name} on the Hub\n{e}{Fore.RESET}")
+         return 1
+
+     model_size = get_model_size(model_info=model_info, precision=precision)
+
+     try:
+         license = model_info.cardData["license"]
+     except Exception:
+         license = "?"
+
+     eval_entry = {
+         "model": model_name,
+         "base_model": base_model,
+         "revision": revision,
+         "private": False,
+         "precision": precision,
+         "weight_type": weight_type,
+         "status": status,
+         "submitted_time": current_time,
+         "model_type": model_type,
+         "likes": model_info.likes,
+         "params": model_size,
+         "license": license,
+     }
+
+     user_name = ""
+     model_path = model_name
+     if "/" in model_name:
+         user_name = model_name.split("/")[0]
+         model_path = model_name.split("/")[1]
+
+     pprint.pprint(eval_entry)
+
+     if click.confirm("Do you want to continue? This request file will be pushed to the hub"):
+         click.echo("continuing...")
+
+         out_dir = f"{EVAL_REQUESTS_PATH}/{user_name}"
+         os.makedirs(out_dir, exist_ok=True)
+         out_path = f"{out_dir}/{model_path}_eval_request_{False}_{precision}_{weight_type}.json"
+
+         with open(out_path, "w") as f:
+             f.write(json.dumps(eval_entry))
+
+         api.upload_file(
+             path_or_fileobj=out_path,
+             path_in_repo=out_path.split(f"{EVAL_REQUESTS_PATH}/")[1],
+             repo_id=QUEUE_REPO,
+             repo_type="dataset",
+             commit_message=f"Add {model_name} to eval queue",
+         )
+     else:
+         click.echo("aborting...")
+
+
+ if __name__ == "__main__":
+     main()
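
The fallback branch of `get_model_size` above infers the parameter count from the model ID when safetensors metadata is unavailable. A quick standalone check of that regex (the sample IDs are illustrative):

```python
import re

# Same fallback pattern as in get_model_size: an optionally fractional
# number followed by "b" (billions) or "m" (millions) in the model ID.
size_pattern = re.compile(r"(\d\.)?\d+(b|m)")

for model_id in ["Qwen/Qwen2.5-0.5B-Instruct", "google/gemma-3-270m"]:
    raw = re.search(size_pattern, model_id.lower()).group(0)
    params_b = float(raw[:-1]) if raw[-1] == "b" else float(raw[:-1]) / 1e3
    print(f"{model_id}: ~{params_b} B params")  # 0.5 and 0.27
```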
scripts/generate_kyrgyz_results_json.py ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ import json
+ import os
+ from typing import Dict
+ 
+ 
+ # Maps display names in README to real Hub model IDs for better visibility (still_on_hub=True)
+ MODEL_ID_MAP = {
+     # Qwen
+     "Qwen2.5-0.5B-Instruct": "Qwen/Qwen2.5-0.5B-Instruct",
+     "Qwen2.5-1.5B-Instruct": "Qwen/Qwen2.5-1.5B-Instruct",
+     "Qwen2.5-3B-Instruct": "Qwen/Qwen2.5-3B-Instruct",
+     "Qwen2.5-7B-Instruct": "Qwen/Qwen2.5-7B-Instruct",
+     "Qwen3-0.6B": "Qwen/Qwen3-0.6B",
+     "Qwen3-1.7B": "Qwen/Qwen3-1.7B",
+     "Qwen3-4B": "Qwen/Qwen3-4B",
+     "Qwen3-8B": "Qwen/Qwen3-8B",
+     # Gemma
+     "gemma-3-270m": "google/gemma-3-270m",
+     "gemma-3-1b-it": "google/gemma-3-1b-it",
+     "gemma-3-4b-it": "google/gemma-3-4b-it",
+     # Llama
+     "Llama-3.1-8B-Instruct": "meta-llama/Llama-3.1-8B-Instruct",
+     "Llama-3.2-1B-Instruct": "meta-llama/Llama-3.2-1B-Instruct",
+     "Llama-3.2-3B-Instruct": "meta-llama/Llama-3.2-3B-Instruct",
+ }
+ 
+ 
+ TASK_KEYS = [
+     "KyrgyzMMLU",
+     "KyrgyzRC",
+     "WinoGrande",
+     "BoolQ",
+     "HellaSwag",
+     "GSM8K",
+     "TruthfulQA",
+ ]
+ 
+ 
+ # Percent values from the README tables (zero-shot)
+ ZERO_SHOT: Dict[str, Dict[str, float]] = {
+     # Qwen
+     "Qwen2.5-0.5B-Instruct": {"KyrgyzMMLU": 27.4, "KyrgyzRC": 53.2, "WinoGrande": 51.5, "BoolQ": 37.9, "HellaSwag": 14.6, "GSM8K": 0.7, "TruthfulQA": 33.5},
+     "Qwen2.5-1.5B-Instruct": {"KyrgyzMMLU": 27.9, "KyrgyzRC": 60.5, "WinoGrande": 50.1, "BoolQ": 38.6, "HellaSwag": 22.9, "GSM8K": 0.7, "TruthfulQA": 32.5},
+     "Qwen2.5-3B-Instruct": {"KyrgyzMMLU": 28.6, "KyrgyzRC": 66.0, "WinoGrande": 50.5, "BoolQ": 59.4, "HellaSwag": 22.0, "GSM8K": 0.7, "TruthfulQA": 34.2},
+     "Qwen2.5-7B-Instruct": {"KyrgyzMMLU": 31.5, "KyrgyzRC": 70.0, "WinoGrande": 48.7, "BoolQ": 56.3, "HellaSwag": 10.0, "GSM8K": 1.1, "TruthfulQA": 34.1},
+     "Qwen3-0.6B": {"KyrgyzMMLU": 26.0, "KyrgyzRC": 61.8, "WinoGrande": 49.8, "BoolQ": 38.0, "HellaSwag": 11.1, "GSM8K": 0.7, "TruthfulQA": 29.9},
+     "Qwen3-1.7B": {"KyrgyzMMLU": 27.9, "KyrgyzRC": 61.8, "WinoGrande": 48.9, "BoolQ": 40.4, "HellaSwag": 24.6, "GSM8K": 0.7, "TruthfulQA": 29.6},
+     "Qwen3-4B": {"KyrgyzMMLU": 30.3, "KyrgyzRC": 68.2, "WinoGrande": 49.0, "BoolQ": 38.3, "HellaSwag": 24.5, "GSM8K": 0.7, "TruthfulQA": 32.9},
+     "Qwen3-8B": {"KyrgyzMMLU": 32.1, "KyrgyzRC": 71.8, "WinoGrande": 51.0, "BoolQ": 39.2, "HellaSwag": 24.6, "GSM8K": 0.7, "TruthfulQA": 34.7},
+     # Gemma
+     "gemma-3-1b-it": {"KyrgyzMMLU": 26.7, "KyrgyzRC": 58.2, "WinoGrande": 50.0, "BoolQ": 37.9, "HellaSwag": 24.4, "GSM8K": 0.7, "TruthfulQA": 34.0},
+     "gemma-3-270m": {"KyrgyzMMLU": 27.5, "KyrgyzRC": 56.8, "WinoGrande": 48.3, "BoolQ": 37.9, "HellaSwag": 17.4, "GSM8K": 0.7, "TruthfulQA": 34.7},
+     "gemma-3-4b-it": {"KyrgyzMMLU": 30.3, "KyrgyzRC": 70.2, "WinoGrande": 50.6, "BoolQ": 58.3, "HellaSwag": 24.6, "GSM8K": 0.7, "TruthfulQA": 34.7},
+     # Llama
+     "Llama-3.1-8B-Instruct": {"KyrgyzMMLU": 31.0, "KyrgyzRC": 75.2, "WinoGrande": 50.6, "BoolQ": 50.3, "HellaSwag": 26.6, "GSM8K": 0.7, "TruthfulQA": 33.7},
+     "Llama-3.2-1B-Instruct": {"KyrgyzMMLU": 26.3, "KyrgyzRC": 58.2, "WinoGrande": 49.4, "BoolQ": 38.3, "HellaSwag": 0.2, "GSM8K": 0.7, "TruthfulQA": 30.1},
+     "Llama-3.2-3B-Instruct": {"KyrgyzMMLU": 27.8, "KyrgyzRC": 64.2, "WinoGrande": 49.1, "BoolQ": 43.1, "HellaSwag": 24.5, "GSM8K": 0.7, "TruthfulQA": 31.5},
+ }
+ 
+ 
+ # Percent values from the README tables (few-shot)
+ FEW_SHOT: Dict[str, Dict[str, float]] = {
+     # Qwen
+     "Qwen2.5-0.5B-Instruct": {"KyrgyzMMLU": 25.4, "KyrgyzRC": 54.0, "WinoGrande": 49.7, "BoolQ": 61.0, "HellaSwag": 25.9, "GSM8K": 2.2, "TruthfulQA": 33.4},
+     "Qwen2.5-1.5B-Instruct": {"KyrgyzMMLU": 28.7, "KyrgyzRC": 67.5, "WinoGrande": 50.1, "BoolQ": 58.0, "HellaSwag": 26.5, "GSM8K": 6.1, "TruthfulQA": 32.9},
+     "Qwen2.5-3B-Instruct": {"KyrgyzMMLU": 34.0, "KyrgyzRC": 73.2, "WinoGrande": 51.3, "BoolQ": 57.4, "HellaSwag": 23.7, "GSM8K": 9.5, "TruthfulQA": 34.4},
+     "Qwen2.5-7B-Instruct": {"KyrgyzMMLU": 38.5, "KyrgyzRC": 74.8, "WinoGrande": 50.4, "BoolQ": 64.6, "HellaSwag": 17.8, "GSM8K": 32.1, "TruthfulQA": 36.2},
+     "Qwen3-0.6B": {"KyrgyzMMLU": 26.8, "KyrgyzRC": 59.5, "WinoGrande": 50.1, "BoolQ": 60.1, "HellaSwag": 26.4, "GSM8K": 4.3, "TruthfulQA": 30.0},
+     "Qwen3-1.7B": {"KyrgyzMMLU": 30.8, "KyrgyzRC": 71.2, "WinoGrande": 48.6, "BoolQ": 62.0, "HellaSwag": 25.2, "GSM8K": 18.5, "TruthfulQA": 30.3},
+     "Qwen3-4B": {"KyrgyzMMLU": 38.5, "KyrgyzRC": 77.2, "WinoGrande": 48.1, "BoolQ": 74.0, "HellaSwag": 24.7, "GSM8K": 51.5, "TruthfulQA": 32.5},
+     "Qwen3-8B": {"KyrgyzMMLU": 44.5, "KyrgyzRC": 81.8, "WinoGrande": 50.6, "BoolQ": 76.9, "HellaSwag": 26.4, "GSM8K": 60.0, "TruthfulQA": 35.8},
+     # Gemma
+     "gemma-3-1b-it": {"KyrgyzMMLU": 26.5, "KyrgyzRC": 38.0, "WinoGrande": 48.9, "BoolQ": 62.8, "HellaSwag": 23.5, "GSM8K": 3.2, "TruthfulQA": 31.3},
+     "gemma-3-270m": {"KyrgyzMMLU": 27.0, "KyrgyzRC": 53.2, "WinoGrande": 48.7, "BoolQ": 61.5, "HellaSwag": 27.6, "GSM8K": 1.4, "TruthfulQA": 36.6},
+     "gemma-3-4b-it": {"KyrgyzMMLU": 29.5, "KyrgyzRC": 25.0, "WinoGrande": 49.6, "BoolQ": 62.1, "HellaSwag": 24.6, "GSM8K": 0.0, "TruthfulQA": 50.0},
+     # Llama
+     "Llama-3.1-8B-Instruct": {"KyrgyzMMLU": 38.1, "KyrgyzRC": 80.5, "WinoGrande": 51.6, "BoolQ": 75.5, "HellaSwag": 21.9, "GSM8K": 37.0, "TruthfulQA": 34.4},
+     "Llama-3.2-1B-Instruct": {"KyrgyzMMLU": 26.1, "KyrgyzRC": 45.8, "WinoGrande": 49.7, "BoolQ": 62.0, "HellaSwag": 25.8, "GSM8K": 2.7, "TruthfulQA": 30.3},
+     "Llama-3.2-3B-Instruct": {"KyrgyzMMLU": 29.4, "KyrgyzRC": 64.8, "WinoGrande": 48.9, "BoolQ": 62.3, "HellaSwag": 25.3, "GSM8K": 12.9, "TruthfulQA": 32.9},
+ }
+ 
+ 
+ def to_result_json(model_id: str, metrics: Dict[str, float]) -> Dict:
+     return {
+         "config": {
+             "model_name": model_id,
+             "model_dtype": "float16",
+             "model_sha": "main",
+         },
+         "results": {k: {"metric_name": (v / 100.0) if v is not None else None} for k, v in metrics.items()},
+     }
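+ # Illustrative payload for the Qwen3-8B zero-shot row above (percentages rescaled to 0-1):
+ #   {
+ #     "config": {"model_name": "Qwen/Qwen3-8B", "model_dtype": "float16", "model_sha": "main"},
+ #     "results": {"KyrgyzMMLU": {"metric_name": 0.321}, "KyrgyzRC": {"metric_name": 0.718}, ...}
+ #   }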
+ 
+ 
+ def write_results(out_dir: str, table: Dict[str, Dict[str, float]], tag: str):
+     os.makedirs(out_dir, exist_ok=True)
+     for display_name, metrics in table.items():
+         model_id = MODEL_ID_MAP.get(display_name, f"TTimur/{display_name}")
+         payload = to_result_json(model_id, metrics)
+         # Filename convention similar to other spaces; any name ending with .json is fine
+         safe_model = model_id.replace("/", "__")
+         out_path = os.path.join(out_dir, f"results_{safe_model}_{tag}.json")
+         with open(out_path, "w") as f:
+             json.dump(payload, f, ensure_ascii=False, indent=2)
+         print(f"Wrote {out_path}")
+ 
+ 
+ def main():
+     out_dir = os.environ.get("OUT_DIR", "./kyrgyz_results")
+     write_results(out_dir, ZERO_SHOT, "zero_shot")
+     write_results(out_dir, FEW_SHOT, "few_shot")
+     print("Done. Upload generated JSONs to your dataset repo (e.g., TTimur/results_kg_v0.1)")
+ 
+ 
+ if __name__ == "__main__":
+     main()
src/display/about.py ADDED
@@ -0,0 +1,103 @@
+ from dataclasses import dataclass
+ from enum import Enum
+ 
+ 
+ @dataclass
+ class Task:
+     benchmark: str
+     metric: str
+     col_name: str
+ 
+ 
+ # Init: to update with your specific keys
+ class Tasks(Enum):
+     # task_key in the json file, metric_key in the json file, name to display in the leaderboard
+     task0 = Task("KyrgyzMMLU", "metric_name", "KyrgyzMMLU")
+     task1 = Task("KyrgyzRC", "metric_name", "KyrgyzRC")
+     task2 = Task("WinoGrande", "metric_name", "WinoGrande")
+     task3 = Task("BoolQ", "metric_name", "BoolQ")
+     task4 = Task("HellaSwag", "metric_name", "HellaSwag")
+     task5 = Task("GSM8K", "metric_name", "GSM8K")
+     task6 = Task("TruthfulQA", "metric_name", "TruthfulQA")
+ 
+ 
+ # Your leaderboard name
+ TITLE = """<h1 align="center" id="space-title">OpenLLM Kyrgyz Leaderboard v0.1</h1>"""
+ 
+ # What does your leaderboard evaluate?
+ INTRODUCTION_TEXT = """
+ Welcome to the Kyrgyz LLM Leaderboard — a dedicated platform for evaluating Large Language Models on Kyrgyz benchmarks. This space highlights models that perform well in Kyrgyz and helps advance research and tooling for low-resource languages.
+ 
+ Benchmarks include native Kyrgyz tasks (KyrgyzMMLU, KyrgyzRC) and carefully translated sets (WinoGrande, BoolQ, HellaSwag, GSM8K, TruthfulQA). Scores are directly comparable across models evaluated under the same setting.
+ 
+ 🚀 Submit Your Model 🚀
+ 
+ Have a Kyrgyz-capable model? Submit it for evaluation (currently manual) and help build a stronger Kyrgyz NLP ecosystem. See the About tab for details.
+ """
+ 
+ # Which evaluations are you running? How can people reproduce what you have?
+ LLM_BENCHMARKS_TEXT = """
+ ## How it works
+ 
+ Each model is evaluated on the seven benchmarks listed below; per-task scores are rescaled to percentages and averaged into the Average column shown on the leaderboard.
+ 
+ ## Reproducibility
+ 
+ This leaderboard aggregates results from Kyrgyz benchmarks. Datasets are hosted on the Hugging Face Hub under `TTimur`:
+ - KyrgyzMMLU: `TTimur/kyrgyzMMLU`
+ - KyrgyzRC: `TTimur/kyrgyzRC`
+ - WinoGrande (KY): `TTimur/winogrande_kg`
+ - BoolQ (KY): `TTimur/boolq_kg`
+ - HellaSwag (KY): `TTimur/hellaswag_kg`
+ - GSM8K (KY): `TTimur/gsm8k_kg`
+ - TruthfulQA (KY): `TTimur/truthfulqa_kg`
+ 
+ You can evaluate with your preferred evaluation harness (e.g., Lighteval or EleutherAI's lm-evaluation-harness) with the Kyrgyz tasks enabled, then upload the resulting JSON to the results dataset for this Space.
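+ 
+ For example, loading one of the benchmark datasets (a minimal sketch; check the dataset cards for split and config names):
+ 
+ ```python
+ from datasets import load_dataset
+ 
+ ds = load_dataset("TTimur/kyrgyzMMLU")
+ print(ds)
+ ```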
+ 
+ Notes:
+ - Metrics reported are accuracy (or QEM, quasi-exact match, for GSM8K), aligned with the dataset conventions.
+ - Ensure you use consistent few-shot settings when comparing models.
+ """
+ 
+ EVALUATION_QUEUE_TEXT = """
+ ## Some good practices before submitting a model
+ 
+ ### 1) Make sure you can load your model and tokenizer using AutoClasses:
+ ```python
+ from transformers import AutoConfig, AutoModel, AutoTokenizer
+ config = AutoConfig.from_pretrained("your model name", revision=revision)
+ model = AutoModel.from_pretrained("your model name", revision=revision)
+ tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
+ ```
+ If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
+ 
+ Note: make sure your model is public!
+ Note: if your model needs `trust_remote_code=True`, we do not support this option yet, but we are working on adding it. Stay posted!
+ 
+ ### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
+ It's a newer format for storing weights that is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
+ 
+ ### 3) Make sure your model has an open license!
+ This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗
+ 
+ ### 4) Fill out your model card
+ When we add extra information about models to the leaderboard, it will be automatically taken from the model card.
+ 
+ ## In case of model failure
+ If your model is displayed in the `FAILED` category, its execution stopped.
+ Make sure you have followed the above steps first.
+ If everything is done, check that you can launch EleutherAI's lm-evaluation-harness on your model locally (you can add `--limit` to cap the number of examples per task).
+ """
+ 
+ CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
+ CITATION_BUTTON_TEXT = r"""
+ @misc{KyrgyzLLM-Bench,
+   title={Bridging the Gap in Less-Resourced Languages: Building a Benchmark for Kyrgyz Language Models},
+   author={Timur Turatali and Aida Turdubaeva and Islam Zhenishbekov and Zhoomart Suranbaev and Anton Alekseev and Rustem Izmailov},
+   year={2025},
+   url={https://huggingface.co/datasets/TTimur/kyrgyzMMLU,
+        https://huggingface.co/datasets/TTimur/kyrgyzRC,
+        https://huggingface.co/datasets/TTimur/winogrande_kg,
+        https://huggingface.co/datasets/TTimur/boolq_kg,
+        https://huggingface.co/datasets/TTimur/truthfulqa_kg,
+        https://huggingface.co/datasets/TTimur/gsm8k_kg,
+        https://huggingface.co/datasets/TTimur/hellaswag_kg}
+ }
+ """
src/display/css_html_js.py ADDED
@@ -0,0 +1,111 @@
+ custom_css = """
+ 
+ .markdown-text {
+     font-size: 16px !important;
+ }
+ 
+ #models-to-add-text {
+     font-size: 18px !important;
+ }
+ 
+ #citation-button span {
+     font-size: 16px !important;
+ }
+ 
+ #citation-button textarea {
+     font-size: 16px !important;
+ }
+ 
+ #citation-button > label > button {
+     margin: 6px;
+     transform: scale(1.3);
+ }
+ 
+ #leaderboard-table {
+     margin-top: 15px;
+ }
+ 
+ #leaderboard-table-lite {
+     margin-top: 15px;
+ }
+ 
+ #search-bar-table-box > div:first-child {
+     background: none;
+     border: none;
+ }
+ 
+ #search-bar {
+     padding: 0px;
+ }
+ 
+ /* Hides the final AutoEvalColumn */
+ #llm-benchmark-tab-table table td:last-child,
+ #llm-benchmark-tab-table table th:last-child {
+     display: none;
+ }
+ 
+ /* Limit the width of the first AutoEvalColumn so that names don't expand too much */
+ table td:first-child,
+ table th:first-child {
+     max-width: 400px;
+     overflow: auto;
+     white-space: nowrap;
+ }
+ 
+ .tab-buttons button {
+     font-size: 20px;
+ }
+ 
+ #scale-logo {
+     border-style: none !important;
+     box-shadow: none;
+     display: block;
+     margin-left: auto;
+     margin-right: auto;
+     max-width: 600px;
+ }
+ 
+ #scale-logo .download {
+     display: none;
+ }
+ 
+ #filter_type {
+     border: 0;
+     padding-left: 0;
+     padding-top: 0;
+ }
+ 
+ #filter_type label {
+     display: flex;
+ }
+ 
+ #filter_type label > span {
+     margin-top: var(--spacing-lg);
+     margin-right: 0.5em;
+ }
+ 
+ #filter_type label > .wrap {
+     width: 103px;
+ }
+ 
+ #filter_type label > .wrap .wrap-inner {
+     padding: 2px;
+ }
+ 
+ #filter_type label > .wrap .wrap-inner input {
+     width: 1px;
+ }
+ 
+ #filter-columns-type {
+     border: 0;
+     padding: 0.5em;
+ }
+ 
+ #filter-columns-size {
+     border: 0;
+     padding: 0.5em;
+ }
+ 
+ #box-filter > .form {
+     border: 0;
+ }
+ """
+ 
+ get_window_url_params = """
+ function(url_params) {
+     const params = new URLSearchParams(window.location.search);
+     url_params = Object.fromEntries(params);
+     return url_params;
+ }
+ """
src/display/formatting.py ADDED
@@ -0,0 +1,36 @@
+ import os
+ from datetime import datetime, timezone
+ 
+ from huggingface_hub import HfApi
+ from huggingface_hub.hf_api import ModelInfo
+ 
+ 
+ API = HfApi()
+ 
+ 
+ def model_hyperlink(link, model_name):
+     return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
+ 
+ 
+ def make_clickable_model(model_name):
+     link = f"https://huggingface.co/{model_name}"
+     return model_hyperlink(link, model_name)
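+ # For example (illustrative):
+ #   make_clickable_model("Qwen/Qwen3-8B")
+ #   -> '<a target="_blank" href="https://huggingface.co/Qwen/Qwen3-8B" ...>Qwen/Qwen3-8B</a>'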
+ 
+ 
+ def styled_error(error):
+     return f"<p style='color: red; font-size: 20px; text-align: center;'>{error}</p>"
+ 
+ 
+ def styled_warning(warn):
+     return f"<p style='color: orange; font-size: 20px; text-align: center;'>{warn}</p>"
+ 
+ 
+ def styled_message(message):
+     return f"<p style='color: green; font-size: 20px; text-align: center;'>{message}</p>"
+ 
+ 
+ def has_no_nan_values(df, columns):
+     return df[columns].notna().all(axis=1)
+ 
+ 
+ def has_nan_values(df, columns):
+     return df[columns].isna().any(axis=1)
src/display/utils.py ADDED
@@ -0,0 +1,135 @@
+ from dataclasses import dataclass, make_dataclass
+ from enum import Enum
+ 
+ import pandas as pd
+ 
+ from src.display.about import Tasks
+ 
+ 
+ def fields(raw_class):
+     return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]
+ 
+ 
+ # These classes are for user-facing column names,
+ # to avoid having to change them all around the code
+ # when a modification is needed
+ @dataclass
+ class ColumnContent:
+     name: str
+     type: str
+     displayed_by_default: bool
+     hidden: bool = False
+     never_hidden: bool = False
+     dummy: bool = False
+ 
+ 
+ ## Leaderboard columns
+ auto_eval_column_dict = []
+ # Init
+ auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
+ auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
+ # Scores
+ auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
+ for task in Tasks:
+     auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
+ # Model information
+ auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
+ auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
+ auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
+ auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
+ auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
+ auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
+ auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
+ auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
+ auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
+ # Dummy column for the search bar (hidden by the custom CSS)
+ auto_eval_column_dict.append(["dummy", ColumnContent, ColumnContent("model_name_for_query", "str", False, dummy=True)])
+ 
+ # We use make_dataclass to dynamically fill the scores from Tasks
+ AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
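+ # Illustration (hypothetical REPL output): every [attr, type, default] triple above becomes
+ # a frozen field whose default is its ColumnContent, so e.g.
+ #   AutoEvalColumn.average.name  -> "Average ⬆️"
+ #   AutoEvalColumn.model.type    -> "markdown"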
+ 
+ ## For the queue columns in the submission tab
+ @dataclass(frozen=True)
+ class EvalQueueColumn:  # Queue column
+     model = ColumnContent("model", "markdown", True)
+     revision = ColumnContent("revision", "str", True)
+     private = ColumnContent("private", "bool", True)
+     precision = ColumnContent("precision", "str", True)
+     weight_type = ColumnContent("weight_type", "str", True)
+     status = ColumnContent("status", "str", True)
+ 
+ 
+ ## All the model information that we might need
+ @dataclass
+ class ModelDetails:
+     name: str
+     display_name: str = ""
+     symbol: str = ""  # emoji
+ 
+ 
+ class ModelType(Enum):
+     PT = ModelDetails(name="pretrained", symbol="🟢")
+     FT = ModelDetails(name="fine-tuned", symbol="🔶")
+     IFT = ModelDetails(name="instruction-tuned", symbol="⭕")
+     RL = ModelDetails(name="RL-tuned", symbol="🟦")
+     Unknown = ModelDetails(name="", symbol="?")
+ 
+     def to_str(self, separator=" "):
+         return f"{self.value.symbol}{separator}{self.value.name}"
+ 
+     @staticmethod
+     def from_str(type_str):
+         if "fine-tuned" in type_str or "🔶" in type_str:
+             return ModelType.FT
+         if "pretrained" in type_str or "🟢" in type_str:
+             return ModelType.PT
+         if "RL-tuned" in type_str or "🟦" in type_str:
+             return ModelType.RL
+         if "instruction-tuned" in type_str or "⭕" in type_str:
+             return ModelType.IFT
+         return ModelType.Unknown
+ 
+ 
+ class WeightType(Enum):
+     Adapter = ModelDetails("Adapter")
+     Original = ModelDetails("Original")
+     Delta = ModelDetails("Delta")
+ 
+ 
+ class Precision(Enum):
+     float16 = ModelDetails("float16")
+     bfloat16 = ModelDetails("bfloat16")
+     qt_8bit = ModelDetails("8bit")
+     qt_4bit = ModelDetails("4bit")
+     qt_GPTQ = ModelDetails("GPTQ")
+     Unknown = ModelDetails("?")
+ 
+     @staticmethod
+     def from_str(precision):
+         if precision in ["torch.float16", "float16"]:
+             return Precision.float16
+         if precision in ["torch.bfloat16", "bfloat16"]:
+             return Precision.bfloat16
+         if precision in ["8bit"]:
+             return Precision.qt_8bit
+         if precision in ["4bit"]:
+             return Precision.qt_4bit
+         if precision in ["GPTQ", "None"]:
+             return Precision.qt_GPTQ
+         return Precision.Unknown
+ 
+ 
+ # Column selection
+ COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
+ TYPES = [c.type for c in fields(AutoEvalColumn) if not c.hidden]
+ COLS_LITE = [c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.hidden]
+ TYPES_LITE = [c.type for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.hidden]
+ 
+ EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
+ EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]
+ 
+ BENCHMARK_COLS = [t.value.col_name for t in Tasks]
+ 
+ NUMERIC_INTERVALS = {
+     "?": pd.Interval(-1, 0, closed="right"),
+     "~1.5": pd.Interval(0, 2, closed="right"),
+     "~3": pd.Interval(2, 4, closed="right"),
+     "~7": pd.Interval(4, 9, closed="right"),
+     "~13": pd.Interval(9, 20, closed="right"),
+     "~35": pd.Interval(20, 45, closed="right"),
+     "~60": pd.Interval(45, 70, closed="right"),
+     "70+": pd.Interval(70, 10000, closed="right"),
+ }
src/envs.py ADDED
@@ -0,0 +1,20 @@
+ import os
+ 
+ from huggingface_hub import HfApi
+ 
+ # Token used to read/write the Hub repos below
+ TOKEN = os.environ.get("TOKEN", None)
+ 
+ # Allow overriding via environment; default to the Kyrgyz leaderboard repos (v0.1)
+ OWNER = os.environ.get("OWNER", "TTimur")
+ REPO_ID = os.environ.get("REPO_ID", f"{OWNER}/OpenLLMKyrgyzLeaderboard_v0.1")
+ QUEUE_REPO = os.environ.get("QUEUE_REPO", f"{OWNER}/requests_kg_v0.1")
+ RESULTS_REPO = os.environ.get("RESULTS_REPO", f"{OWNER}/results_kg_v0.1")
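+ # For example (hypothetical), to point the Space at your own repos:
+ #   OWNER=myorg QUEUE_REPO=myorg/requests_kg_v0.1 RESULTS_REPO=myorg/results_kg_v0.1 python app.py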
+ 
+ CACHE_PATH = os.getenv("HF_HOME", ".")
+ 
+ # Local caches
+ EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
+ EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
+ 
+ API = HfApi(token=TOKEN)
src/leaderboard/read_evals.py ADDED
@@ -0,0 +1,195 @@
+ import glob
+ import json
+ import math
+ import os
+ from dataclasses import dataclass
+ 
+ import dateutil
+ import numpy as np
+ 
+ from src.display.formatting import make_clickable_model
+ from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType
+ from src.submission.check_validity import is_model_on_hub
+ 
+ 
+ @dataclass
+ class EvalResult:
+     eval_name: str  # org_model_precision (uid)
+     full_model: str  # org/model (path on hub)
+     org: str
+     model: str
+     revision: str  # commit hash, "" if main
+     results: dict
+     precision: Precision = Precision.Unknown
+     model_type: ModelType = ModelType.Unknown  # Pretrained, fine-tuned, ...
+     weight_type: WeightType = WeightType.Original  # Original or Adapter
+     architecture: str = "Unknown"
+     license: str = "?"
+     likes: int = 0
+     num_params: int = 0
+     date: str = ""  # submission date of the request file
+     still_on_hub: bool = False
+ 
+     @classmethod
+     def init_from_json_file(cls, json_filepath):
+         """Inits the result from the specific model result file"""
+         with open(json_filepath) as fp:
+             data = json.load(fp)
+ 
+         config = data.get("config")
+ 
+         # Precision
+         precision = Precision.from_str(config.get("model_dtype"))
+ 
+         # Get model and org
+         org_and_model = config.get("model_name", config.get("model_args", None))
+         org_and_model = org_and_model.split("/", 1)
+ 
+         if len(org_and_model) == 1:
+             org = None
+             model = org_and_model[0]
+             result_key = f"{model}_{precision.value.name}"
+         else:
+             org = org_and_model[0]
+             model = org_and_model[1]
+             result_key = f"{org}_{model}_{precision.value.name}"
+         full_model = "/".join(org_and_model)
+ 
+         still_on_hub, _, model_config = is_model_on_hub(
+             full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False
+         )
+         architecture = "?"
+         if model_config is not None:
+             architectures = getattr(model_config, "architectures", None)
+             if architectures:
+                 architecture = ";".join(architectures)
+ 
+         # Extract the results available in this file (some results are split across several files)
+         results = {}
+         for task in Tasks:
+             task = task.value
+ 
+             # We average all scores of a given metric (not all metrics are present in all files)
+             accs = np.array([v.get(task.metric, None) for k, v in data["results"].items() if task.benchmark == k])
+             if accs.size == 0 or any([acc is None for acc in accs]):
+                 continue
+ 
+             mean_acc = np.mean(accs) * 100.0
+             results[task.benchmark] = mean_acc
+ 
+         return cls(
+             eval_name=result_key,
+             full_model=full_model,
+             org=org,
+             model=model,
+             results=results,
+             precision=precision,
+             revision=config.get("model_sha", ""),
+             still_on_hub=still_on_hub,
+             architecture=architecture,
+         )
+ 
+     def update_with_request_file(self, requests_path):
+         """Finds the relevant request file for the current model and updates info with it"""
+         request_file = get_request_file_for_model(requests_path, self.full_model, self.precision.value.name)
+ 
+         try:
+             with open(request_file, "r") as f:
+                 request = json.load(f)
+             self.model_type = ModelType.from_str(request.get("model_type", ""))
+             self.weight_type = WeightType[request.get("weight_type", "Original")]
+             self.license = request.get("license", "?")
+             self.likes = request.get("likes", 0)
+             self.num_params = request.get("params", 0)
+             self.date = request.get("submitted_time", "")
+         except Exception:
+             print(f"Could not find request file for {self.org}/{self.model}")
+ 
+     def to_dict(self):
+         """Converts the EvalResult to a dict compatible with our dataframe display"""
+         average = sum([v for v in self.results.values() if v is not None]) / len(Tasks)
+         data_dict = {
+             "eval_name": self.eval_name,  # not a column, just a save name
+             AutoEvalColumn.precision.name: self.precision.value.name,
+             AutoEvalColumn.model_type.name: self.model_type.value.name,
+             AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
+             AutoEvalColumn.weight_type.name: self.weight_type.value.name,
+             AutoEvalColumn.architecture.name: self.architecture,
+             AutoEvalColumn.model.name: make_clickable_model(self.full_model),
+             AutoEvalColumn.dummy.name: self.full_model,
+             AutoEvalColumn.revision.name: self.revision,
+             AutoEvalColumn.average.name: average,
+             AutoEvalColumn.license.name: self.license,
+             AutoEvalColumn.likes.name: self.likes,
+             AutoEvalColumn.params.name: self.num_params,
+             AutoEvalColumn.still_on_hub.name: self.still_on_hub,
+         }
+ 
+         for task in Tasks:
+             data_dict[task.value.col_name] = self.results[task.value.benchmark]
+ 
+         return data_dict
+ 
+ 
+ def get_request_file_for_model(requests_path, model_name, precision):
+     """Selects the correct request file for a given model. Only keeps runs tagged as FINISHED"""
+     request_files = os.path.join(
+         requests_path,
+         f"{model_name}_eval_request_*.json",
+     )
+     request_files = glob.glob(request_files)
+ 
+     # Select the correct request file (precision)
+     request_file = ""
+     request_files = sorted(request_files, reverse=True)
+     for tmp_request_file in request_files:
+         with open(tmp_request_file, "r") as f:
+             req_content = json.load(f)
+             if (
+                 req_content["status"] in ["FINISHED"]
+                 and req_content["precision"] == precision.split(".")[-1]
+             ):
+                 request_file = tmp_request_file
+     return request_file
+ 
+ 
+ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
+     """From the path of the results folder root, extract all the info needed for results"""
+     model_result_filepaths = []
+ 
+     for root, _, files in os.walk(results_path):
+         # We should only have json files in model results
+         if len(files) == 0 or any([not f.endswith(".json") for f in files]):
+             continue
+ 
+         # Sort the files by date
+         try:
+             files.sort(key=lambda x: x.removesuffix(".json").removeprefix("results_")[:-7])
+         except dateutil.parser._parser.ParserError:
+             files = [files[-1]]
+ 
+         for file in files:
+             model_result_filepaths.append(os.path.join(root, file))
+ 
+     eval_results = {}
+     for model_result_filepath in model_result_filepaths:
+         # Create the result
+         eval_result = EvalResult.init_from_json_file(model_result_filepath)
+         eval_result.update_with_request_file(requests_path)
+ 
+         # Store results of the same eval together
+         eval_name = eval_result.eval_name
+         if eval_name in eval_results.keys():
+             eval_results[eval_name].results.update({k: v for k, v in eval_result.results.items() if v is not None})
+         else:
+             eval_results[eval_name] = eval_result
+ 
+     results = []
+     for v in eval_results.values():
+         try:
+             v.to_dict()  # we test if the dict version is complete
+             results.append(v)
+         except KeyError:  # not all eval values present
+             continue
+ 
+     return results
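+ # Minimal usage sketch (hypothetical; mirrors what src/populate.py does):
+ #   from src.envs import EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH
+ #   raw = get_raw_eval_results(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH)
+ #   rows = [r.to_dict() for r in raw]  # one dict per model/precision, scores in percent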
src/populate.py ADDED
@@ -0,0 +1,56 @@
+ import json
+ import os
+ 
+ import pandas as pd
+ 
+ from src.display.formatting import has_no_nan_values, make_clickable_model
+ from src.display.utils import AutoEvalColumn, EvalQueueColumn
+ from src.leaderboard.read_evals import get_raw_eval_results
+ 
+ 
+ def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> tuple[list, pd.DataFrame]:
+     raw_data = get_raw_eval_results(results_path, requests_path)
+     all_data_json = [v.to_dict() for v in raw_data]
+ 
+     df = pd.DataFrame.from_records(all_data_json)
+     df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
+     df = df[cols].round(decimals=2)
+ 
+     # Filter out rows for which any of the benchmarks has not been produced
+     df = df[has_no_nan_values(df, benchmark_cols)]
+     return raw_data, df
+ 
+ 
+ def get_evaluation_queue_df(save_path: str, cols: list) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
+     entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
+     all_evals = []
+ 
+     for entry in entries:
+         if ".json" in entry:
+             file_path = os.path.join(save_path, entry)
+             with open(file_path) as fp:
+                 data = json.load(fp)
+ 
+             data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
+             data[EvalQueueColumn.revision.name] = data.get("revision", "main")
+ 
+             all_evals.append(data)
+         elif ".md" not in entry:
+             # this is a folder
+             sub_entries = [e for e in os.listdir(f"{save_path}/{entry}") if not e.startswith(".")]
+             for sub_entry in sub_entries:
+                 file_path = os.path.join(save_path, entry, sub_entry)
+                 with open(file_path) as fp:
+                     data = json.load(fp)
+ 
+                 data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
+                 data[EvalQueueColumn.revision.name] = data.get("revision", "main")
+                 all_evals.append(data)
+ 
+     pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
+     running_list = [e for e in all_evals if e["status"] == "RUNNING"]
+     finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
+     df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
+     df_running = pd.DataFrame.from_records(running_list, columns=cols)
+     df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
+     return df_finished[cols], df_running[cols], df_pending[cols]
src/submission/check_validity.py ADDED
@@ -0,0 +1,103 @@
+ import json
+ import os
+ from collections import defaultdict
+ 
+ import huggingface_hub
+ from huggingface_hub import ModelCard
+ from huggingface_hub.hf_api import ModelInfo
+ from transformers import AutoConfig
+ from transformers.models.auto.tokenization_auto import tokenizer_class_from_name, get_tokenizer_config
+ 
+ 
+ def check_model_card(repo_id: str) -> tuple[bool, str]:
+     """Checks that the model card and license exist and have been filled in"""
+     try:
+         card = ModelCard.load(repo_id)
+     except huggingface_hub.utils.EntryNotFoundError:
+         return False, "Please add a model card to your model to explain how you trained/fine-tuned it."
+ 
+     # Enforce license metadata
+     if card.data.license is None:
+         if not ("license_name" in card.data and "license_link" in card.data):
+             return False, (
+                 "License not found. Please add a license to your model card using the `license` metadata or a"
+                 " `license_name`/`license_link` pair."
+             )
+ 
+     # Enforce card content
+     if len(card.text) < 200:
+         return False, "Please add a description to your model card, it is too short."
+ 
+     return True, ""
+ 
+ 
+ def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False):
+     """Makes sure the model is on the hub and uses a valid configuration (in the latest transformers version).
+     Returns a (success, error_message, config) triple."""
+     try:
+         config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
+         if test_tokenizer:
+             tokenizer_config = get_tokenizer_config(model_name)
+             if tokenizer_config is not None:
+                 tokenizer_class_candidate = tokenizer_config.get("tokenizer_class", None)
+             else:
+                 tokenizer_class_candidate = config.tokenizer_class
+ 
+             tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate)
+             if tokenizer_class is None:
+                 return (
+                     False,
+                     f"uses {tokenizer_class_candidate}, which is not in a transformers release, therefore not supported at the moment.",
+                     None,
+                 )
+         return True, None, config
+ 
+     except ValueError:
+         return (
+             False,
+             "needs to be launched with `trust_remote_code=True`. For safety reasons, we do not allow these models to be automatically submitted to the leaderboard.",
+             None,
+         )
+ 
+     except Exception:
+         return False, "was not found on hub!", None
+ 
+ 
+ def get_model_size(model_info: ModelInfo, precision: str):
+     """Gets the model size from the safetensors metadata; unknown sizes are reported as 0."""
+     try:
+         model_size = round(model_info.safetensors["total"] / 1e9, 3)
+     except (AttributeError, TypeError):
+         return 0  # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in src/display/utils.py
+ 
+     size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.modelId.lower()) else 1
+     model_size = size_factor * model_size
+     return model_size
+ 
+ 
+ def get_model_arch(model_info: ModelInfo):
+     """Gets the model architecture from the configuration"""
+     return model_info.config.get("architectures", "Unknown")
+ 
+ 
+ def already_submitted_models(requested_models_dir: str) -> tuple[set[str], dict[str, list[str]]]:
+     depth = 1
+     file_names = []
+     users_to_submission_dates = defaultdict(list)
+ 
+     for root, _, files in os.walk(requested_models_dir):
+         current_depth = root.count(os.sep) - requested_models_dir.count(os.sep)
+         if current_depth == depth:
+             for file in files:
+                 if not file.endswith(".json"):
+                     continue
+                 with open(os.path.join(root, file), "r") as f:
+                     info = json.load(f)
+                     file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}")
+ 
+                     # Select organisation
+                     if info["model"].count("/") == 0 or "submitted_time" not in info:
+                         continue
+                     organisation, _ = info["model"].split("/")
+                     users_to_submission_dates[organisation].append(info["submitted_time"])
+ 
+     return set(file_names), users_to_submission_dates
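+ # Note: each returned key has the form f"{model}_{revision}_{precision}" (e.g., a hypothetical
+ # "Qwen/Qwen3-8B_main_float16"); src/submission/submit.py builds the same key to reject duplicates.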
src/submission/submit.py ADDED
@@ -0,0 +1,113 @@
+ import json
+ import os
+ from datetime import datetime, timezone
+ 
+ from src.display.formatting import styled_error, styled_message, styled_warning
+ from src.envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO
+ from src.submission.check_validity import (
+     already_submitted_models,
+     check_model_card,
+     get_model_size,
+     is_model_on_hub,
+ )
+ 
+ REQUESTED_MODELS = None
+ USERS_TO_SUBMISSION_DATES = None
+ 
+ 
+ def add_new_eval(
+     model: str,
+     base_model: str,
+     revision: str,
+     precision: str,
+     weight_type: str,
+     model_type: str,
+ ):
+     global REQUESTED_MODELS
+     global USERS_TO_SUBMISSION_DATES
+     if not REQUESTED_MODELS:
+         REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
+ 
+     user_name = ""
+     model_path = model
+     if "/" in model:
+         user_name = model.split("/")[0]
+         model_path = model.split("/")[1]
+ 
+     precision = precision.split(" ")[0]
+     current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+ 
+     if model_type is None or model_type == "":
+         return styled_error("Please select a model type.")
+ 
+     # Does the model actually exist?
+     if revision == "":
+         revision = "main"
+ 
+     # Is the model on the hub?
+     if weight_type in ["Delta", "Adapter"]:
+         base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=TOKEN, test_tokenizer=True)
+         if not base_model_on_hub:
+             return styled_error(f'Base model "{base_model}" {error}')
+ 
+     # Is the model info correctly filled in?
+     try:
+         model_info = API.model_info(repo_id=model, revision=revision)
+     except Exception:
+         return styled_error("Could not get your model information. Please fill it in properly.")
+ 
+     model_size = get_model_size(model_info=model_info, precision=precision)
+ 
+     # Were the model card and license filled in?
+     try:
+         license = model_info.cardData["license"]
+     except Exception:
+         return styled_error("Please select a license for your model")
+ 
+     modelcard_OK, error_msg = check_model_card(model)
+     if not modelcard_OK:
+         return styled_error(error_msg)
+ 
+     # Seems good, creating the eval
+     print("Adding new eval")
+ 
+     eval_entry = {
+         "model": model,
+         "base_model": base_model,
+         "revision": revision,
+         "precision": precision,
+         "weight_type": weight_type,
+         "status": "PENDING",
+         "submitted_time": current_time,
+         "model_type": model_type,
+         "likes": model_info.likes,
+         "params": model_size,
+         "license": license,
+     }
+ 
+     # Check for duplicate submission
+     if f"{model}_{revision}_{precision}" in REQUESTED_MODELS:
+         return styled_warning("This model has already been submitted.")
+ 
+     print("Creating eval file")
+     OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
+     os.makedirs(OUT_DIR, exist_ok=True)
+     out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{weight_type}.json"
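+     # e.g. (hypothetical): eval-queue/Qwen/Qwen3-8B_eval_request_False_float16_Original.json;
+     # get_request_file_for_model in src/leaderboard/read_evals.py globs for this same pattern.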
+ 
+     with open(out_path, "w") as f:
+         f.write(json.dumps(eval_entry))
+ 
+     print("Uploading eval file")
+     API.upload_file(
+         path_or_fileobj=out_path,
+         path_in_repo=out_path.split("eval-queue/")[1],
+         repo_id=QUEUE_REPO,
+         repo_type="dataset",
+         commit_message=f"Add {model} to eval queue",
+     )
+ 
+     # Remove the local file
+     os.remove(out_path)
+ 
+     return styled_message(
+         "Your request has been submitted to the evaluation queue!\nPlease wait for up to an hour for the model to show in the PENDING list."
+     )