Datasets:
Add Label Studio annotation tooling and dictionary plugins for AL Cycle 1
Browse files- .gitignore +2 -1
- ls_import_cycle1.json +0 -0
- src/build_dict_plugin.py +314 -0
- src/build_dict_search.py +221 -0
- src/dict_plugin.html +0 -0
- src/dict_search.html +0 -0
- src/ls_config_ws.xml +14 -0
- src/ls_export_ws.py +227 -0
- src/ls_import_ws.py +503 -0
.gitignore
CHANGED
|
@@ -1,3 +1,4 @@
|
|
| 1 |
__pycache__/
|
| 2 |
.venv/
|
| 3 |
-
.env
|
|
|
|
|
|
| 1 |
__pycache__/
|
| 2 |
.venv/
|
| 3 |
+
.env
|
| 4 |
+
*.egg-info/
|
ls_import_cycle1.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
src/build_dict_plugin.py
ADDED
|
@@ -0,0 +1,314 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# /// script
|
| 2 |
+
# requires-python = ">=3.9"
|
| 3 |
+
# dependencies = []
|
| 4 |
+
# ///
|
| 5 |
+
"""Generate a self-contained HTML dictionary plugin for Label Studio.
|
| 6 |
+
|
| 7 |
+
Produces dict_plugin.html that provides:
|
| 8 |
+
1. Highlight bar: reads sentence text and predicted spans from URL fragment,
|
| 9 |
+
colors multi-syllable words green (in dict) or red (not in dict)
|
| 10 |
+
2. Search box: instant dictionary search with exact/prefix/substring matches
|
| 11 |
+
|
| 12 |
+
The full 63K-entry dictionary is embedded as a JS array for client-side lookup.
|
| 13 |
+
Per-task data is passed via URL fragment (#text=...&spans=...) so the ~900KB
|
| 14 |
+
dictionary is loaded once and cached by the browser.
|
| 15 |
+
|
| 16 |
+
Usage:
|
| 17 |
+
uv run src/build_dict_plugin.py --dict path/to/dictionary.txt
|
| 18 |
+
uv run src/build_dict_plugin.py # uses default path
|
| 19 |
+
"""
|
| 20 |
+
|
| 21 |
+
import argparse
|
| 22 |
+
import json
|
| 23 |
+
import unicodedata
|
| 24 |
+
from pathlib import Path
|
| 25 |
+
|
| 26 |
+
DEFAULT_DICT = (
|
| 27 |
+
"/home/claude-code/projects/workspace_underthesea/tree-1/"
|
| 28 |
+
"models/word_segmentation/udd_ws_v1_1-20260211_034002/dictionary.txt"
|
| 29 |
+
)
|
| 30 |
+
|
| 31 |
+
LS_STATIC_DIRS = [
|
| 32 |
+
"/home/claude-code/.local/share/uv/tools/label-studio/"
|
| 33 |
+
"lib/python3.12/site-packages/label_studio/core/static_build",
|
| 34 |
+
"/home/claude-code/projects/workspace_underthesea/label-studio/.venv/"
|
| 35 |
+
"lib/python3.12/site-packages/label_studio/core/static_build",
|
| 36 |
+
]
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def nfc(text):
    """Return *text* in Unicode NFC (composed) form."""
    return unicodedata.normalize("NFC", text)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def load_dictionary(dict_path):
    """Load dictionary entries from *dict_path*, one entry per line.

    Entries are stripped, NFC-normalized, de-duplicated and returned as a
    sorted list. De-duplication keeps the embedded JS array minimal even if
    the source file repeats a line (the previous version kept duplicates).

    Args:
        dict_path: path to a UTF-8 text file with one entry per line.

    Returns:
        Sorted list of unique, NFC-normalized entries; blank lines skipped.
    """
    unique = set()
    with open(dict_path, encoding="utf-8") as f:
        for raw in f:
            entry = raw.strip()
            if entry:
                # Inline NFC normalization (same contract as the nfc() helper).
                unique.add(unicodedata.normalize("NFC", entry))
    return sorted(unique)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def build_html(entries):
    """Render the self-contained dict_plugin.html page as a string.

    The sorted entry list is embedded as a JSON array so the browser can do
    all lookups client-side; per-task data (text, predicted spans) arrives
    via the URL fragment and is rendered by the in-page JS.

    Fix over the previous version: the JS escHtml() helper now emits real
    HTML entities (&amp;, &lt;, &gt;) instead of the raw characters, so
    sentence text and dictionary entries cannot inject markup.

    Args:
        entries: sorted list of dictionary entries (plain strings).

    Returns:
        Complete HTML document as a single string.
    """
    # JSON-encode the sorted list for embedding in JS; ensure_ascii=False
    # keeps Vietnamese text readable and the payload smaller.
    dict_json = json.dumps(entries, ensure_ascii=False)

    head = """<!DOCTYPE html>
<html lang="vi">
<head>
<meta charset="UTF-8">
<style>
* { box-sizing: border-box; margin: 0; padding: 0; }
body { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif; }
.highlights {
  padding: 6px 10px;
  font-size: 15px;
  line-height: 1.6;
  background: #fafafa;
  border-bottom: 1px solid #e0e0e0;
  cursor: default;
}
.highlights .word {
  cursor: pointer;
}
.highlights .word:hover {
  background: #e3f2fd;
  border-radius: 3px;
}
.highlights .in-dict {
  background: #c8e6c9;
  padding: 1px 3px;
  border-radius: 3px;
  cursor: pointer;
}
.highlights .not-dict {
  background: #ffcdd2;
  padding: 1px 3px;
  border-radius: 3px;
  text-decoration: underline;
  text-decoration-color: red;
  cursor: pointer;
}
.search-area {
  padding: 6px 10px;
}
.search-box {
  display: flex;
  gap: 6px;
  align-items: center;
}
.search-box input {
  flex: 1;
  padding: 5px 10px;
  font-size: 14px;
  border: 1px solid #ccc;
  border-radius: 4px;
  outline: none;
}
.search-box input:focus { border-color: #4CAF50; }
.search-box .count {
  font-size: 12px;
  color: #888;
  white-space: nowrap;
}
#results {
  max-height: 200px;
  overflow-y: auto;
  font-size: 14px;
}
.section { padding: 4px 0; }
.section-title {
  font-size: 11px;
  font-weight: 600;
  color: #888;
  text-transform: uppercase;
  padding: 2px 0;
}
.match { padding: 1px 0; }
.match.exact { font-weight: 600; color: #2e7d32; }
.match mark { background: #fff9c4; padding: 0 1px; border-radius: 2px; }
.no-results { color: #999; padding: 4px 0; }
.hidden { display: none; }
</style>
</head>
<body>
<div class="highlights" id="highlights"></div>
<div class="search-area">
  <div class="search-box">
    <input type="text" id="q" placeholder="Search dictionary..." autocomplete="off">
    <span class="count" id="dict-count"></span>
  </div>
  <div id="results" class="hidden"></div>
</div>

<script>
"""

    tail = """;
const DICT_LOWER = DICT.map(e => e.toLowerCase());
const DICT_SET = new Set(DICT_LOWER);
document.getElementById("dict-count").textContent = DICT.length.toLocaleString() + " entries";

// --- Highlight bar: read params from URL fragment ---
function parseFragment() {
  const hash = decodeURIComponent(location.hash.slice(1));
  const params = {};
  for (const part of hash.split("&")) {
    const eq = part.indexOf("=");
    if (eq > 0) params[part.slice(0, eq)] = part.slice(eq + 1);
  }
  return params;
}

function escHtml(s) {
  return s.replace(/&/g, "&amp;").replace(/</g, "&lt;").replace(/>/g, "&gt;");
}

function renderHighlights() {
  const params = parseFragment();
  const text = params.text || "";
  const el = document.getElementById("highlights");
  if (!text) { el.classList.add("hidden"); return; }
  el.classList.remove("hidden");

  let spans;
  try { spans = JSON.parse(params.spans || "[]"); } catch(e) { spans = []; }

  if (!spans.length) {
    el.innerHTML = escHtml(text);
    return;
  }

  let html = "";
  let prev = 0;
  for (const sp of spans) {
    const [start, end] = sp;
    if (start > prev) html += escHtml(text.slice(prev, start));
    const word = text.slice(start, end);
    const nSyl = word.split(" ").length;
    if (nSyl === 1) {
      html += '<span class="word" onclick="fillSearch(this)">' + escHtml(word) + "</span>";
    } else {
      const inDict = DICT_SET.has(word.toLowerCase());
      const cls = inDict ? "in-dict" : "not-dict";
      html += '<span class="' + cls + '" onclick="fillSearch(this)">' + escHtml(word) + "</span>";
    }
    prev = end;
  }
  if (prev < text.length) html += escHtml(text.slice(prev));
  el.innerHTML = html;
}

function fillSearch(el) {
  const q = document.getElementById("q");
  q.value = el.textContent;
  q.focus();
  doSearch();
}

renderHighlights();

// --- Dictionary search ---
const input = document.getElementById("q");
const resultsDiv = document.getElementById("results");
let debounceTimer;
input.addEventListener("input", () => {
  clearTimeout(debounceTimer);
  debounceTimer = setTimeout(doSearch, 150);
});

function doSearch() {
  const q = input.value.trim().toLowerCase();
  if (!q) { resultsDiv.classList.add("hidden"); return; }
  const exact = [], prefix = [], substring = [];
  const MAX = 50;
  for (let i = 0; i < DICT_LOWER.length; i++) {
    const e = DICT_LOWER[i];
    if (e === q) exact.push(i);
    else if (e.startsWith(q)) { if (prefix.length < MAX) prefix.push(i); }
    else if (e.includes(q)) { if (substring.length < MAX) substring.push(i); }
    if (prefix.length >= MAX && substring.length >= MAX && exact.length > 0) break;
  }
  let h = "";
  if (exact.length > 0) {
    h += '<div class="section"><div class="section-title">Exact</div>';
    for (const i of exact) h += '<div class="match exact">' + hl(DICT[i], q) + "</div>";
    h += "</div>";
  }
  if (prefix.length > 0) {
    h += '<div class="section"><div class="section-title">Prefix (' + prefix.length + ")</div>";
    for (const i of prefix) h += '<div class="match">' + hl(DICT[i], q) + "</div>";
    h += "</div>";
  }
  if (substring.length > 0) {
    h += '<div class="section"><div class="section-title">Contains (' + substring.length + ")</div>";
    for (const i of substring) h += '<div class="match">' + hl(DICT[i], q) + "</div>";
    h += "</div>";
  }
  if (!h) h = '<div class="no-results">No matches</div>';
  resultsDiv.innerHTML = h;
  resultsDiv.classList.remove("hidden");
}

function hl(text, query) {
  const idx = text.toLowerCase().indexOf(query);
  if (idx < 0) return escHtml(text);
  return escHtml(text.slice(0, idx)) + "<mark>" + escHtml(text.slice(idx, idx + query.length)) + "</mark>" + escHtml(text.slice(idx + query.length));
}
</script>
</body>
</html>"""

    return head + "const DICT = " + dict_json + tail
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
def main():
    """CLI entry point: build dict_plugin.html and optionally deploy it.

    Raises:
        SystemExit: non-zero when the dictionary file is missing, so shell
            pipelines notice the failure (the old version printed and
            returned exit code 0).
    """
    parser = argparse.ArgumentParser(
        description="Generate dictionary plugin HTML for Label Studio"
    )
    parser.add_argument(
        "--dict",
        default=DEFAULT_DICT,
        help=f"Path to dictionary.txt (default: {DEFAULT_DICT})",
    )
    parser.add_argument(
        "--output",
        default="src/dict_plugin.html",
        help="Output HTML path (default: src/dict_plugin.html)",
    )
    parser.add_argument(
        "--deploy",
        action="store_true",
        help="Also copy to LS static_build directory",
    )
    args = parser.parse_args()

    dict_path = Path(args.dict)
    if not dict_path.exists():
        # SystemExit prints to stderr and exits with status 1.
        raise SystemExit(f"ERROR: {dict_path} not found")

    entries = load_dictionary(dict_path)
    print(f"Loaded {len(entries)} dictionary entries from {dict_path}")

    html_content = build_html(entries)
    output_path = Path(args.output)
    output_path.parent.mkdir(parents=True, exist_ok=True)
    output_path.write_text(html_content, encoding="utf-8")

    size_kb = output_path.stat().st_size / 1024
    print(f"Written {output_path} ({size_kb:.0f} KB)")

    if args.deploy:
        # Imported lazily: only the --deploy path needs it.
        import shutil

        for static_dir in LS_STATIC_DIRS:
            deploy_path = Path(static_dir) / "dict_plugin.html"
            if deploy_path.parent.exists():
                shutil.copy2(output_path, deploy_path)
                print(f"Deployed to {deploy_path}")
            else:
                # Best effort: a missing install location is not fatal.
                print(f"WARNING: {deploy_path.parent} not found, skipping")


if __name__ == "__main__":
    main()
|
src/build_dict_search.py
ADDED
|
@@ -0,0 +1,221 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# /// script
|
| 2 |
+
# requires-python = ">=3.9"
|
| 3 |
+
# dependencies = []
|
| 4 |
+
# ///
|
| 5 |
+
"""Generate a self-contained HTML dictionary search page.
|
| 6 |
+
|
| 7 |
+
Reads dictionary.txt, NFC-normalizes entries, and embeds them in
|
| 8 |
+
a standalone HTML file with search functionality (exact, prefix,
|
| 9 |
+
substring matches).
|
| 10 |
+
|
| 11 |
+
Usage:
|
| 12 |
+
uv run src/build_dict_search.py --dict path/to/dictionary.txt
|
| 13 |
+
uv run src/build_dict_search.py # uses default path
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
import argparse
|
| 17 |
+
import html
|
| 18 |
+
import unicodedata
|
| 19 |
+
from pathlib import Path
|
| 20 |
+
|
| 21 |
+
DEFAULT_DICT = (
|
| 22 |
+
"/home/claude-code/projects/workspace_underthesea/tree-1/"
|
| 23 |
+
"models/word_segmentation/udd_ws_v1_1-20260211_034002/dictionary.txt"
|
| 24 |
+
)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def nfc(text):
    """NFC-normalize *text* so visually identical strings compare equal."""
    return unicodedata.normalize("NFC", text)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def load_dictionary(dict_path):
    """Load dictionary entries from *dict_path*, one entry per line.

    Entries are stripped, NFC-normalized, de-duplicated and returned as a
    sorted list (matches build_dict_plugin.py's loader). De-duplication
    keeps the embedded page small even if the source file repeats a line.

    Args:
        dict_path: path to a UTF-8 text file with one entry per line.

    Returns:
        Sorted list of unique, NFC-normalized entries; blank lines skipped.
    """
    unique = set()
    with open(dict_path, encoding="utf-8") as f:
        for raw in f:
            entry = raw.strip()
            if entry:
                # Inline NFC normalization (same contract as the nfc() helper).
                unique.add(unicodedata.normalize("NFC", entry))
    return sorted(unique)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def build_html(entries):
    """Render the standalone dict_search.html page as a string.

    Entries are embedded inside a JS template literal, so beyond HTML
    escaping we must also neutralise backslash, backtick and ``${``: any of
    those inside an entry would otherwise terminate or interpolate the
    literal and break the page. (The previous version only applied
    html.escape — which covers none of the three — and its escapeHtml() JS
    helper emitted the raw characters instead of HTML entities.)

    Args:
        entries: sorted list of dictionary entries (plain strings).

    Returns:
        Complete HTML document as a single string.
    """
    js_lines = []
    for entry in entries:
        escaped = html.escape(entry)
        # Backslash first, so the escapes added for ` and ${ are not doubled.
        escaped = (
            escaped.replace("\\", "\\\\")
            .replace("`", "\\`")
            .replace("${", "\\${")
        )
        js_lines.append(escaped)
    dict_joined = "\n".join(js_lines)

    return f"""<!DOCTYPE html>
<html lang="vi">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Vietnamese Dictionary Search ({len(entries):,} entries)</title>
<style>
* {{ box-sizing: border-box; margin: 0; padding: 0; }}
body {{ font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif; padding: 20px; max-width: 800px; margin: 0 auto; background: #fafafa; }}
h1 {{ font-size: 1.3em; margin-bottom: 4px; color: #333; }}
.subtitle {{ color: #666; font-size: 0.9em; margin-bottom: 16px; }}
.search-box {{ position: relative; margin-bottom: 16px; }}
input {{ width: 100%; padding: 12px 16px; font-size: 16px; border: 2px solid #ddd; border-radius: 8px; outline: none; }}
input:focus {{ border-color: #4CAF50; }}
.results {{ background: white; border-radius: 8px; border: 1px solid #e0e0e0; }}
.section {{ padding: 12px 16px; border-bottom: 1px solid #f0f0f0; }}
.section:last-child {{ border-bottom: none; }}
.section-title {{ font-size: 0.8em; font-weight: 600; color: #888; text-transform: uppercase; margin-bottom: 6px; }}
.match {{ padding: 4px 0; font-size: 15px; }}
.match.exact {{ font-weight: 600; color: #2e7d32; }}
.match mark {{ background: #fff9c4; padding: 0 1px; border-radius: 2px; }}
.no-results {{ padding: 16px; color: #999; text-align: center; }}
.stats {{ color: #999; font-size: 0.85em; padding: 8px 16px; text-align: right; }}
.hidden {{ display: none; }}
</style>
</head>
<body>
<h1>Vietnamese Dictionary Search</h1>
<p class="subtitle">{len(entries):,} entries</p>
<div class="search-box">
<input type="text" id="q" placeholder="Type a word to search..." autofocus autocomplete="off">
</div>
<div id="results" class="results hidden"></div>

<script>
const RAW = `{dict_joined}`;
const DICT = RAW.split("\\n");
const DICT_SET = new Set(DICT);

// Decode HTML entities for matching
function decode(s) {{
  const el = document.createElement("span");
  el.innerHTML = s;
  return el.textContent;
}}

const DICT_DECODED = DICT.map(decode);

const input = document.getElementById("q");
const resultsDiv = document.getElementById("results");

let debounceTimer;
input.addEventListener("input", () => {{
  clearTimeout(debounceTimer);
  debounceTimer = setTimeout(doSearch, 150);
}});

function doSearch() {{
  const q = input.value.trim().toLowerCase();
  if (!q) {{
    resultsDiv.classList.add("hidden");
    return;
  }}

  const exact = [];
  const prefix = [];
  const substring = [];
  const MAX = 50;

  for (let i = 0; i < DICT_DECODED.length; i++) {{
    const entry = DICT_DECODED[i];
    const lower = entry.toLowerCase();
    if (lower === q) {{
      exact.push(i);
    }} else if (lower.startsWith(q)) {{
      if (prefix.length < MAX) prefix.push(i);
    }} else if (lower.includes(q)) {{
      if (substring.length < MAX) substring.push(i);
    }}
    if (prefix.length >= MAX && substring.length >= MAX && exact.length > 0) break;
  }}

  let html = "";

  if (exact.length > 0) {{
    html += '<div class="section"><div class="section-title">Exact match</div>';
    for (const i of exact) {{
      html += '<div class="match exact">' + highlight(DICT_DECODED[i], q) + "</div>";
    }}
    html += "</div>";
  }}

  if (prefix.length > 0) {{
    html += '<div class="section"><div class="section-title">Prefix matches (' + prefix.length + ')</div>';
    for (const i of prefix) {{
      html += '<div class="match">' + highlight(DICT_DECODED[i], q) + "</div>";
    }}
    html += "</div>";
  }}

  if (substring.length > 0) {{
    html += '<div class="section"><div class="section-title">Substring matches (' + substring.length + ')</div>';
    for (const i of substring) {{
      html += '<div class="match">' + highlight(DICT_DECODED[i], q) + "</div>";
    }}
    html += "</div>";
  }}

  if (!html) {{
    html = '<div class="no-results">No matches found</div>';
  }}

  const total = exact.length + prefix.length + substring.length;
  html += '<div class="stats">' + total + " result(s)</div>";

  resultsDiv.innerHTML = html;
  resultsDiv.classList.remove("hidden");
}}

function highlight(text, query) {{
  const lower = text.toLowerCase();
  const idx = lower.indexOf(query);
  if (idx < 0) return escapeHtml(text);
  return escapeHtml(text.substring(0, idx))
    + "<mark>" + escapeHtml(text.substring(idx, idx + query.length)) + "</mark>"
    + escapeHtml(text.substring(idx + query.length));
}}

function escapeHtml(s) {{
  return s.replace(/&/g, "&amp;").replace(/</g, "&lt;").replace(/>/g, "&gt;");
}}
</script>
</body>
</html>"""
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
def main():
    """CLI entry point: build the standalone dict_search.html page.

    Raises:
        SystemExit: non-zero when the dictionary file is missing, so shell
            pipelines notice the failure (the old version printed and
            returned exit code 0).
    """
    parser = argparse.ArgumentParser(
        description="Generate standalone dictionary search HTML page"
    )
    parser.add_argument(
        "--dict",
        default=DEFAULT_DICT,
        help=f"Path to dictionary.txt (default: {DEFAULT_DICT})",
    )
    parser.add_argument(
        "--output",
        default="src/dict_search.html",
        help="Output HTML path (default: src/dict_search.html)",
    )
    args = parser.parse_args()

    dict_path = Path(args.dict)
    if not dict_path.exists():
        # SystemExit prints to stderr and exits with status 1.
        raise SystemExit(f"ERROR: {dict_path} not found")

    entries = load_dictionary(dict_path)
    print(f"Loaded {len(entries)} dictionary entries from {dict_path}")

    html_content = build_html(entries)
    output_path = Path(args.output)
    output_path.parent.mkdir(parents=True, exist_ok=True)
    output_path.write_text(html_content, encoding="utf-8")

    size_kb = output_path.stat().st_size / 1024
    print(f"Written {output_path} ({size_kb:.0f} KB)")


if __name__ == "__main__":
    main()
|
src/dict_plugin.html
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
src/dict_search.html
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
src/ls_config_ws.xml
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<View>
|
| 2 |
+
<Header value="Word Segmentation: Select each word as a span. Multi-syllable words = one span."/>
|
| 3 |
+
<Header value="Sentence: $sent_id | Rank: $rank" size="4"/>
|
| 4 |
+
<View style="border:1px solid #e0e0e0; border-radius:8px; overflow:hidden; margin-bottom:8px">
|
| 5 |
+
<HyperText name="dict_view" value="$dict_iframe" inline="true"/>
|
| 6 |
+
</View>
|
| 7 |
+
<Labels name="label" toName="text" choice="single">
|
| 8 |
+
<Label value="W" background="#4CAF50" hotkey="w"/>
|
| 9 |
+
<Label value="WH" background="#2196F3" hotkey="h"/>
|
| 10 |
+
<Label value="WM" background="#FFC107" hotkey="m"/>
|
| 11 |
+
<Label value="WL" background="#F44336" hotkey="l"/>
|
| 12 |
+
</Labels>
|
| 13 |
+
<Text name="text" value="$text" granularity="word"/>
|
| 14 |
+
</View>
|
src/ls_export_ws.py
ADDED
|
@@ -0,0 +1,227 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Convert Label Studio JSON export to BIO format.
|
| 2 |
+
|
| 3 |
+
Reads a Label Studio JSON export (with annotations), converts span
|
| 4 |
+
annotations back to syllable-level BIO tags, and writes BIO format
|
| 5 |
+
compatible with tree-1's load_data_vlsp2013().
|
| 6 |
+
|
| 7 |
+
Usage:
|
| 8 |
+
uv run src/ls_export_ws.py --input ls_export_cycle1.json --output gold_ws_cycle1.txt
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import argparse
|
| 12 |
+
import json
|
| 13 |
+
import sys
|
| 14 |
+
import unicodedata
|
| 15 |
+
from pathlib import Path
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def nfc(text):
    """Return the NFC (composed) normalization of *text*.

    Keeps Unicode handling consistent across import/export scripts.
    """
    return unicodedata.normalize("NFC", text)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def text_to_syllable_offsets(text):
    """Compute (start, end) character offsets for each space-delimited syllable.

    Returns a list of (start, end) tuples, one per syllable, in order.
    """
    offsets = []
    cursor = 0
    for syllable in text.split(" "):
        offsets.append((cursor, cursor + len(syllable)))
        # Advance past this syllable and the single separating space.
        cursor += len(syllable) + 1
    return offsets
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def spans_to_bio(spans, text):
    """Convert Label Studio span annotations to syllable-level BIO tags.

    Args:
        spans: list of span annotation dicts (from Label Studio result),
            each carrying character offsets at span["value"]["start"/"end"]
        text: the original space-separated text string

    Returns:
        (syllables, tags) tuple. Syllables not fully covered by any span
        keep a None tag (validate_bio reports those).
    """
    syllables = text.split(" ")

    # Character boundaries of each syllable (same computation as
    # text_to_syllable_offsets, inlined here).
    boundaries = []
    pos = 0
    for syl in syllables:
        boundaries.append((pos, pos + len(syl)))
        pos += len(syl) + 1

    tags = [None] * len(syllables)

    # Process spans left-to-right by start offset.
    for span in sorted(spans, key=lambda sp: sp["value"]["start"]):
        lo = span["value"]["start"]
        hi = span["value"]["end"]
        inside = False  # flips to True once the span's first syllable is tagged
        for i, (s, e) in enumerate(boundaries):
            # Only syllables fully contained in [lo, hi] belong to the word.
            if s >= lo and e <= hi:
                tags[i] = "I-W" if inside else "B-W"
                inside = True

    return syllables, tags
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def validate_bio(syllables, tags, sent_id):
    """Validate BIO tags: every syllable tagged, only B-W/I-W, no dangling I-W.

    Returns a list of error strings prefixed with the sentence id, or an
    empty list when the tag sequence is well-formed.
    """
    problems = []

    # Per-syllable checks: missing or unknown tags.
    for i, (syl, tag) in enumerate(zip(syllables, tags)):
        if tag is None:
            problems.append(f"Syllable {i} ({syl!r}) has no tag")
        elif tag not in ("B-W", "I-W"):
            problems.append(f"Syllable {i} ({syl!r}) has invalid tag {tag!r}")

    # Sequence checks: I-W must continue a word, never start one.
    if tags and tags[0] == "I-W":
        problems.append("First syllable has I-W tag (should be B-W)")
    for i in range(1, len(tags)):
        if tags[i] == "I-W" and tags[i - 1] is None:
            problems.append(f"I-W at position {i} follows untagged syllable")

    if problems:
        return [f"[{sent_id}] {p}" for p in problems]
    return []
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def get_annotations(task):
    """Extract the best annotation result from a task.

    Completed human annotations take precedence over model predictions;
    among completed annotations the most recently updated one wins.
    Returns None when the task carries neither.
    """
    completed = [
        ann
        for ann in task.get("annotations", [])
        if not ann.get("was_cancelled", False)
    ]
    if completed:
        latest = max(completed, key=lambda ann: ann.get("updated_at", ""))
        return latest.get("result", [])

    # No usable human annotation — fall back to the first prediction.
    preds = task.get("predictions", [])
    if preds:
        return preds[0].get("result", [])

    return None
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
def main():
    """CLI entry point: convert a Label Studio JSON export to BIO format.

    Reads tasks from --input, picks each task's best result (human
    annotations preferred over predictions), converts the spans to
    syllable-level BIO tags, validates them, and writes a BIO file to
    --output. Exits with status 1 if any task failed conversion or
    validation (the output file is still written for the good tasks).
    """
    parser = argparse.ArgumentParser(
        description="Convert Label Studio JSON export to BIO format"
    )
    parser.add_argument(
        "--input",
        required=True,
        help="Path to Label Studio JSON export",
    )
    parser.add_argument(
        "--output",
        required=True,
        help="Output BIO file path",
    )
    parser.add_argument(
        "--require-annotation",
        action="store_true",
        help="Only include tasks with human annotations (skip prediction-only)",
    )
    args = parser.parse_args()

    # Read export
    with open(args.input, "r", encoding="utf-8") as f:
        tasks = json.load(f)
    print(f"Loaded {len(tasks)} tasks from {args.input}")

    sentences = []
    errors = []
    n_annotated = 0        # tasks converted from a human annotation
    n_prediction_only = 0  # tasks converted from (or skipped as) prediction-only

    for task in tasks:
        data = task.get("data", {})
        # NFC-normalize so offsets/comparisons are consistent across sources.
        text = nfc(data.get("text", ""))
        sent_id = data.get("sent_id", "unknown")

        if not text:
            errors.append(f"[{sent_id}] Empty text")
            continue

        # Get annotations
        annotations = task.get("annotations", [])
        completed = [a for a in annotations if not a.get("was_cancelled", False)]
        has_human = len(completed) > 0

        if args.require_annotation and not has_human:
            n_prediction_only += 1
            continue

        result = get_annotations(task)
        if result is None:
            errors.append(f"[{sent_id}] No annotations or predictions")
            continue

        if has_human:
            n_annotated += 1
        else:
            n_prediction_only += 1

        # Filter to label spans only (the result may contain other region types)
        spans = [r for r in result if r.get("type") == "labels"]

        # Convert to BIO
        syllables, tags = spans_to_bio(spans, text)

        # Validate; invalid sentences are reported and excluded from output
        bio_errors = validate_bio(syllables, tags, sent_id)
        if bio_errors:
            errors.extend(bio_errors)
            continue

        sentences.append({
            "sent_id": sent_id,
            "text": text,
            "syllables": syllables,
            "tags": tags,
        })

    # Report
    print(f"Human-annotated: {n_annotated}")
    print(f"Prediction-only: {n_prediction_only}")

    if errors:
        print(f"\n{len(errors)} errors:")
        for e in errors:
            print(f"  - {e}")

    print(f"\nConverted {len(sentences)} sentences")

    # Sort by sent_id for deterministic output
    sentences.sort(key=lambda s: s["sent_id"])

    # Write BIO: "# sent_id" / "# text" headers, one "syllable<TAB>tag" line
    # per syllable, blank line between sentences.
    output_path = Path(args.output)
    with open(output_path, "w", encoding="utf-8") as f:
        for sent in sentences:
            f.write(f"# sent_id = {sent['sent_id']}\n")
            f.write(f"# text = {sent['text']}\n")
            for syl, tag in zip(sent["syllables"], sent["tags"]):
                f.write(f"{syl}\t{tag}\n")
            f.write("\n")

    print(f"Written to {output_path}")

    # Non-zero exit so CI / calling scripts notice partial failures.
    if errors:
        sys.exit(1)
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
# Script entry point.
if __name__ == "__main__":
    main()
|
src/ls_import_ws.py
ADDED
|
@@ -0,0 +1,503 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# /// script
|
| 2 |
+
# requires-python = ">=3.9"
|
| 3 |
+
# dependencies = ["python-crfsuite"]
|
| 4 |
+
# ///
|
| 5 |
+
"""Convert AL Cycle 0 top-500 TSV + BIO files to Label Studio JSON.
|
| 6 |
+
|
| 7 |
+
Reads al_cycle0_top500.tsv to identify which sentences to annotate,
|
| 8 |
+
looks up their silver BIO tags from udd-ws-v1.1-{dev,test}.txt,
|
| 9 |
+
and produces a Label Studio import JSON with pre-annotations.
|
| 10 |
+
|
| 11 |
+
When --model is provided, computes per-span confidence scores from CRF
|
| 12 |
+
marginal probabilities. Each word span gets score = min confidence across
|
| 13 |
+
its syllables, so words with any uncertain boundary get a low score.
|
| 14 |
+
|
| 15 |
+
When --dict is provided (standalone or auto-detected from model dir),
|
| 16 |
+
generates dict_html field with dictionary match highlights:
|
| 17 |
+
green = multi-syllable span in dictionary
|
| 18 |
+
pink/red = multi-syllable span NOT in dictionary
|
| 19 |
+
|
| 20 |
+
Usage:
|
| 21 |
+
uv run src/ls_import_ws.py [--validate]
|
| 22 |
+
uv run src/ls_import_ws.py --dict path/to/dictionary.txt
|
| 23 |
+
uv run src/ls_import_ws.py --model path/to/model.crfsuite
|
| 24 |
+
"""
|
| 25 |
+
|
| 26 |
+
import argparse
|
| 27 |
+
import json
|
| 28 |
+
import sys
|
| 29 |
+
import unicodedata
|
| 30 |
+
from pathlib import Path
|
| 31 |
+
from urllib.parse import quote
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def parse_bio_file(filepath):
    """Parse a BIO file into a list of sentence dicts.

    Each dict has keys: sent_id, text, syllables, tags.
    Reused from fix_ws_errors.py.
    """

    def _fresh():
        return {"sent_id": "", "text": "", "syllables": [], "tags": []}

    sentences = []
    current = _fresh()

    with open(filepath, "r", encoding="utf-8") as fh:
        for raw in fh:
            line = raw.rstrip("\n")
            if line.startswith("# sent_id = "):
                current["sent_id"] = line.split("= ", 1)[1]
            elif line.startswith("# text = "):
                current["text"] = line.split("= ", 1)[1]
            elif line.startswith("#"):
                # Other comment lines are ignored.
                pass
            elif not line:
                # Blank line closes the current sentence, if it has tokens.
                if current["syllables"]:
                    sentences.append(dict(current))
                    current = _fresh()
            else:
                cols = line.split("\t")
                if len(cols) == 2:
                    current["syllables"].append(cols[0])
                    current["tags"].append(cols[1])

    # Flush a trailing sentence when the file lacks a final blank line.
    if current["syllables"]:
        sentences.append(dict(current))

    return sentences
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def parse_tsv(filepath):
    """Parse al_cycle0_top500.tsv into typed row dicts.

    Returns a list of dicts with keys: rank, file, sent_idx, score, text.
    """
    parsed = []
    with open(filepath, "r", encoding="utf-8") as fh:
        # First line is the header; it names the columns for every row.
        columns = fh.readline().rstrip("\n").split("\t")
        for raw in fh:
            stripped = raw.rstrip("\n")
            if not stripped:
                continue
            record = dict(zip(columns, stripped.split("\t")))
            parsed.append({
                "rank": int(record["rank"]),
                "file": record["file"],
                "sent_idx": int(record["sent_idx"]),
                "score": float(record["score"]),
                "text": record["text"],
            })
    return parsed
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def nfc(text):
    """Return *text* in Unicode NFC form for consistent comparisons."""
    normalized = unicodedata.normalize("NFC", text)
    return normalized
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def confidence_to_label(conf):
    """Map a confidence score to a label bucket.

    WH for conf >= 0.8, WM for 0.6 <= conf < 0.8, WL below 0.6.
    """
    # Highest threshold wins; fall through to the lowest bucket.
    for threshold, label in ((0.8, "WH"), (0.6, "WM")):
        if conf >= threshold:
            return label
    return "WL"
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def bio_to_spans(syllables, tags, text, confidences=None):
    """Convert BIO tags to Label Studio span annotations.

    Args:
        syllables: list of syllable strings
        tags: list of BIO tags (B-W / I-W), parallel to syllables
        text: space-joined text string
        confidences: optional list of per-syllable confidence floats

    Returns:
        list of span dicts for Label Studio predictions

    Raises:
        ValueError: on a syllable/text mismatch, an unknown tag, or an
            I-W tag with no preceding B-W.
    """
    spans = []
    span_start = None
    span_syllables = []
    span_syl_indices = []
    char_pos = 0

    def _close_span():
        span_text = " ".join(span_syllables)
        # Choose label based on confidence: WH/WM/WL for scored predictions,
        # plain W otherwise. A word's confidence is the minimum over its
        # syllables, so any uncertain boundary lowers the whole word's score.
        span_conf = None
        if confidences is not None:
            span_conf = min(confidences[j] for j in span_syl_indices)
            label = confidence_to_label(span_conf)
        else:
            label = "W"
        span_dict = {
            "id": f"s{len(spans)}",
            "from_name": "label",
            "to_name": "text",
            "type": "labels",
            "value": {
                "start": span_start,
                "end": span_start + len(span_text),
                "text": span_text,
                "labels": [label],
            },
        }
        if span_conf is not None:
            span_dict["score"] = round(span_conf, 4)
        spans.append(span_dict)

    for i, (syl, tag) in enumerate(zip(syllables, tags)):
        # Verify the syllable matches the text at the expected position
        # (compare NFC-normalized to tolerate differing Unicode forms).
        expected = text[char_pos:char_pos + len(syl)]
        if nfc(expected) != nfc(syl):
            raise ValueError(
                f"Syllable mismatch at pos {char_pos}: "
                f"expected {syl!r}, got {expected!r} in text {text!r}"
            )

        if tag == "B-W":
            # Close the previous span before opening a new word.
            if span_syllables:
                _close_span()
            span_start = char_pos
            span_syllables = [syl]
            span_syl_indices = [i]
        elif tag == "I-W":
            # BUGFIX: an I-W before any B-W used to leave span_start=None and
            # crash later with an opaque TypeError; fail early and clearly.
            if span_start is None:
                raise ValueError(
                    f"I-W tag at position {i} ({syl!r}) has no preceding B-W"
                )
            span_syllables.append(syl)
            span_syl_indices.append(i)
        else:
            raise ValueError(f"Unknown tag {tag!r} for syllable {syl!r}")

        char_pos += len(syl)
        # Skip the single space separator between syllables.
        if i < len(syllables) - 1:
            char_pos += 1  # space separator

    # Close the final span.
    if span_syllables:
        _close_span()

    return spans
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
def add_dict_meta(spans, dictionary):
    """Attach dictionary-lookup metadata to each span, in place.

    Label Studio shows a region's meta fields in the Region Details panel
    when the annotator selects it, giving instant dictionary feedback.

    Multi-syllable spans get dict status plus up to 5 similar entries;
    single-syllable spans get dict status only.
    """
    for region in spans:
        surface = region["value"]["text"]
        key = nfc(surface.lower().strip())
        syllable_count = surface.count(" ") + 1
        found = key in dictionary

        if syllable_count == 1:
            region["meta"] = {"dict": "✓" if found else "—"}
            continue

        if found:
            region["meta"] = {"dict": "✓ in dict"}
            continue

        # Not in dict: suggest up to 5 entries that share the first
        # syllable as a prefix (sorted for stable display).
        head = key.split()[0]
        suggestions = sorted(
            entry for entry in dictionary
            if entry.startswith(head + " ") and entry != key
        )[:5]
        info = {"dict": "✗ not found"}
        if suggestions:
            info["similar"] = ", ".join(suggestions)
        region["meta"] = info
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
def build_dict_iframe(spans, text):
    """Build an iframe tag pointing to the dictionary plugin page.

    The sentence text and span positions travel in the URL fragment so the
    plugin page can render highlights and search without a server
    round-trip. The ~900KB dictionary page is loaded once and cached by
    the browser.

    Args:
        spans: list of span dicts from bio_to_spans()
        text: the sentence text

    Returns:
        HTML iframe tag string for the HyperText panel
    """
    # Compact [start, end] pairs keep the fragment short.
    pairs = [[span["value"]["start"], span["value"]["end"]] for span in spans]
    encoded_spans = quote(json.dumps(pairs, separators=(",", ":")), safe="")
    encoded_text = quote(text, safe="")

    fragment = f"text={encoded_text}&spans={encoded_spans}"
    return (
        f"<iframe src='/static/dict_plugin.html#{fragment}'"
        f" width='100%' height='180' style='border:none'></iframe>"
    )
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
def validate_spans(spans, text, syllables):
    """Validate that spans tile the text: no overlaps, no gaps.

    Note: the third parameter receives the sentence's BIO *tags* at the
    call site (the name is historical); it is used only to count words,
    one per B-W tag. Raises ValueError on any violation.
    """
    coverage = [False] * len(text)

    # Mark each covered character; a second hit means overlapping spans.
    for span in spans:
        start = span["value"]["start"]
        end = span["value"]["end"]
        for pos in range(start, end):
            if coverage[pos]:
                raise ValueError(f"Overlap at char {pos} in text {text!r}")
            coverage[pos] = True

    # Every non-space character must belong to some span.
    for pos, ch in enumerate(text):
        if ch != " " and not coverage[pos]:
            raise ValueError(f"Gap at char {pos} ({ch!r}) in text {text!r}")

    # Exactly one span is expected per B-W tag.
    n_words = sum(1 for t in syllables if t == "B-W")  # tags not syllables
    if len(spans) != n_words:
        raise ValueError(
            f"Span count {len(spans)} != word count {n_words} in text {text!r}"
        )
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
def load_dictionary(dict_path):
    """Load a dictionary file into a set of normalized entries.

    Returns a set of lowercase, NFC-normalized dictionary entries;
    blank lines are skipped.
    """
    entries = set()
    with open(dict_path, encoding="utf-8") as fh:
        for raw in fh:
            entry = raw.strip()
            if entry:
                entries.add(nfc(entry.lower()))
    print(f"Dictionary: {len(entries)} entries from {dict_path}")
    return entries
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
def load_crf_scorer(model_path):
    """Open a trained CRF model for confidence scoring.

    Returns a pycrfsuite.Tagger bound to the model file.
    """
    # Local import: pycrfsuite is only required when --model is used.
    import pycrfsuite

    scorer = pycrfsuite.Tagger()
    scorer.open(str(model_path))
    return scorer
|
| 297 |
+
|
| 298 |
+
|
| 299 |
+
def compute_confidences(tagger, syllables, dictionary=None):
    """Compute per-syllable confidence from CRF marginal probabilities.

    Confidence at position i is max(P(B|x,i), P(I|x,i)). Feature
    extraction is shared with al_score_ws.py so scoring matches training.
    """
    # Make the sibling module importable regardless of the working dir.
    sys.path.insert(0, str(Path(__file__).resolve().parent))
    from al_score_ws import sentence_to_features

    features = sentence_to_features(syllables, dictionary)
    tagger.set(features)

    return [
        max(tagger.marginal("B", idx), tagger.marginal("I", idx))
        for idx in range(len(syllables))
    ]
|
| 319 |
+
|
| 320 |
+
|
| 321 |
+
def main():
    """CLI entry point: build a Label Studio import JSON from AL outputs.

    Reads the ranked TSV (--tsv), looks up each sentence's silver BIO
    tags in the dev/test BIO files under --bio-dir, converts them to
    Label Studio prediction spans (optionally with CRF confidence scores
    when --model is given, and dictionary metadata when a dictionary is
    available), and writes the task list to --output. Exits with status 1
    if any row failed; successfully converted tasks are still written.
    """
    parser = argparse.ArgumentParser(
        description="Convert AL top-500 + BIO to Label Studio JSON"
    )
    parser.add_argument(
        "--tsv",
        default="al_cycle0_top500.tsv",
        help="Path to ranked TSV file (default: al_cycle0_top500.tsv)",
    )
    parser.add_argument(
        "--bio-dir",
        default=".",
        help="Directory containing udd-ws-v1.1-{dev,test}.txt (default: .)",
    )
    parser.add_argument(
        "--output",
        default="ls_import_cycle1.json",
        help="Output JSON path (default: ls_import_cycle1.json)",
    )
    parser.add_argument(
        "--validate",
        action="store_true",
        help="Run validation checks on all tasks",
    )
    parser.add_argument(
        "--model",
        default=None,
        help="Path to CRF model (.crfsuite) for per-span confidence scores",
    )
    parser.add_argument(
        "--dict",
        default=None,
        help="Path to dictionary.txt (auto-detected from model dir if not set)",
    )
    args = parser.parse_args()

    root = Path(args.bio_dir)

    # Load dictionary (standalone or auto-detected from model dir)
    dictionary = None
    if args.dict:
        dictionary = load_dictionary(args.dict)
    elif args.model:
        auto_dict = Path(args.model).parent / "dictionary.txt"
        if auto_dict.exists():
            dictionary = load_dictionary(auto_dict)

    # Load CRF scorer if model provided
    tagger = None
    if args.model:
        model_path = Path(args.model)
        print(f"Loading CRF model: {model_path}")
        tagger = load_crf_scorer(model_path)
        print("Per-span confidence scoring enabled")

    # Parse TSV
    tsv_rows = parse_tsv(args.tsv)
    print(f"Loaded {len(tsv_rows)} rows from {args.tsv}")

    # Parse BIO files (only dev and test — train sentences not in top-500)
    bio_data = {}
    for split in ("dev", "test"):
        bio_path = root / f"udd-ws-v1.1-{split}.txt"
        if not bio_path.exists():
            print(f"WARNING: {bio_path} not found, skipping")
            continue
        sentences = parse_bio_file(bio_path)
        bio_data[split] = sentences
        print(f"Loaded {len(sentences)} sentences from {bio_path}")

    # Build tasks
    tasks = []
    errors = []

    for row in tsv_rows:
        split = row["file"]
        sent_idx = row["sent_idx"]

        # Guard against missing splits or out-of-range indices.
        if split not in bio_data:
            errors.append(f"Rank {row['rank']}: split '{split}' not loaded")
            continue
        if sent_idx >= len(bio_data[split]):
            errors.append(
                f"Rank {row['rank']}: sent_idx {sent_idx} >= "
                f"{len(bio_data[split])} sentences in {split}"
            )
            continue

        sent = bio_data[split][sent_idx]
        # NFC-normalize so BIO/TSV texts compare consistently.
        text = nfc(" ".join(sent["syllables"]))

        # Verify text matches TSV
        tsv_text = nfc(row["text"])
        if text != tsv_text:
            errors.append(
                f"Rank {row['rank']}: text mismatch:\n"
                f"  BIO: {text!r}\n  TSV: {tsv_text!r}"
            )
            continue

        # Compute per-syllable confidence if CRF model available
        confidences = None
        if tagger is not None:
            confidences = compute_confidences(
                tagger, sent["syllables"], dictionary
            )

        # Convert BIO to spans
        try:
            spans = bio_to_spans(
                sent["syllables"], sent["tags"], text, confidences
            )
        except ValueError as e:
            errors.append(f"Rank {row['rank']}: span conversion error: {e}")
            continue

        # Validate span coverage (opt-in via --validate)
        if args.validate:
            try:
                validate_spans(spans, text, sent["tags"])
            except ValueError as e:
                errors.append(f"Rank {row['rank']}: validation error: {e}")
                continue

        # Task-level prediction score = 1 - AL uncertainty score
        pred_score = round(1.0 - row["score"], 6)

        # Add dictionary metadata to spans + build iframe for search
        dict_iframe = ""
        if dictionary:
            add_dict_meta(spans, dictionary)
            dict_iframe = build_dict_iframe(spans, text)

        task = {
            "data": {
                "text": text,
                "sent_id": sent["sent_id"],
                "rank": row["rank"],
                "file": split,
                "sent_idx": sent_idx,
                "dict_iframe": dict_iframe,
            },
            "predictions": [{
                "model_version": "silver_crf_v1.1",
                "score": pred_score,
                "result": spans,
            }],
        }
        tasks.append(task)

    # Report
    if errors:
        print(f"\n{len(errors)} errors:")
        for e in errors:
            print(f"  - {e}")

    print(f"\nGenerated {len(tasks)} tasks")

    if tagger is not None:
        # Report confidence statistics
        all_scores = []
        for t in tasks:
            for r in t["predictions"][0]["result"]:
                if "score" in r:
                    all_scores.append(r["score"])
        if all_scores:
            low = sum(1 for s in all_scores if s < 0.8)
            print(f"Span confidence: min={min(all_scores):.4f}, "
                  f"mean={sum(all_scores)/len(all_scores):.4f}, "
                  f"<0.8: {low}/{len(all_scores)}")

    # Write output
    output_path = Path(args.output)
    with open(output_path, "w", encoding="utf-8") as f:
        json.dump(tasks, f, ensure_ascii=False, indent=2)
    print(f"Written to {output_path}")

    # Non-zero exit so calling scripts notice partial failures.
    if errors:
        sys.exit(1)
|
| 500 |
+
|
| 501 |
+
|
| 502 |
+
# Script entry point.
if __name__ == "__main__":
    main()
|