# UDD-1 / src/fix_ws_errors.py
# (Hugging Face artifact header: commit bbc68b5 by rain1024 —
#  "Restructure technical report to ACL format, add Phase 0 gold eval methodology")
# /// script
# requires-python = ">=3.9"
# dependencies = []
# ///
"""Fix known word segmentation errors in UDD-1.1 BIO files.
Six fix passes:
1. Split cross-boundary merges (uppercase mid-token signals)
1.5 Split long tokens (5+ syllables) via vocab-based greedy decomposition
2. Merge always-split compounds (dictionary compounds + inconsistent forms)
2.5 Split foreign word merges (Latin-script tokens without Vietnamese diacritics)
2.75 Split proper name boundary merges (uppercase→lowercase transitions within words)
3. Validate BIO invariants
Usage:
uv run src/fix_ws_errors.py # Fix all splits, write report
uv run src/fix_ws_errors.py --dry-run # Report only, no file changes
"""
import argparse
import re
import sys
from collections import Counter, defaultdict
from os.path import dirname, isfile, join
# ============================================================================
# Constants
# ============================================================================
# Compounds that should ALWAYS be merged (conservative curated list)
# Source: SEGMENTATION_EVAL.md sections 2c, 3, 6c + annotation guidelines
# Stored as tuples of lowercase syllables; consumed by fix_merge_compounds()
# which compares case-insensitively.
MERGE_TERMS: set = {
    # Always-split dictionary compounds (high confidence from 6c)
    ("vụ", "án"),  # 892× split, legal compound
    ("phạt", "tù"),  # 422× split
    ("hủy", "bỏ"),  # 147× split
    ("chữa", "bệnh"),  # 112× split
    ("lời", "khai"),  # 102× split
    ("kèm", "theo"),  # 101× split
    ("ghi", "rõ"),  # 99× split
    ("trả", "lại"),  # 94× split
    ("khám", "bệnh"),  # 57× split
    ("rút", "gọn"),  # 51× split
    ("giấy", "chứng", "nhận"),  # 41× split, 3 syllables
    ("tù", "chung", "thân"),  # 38× split, 3 syllables
    ("quá", "hạn"),  # 31× split
    ("làm", "chủ"),  # 30× split
    ("ô", "nhiễm", "môi", "trường"),  # 26× split, 4 syllables
    # Inconsistent forms — majority is split, should be single
    ("phiên", "tòa"),  # 576 split vs 18 single
    ("hàng", "hóa"),  # 175 split vs 6 single
    ("chủ", "tọa"),  # 125 split vs 7 single
    ("bị", "hại"),  # 96 split vs 6 single
    ("tiền", "công"),  # 62 split vs 2 single
    ("thuê", "khoán"),  # 62 split vs 2 single
    ("hòa", "giải"),  # 53 split vs 30 single
    ("bốc", "hàng"),  # 35 split vs 1 single
    # ---- Cycle 1 gold corrections: new compound merges ----
    ("ủy", "ban"),  # committee
    ("lính", "thú"),  # soldier
    ("mu", "rùa"),  # turtle shell
    ("trêu", "ghẹo"),  # tease
    ("sương", "mai"),  # morning dew
    ("mái", "nhà"),  # roof
    ("nghiến", "răng"),  # gnash teeth
    ("nheo", "nheo"),  # squint
    ("dơn", "dớt"),  # pale/sickly
    ("xua", "tay"),  # wave hand
    ("nói", "gở"),  # say unlucky things
    ("bơi", "chó"),  # dog paddle
    ("người", "thương"),  # beloved
    ("chăn", "lợn"),  # pig herding
    ("khay", "trà"),  # tea tray
    ("đồng", "tự"),  # homograph
    ("tại", "ngũ"),  # in service (military)
    ("hành", "chánh"),  # administration
    ("lượng", "tử"),  # quantum
    ("tích", "lũy"),  # accumulate
    ("siêu", "máy", "tính"),  # supercomputer
    ("đường", "thẳng"),  # straight line
    ("đầm", "đuôi", "cá"),  # fishtail dress
    ("như", "điên"),  # like crazy
    ("tẩy", "chay"),  # boycott
}
# Build index for efficient longest-match lookup: {length: [term, ...]}
_MERGE_BY_LENGTH: defaultdict = defaultdict(list)
for _term in MERGE_TERMS:
    _MERGE_BY_LENGTH[len(_term)].append(_term)
# Longest entry in MERGE_TERMS — upper bound for the match window in Pass 2.
MERGE_MAX_LEN: int = max(len(t) for t in MERGE_TERMS)
# Tokens with uppercase mid-token that are LEGITIMATE (not errors).
# Stored as lowercase strings (space-joined syllables) for comparison.
# Source: SEGMENTATION_EVAL.md section 2b — proper names and titles.
# Consulted by fix_cross_boundary(): a word whose lowercased space-joined
# form appears here is never split, even when it contains a
# lowercase→uppercase transition.
CROSS_BOUNDARY_WHITELIST: set = {
    "xã hội chủ nghĩa việt nam",
    "bộ tư pháp",
    "mặt trận tổ quốc việt nam",
    "đảng cộng sản việt nam",
    "tổng liên đoàn lao động",
    "hội đồng trọng tài",
    "chủ tịch",  # all-caps title
    "đại lý",  # all-caps title
    "nguyễn sinh hùng",  # personal name
    "luật bảo hiểm xã hội",
    "luật bảo vệ",
    "bộ luật lao động",
    "pháp lệnh dân số",
    "bộ tài nguyên và môi trường",
    # Roman numeral sessions
    "khóa xiii",
    "khóa xv",
    "khóa xiv",
    "khóa xii",
    "khóa xi",
}
# ============================================================================
# BIO file I/O
# ============================================================================
def parse_bio_file(filepath):
    """Parse a BIO file into a list of sentence dicts.

    Each sentence dict has keys: sent_id, text, syllables, tags.
    Recognized metadata lines are "# sent_id = ..." and "# text = ...";
    other comment lines are ignored, a blank line ends the current
    sentence, and only well-formed two-column "syllable<TAB>tag" lines
    contribute tokens.
    """
    def _new_sentence():
        # Fresh accumulator each time so sentences never share list objects.
        return {"sent_id": "", "text": "", "syllables": [], "tags": []}

    sentences = []
    acc = _new_sentence()
    with open(filepath, "r", encoding="utf-8") as handle:
        for raw in handle:
            raw = raw.rstrip("\n")
            if raw.startswith("# sent_id = "):
                acc["sent_id"] = raw.split("= ", 1)[1]
            elif raw.startswith("# text = "):
                acc["text"] = raw.split("= ", 1)[1]
            elif raw.startswith("#"):
                continue  # other comments carry no data
            elif not raw:
                # Blank line: flush the sentence if it collected any tokens.
                if acc["syllables"]:
                    sentences.append(dict(acc))
                acc = _new_sentence()
            else:
                columns = raw.split("\t")
                if len(columns) == 2:
                    acc["syllables"].append(columns[0])
                    acc["tags"].append(columns[1])
    # Flush a trailing sentence not followed by a blank line.
    if acc["syllables"]:
        sentences.append(dict(acc))
    return sentences
def write_bio_file(sentences, filepath):
    """Serialize sentences back to the two-column BIO format.

    Output mirrors parse_bio_file's input: two metadata lines, one
    tab-separated line per syllable, then a blank separator line.
    """
    with open(filepath, "w", encoding="utf-8") as out:
        for sent in sentences:
            block = [
                f"# sent_id = {sent['sent_id']}",
                f"# text = {sent['text']}",
            ]
            block.extend(
                f"{syl}\t{tag}"
                for syl, tag in zip(sent["syllables"], sent["tags"])
            )
            # Trailing "\n\n" terminates the last token line and leaves the
            # blank line that separates sentences.
            out.write("\n".join(block) + "\n\n")
def bio_to_words(syllables, tags):
    """Group BIO-tagged syllables into space-joined words.

    A "B-W" tag opens a new word; any other tag continues the current
    word (so a malformed leading "I-W" still begins the first word).
    """
    n = min(len(syllables), len(tags))
    words = []
    start = 0
    for i in range(1, n):
        if tags[i] == "B-W":
            words.append(" ".join(syllables[start:i]))
            start = i
    if n:
        words.append(" ".join(syllables[start:n]))
    return words
def write_conllu(sentences, filepath):
    """Write sentences as minimal CoNLL-U.

    Only ID and FORM are filled; the remaining eight columns (LEMMA,
    UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC) are underscores since
    no syntactic annotation is available.
    """
    with open(filepath, "w", encoding="utf-8") as out:
        for sent in sentences:
            out.write(f"# sent_id = {sent['sent_id']}\n")
            out.write(f"# text = {sent['text']}\n")
            words = bio_to_words(sent["syllables"], sent["tags"])
            for idx, form in enumerate(words, 1):
                out.write(f"{idx}\t{form}" + "\t_" * 8 + "\n")
            out.write("\n")
# ============================================================================
# Vocab for long-token splitting
# ============================================================================
def build_split_vocab(all_sentences, min_count=5):
    """Collect 2-4 syllable word forms frequent enough to guide splitting.

    Every 2-4 syllable word across the dataset is counted
    case-insensitively. Forms seen at least `min_count` times are kept,
    except those whose first or last syllable is a boundary function word
    (e.g. "tài nguyên và" must not become a splitting unit).
    """
    # Function words that should never sit at the edge of a compound.
    edge_stopwords = {"và", "hoặc"}
    counts = Counter()
    for sent in all_sentences:
        for word in bio_to_words(sent["syllables"], sent["tags"]):
            parts = [s.lower() for s in word.split()]
            if 2 <= len(parts) <= 4:
                counts[" ".join(parts)] += 1
    kept = set()
    for form, freq in counts.items():
        if freq < min_count:
            continue
        parts = form.split()
        if parts[0] in edge_stopwords or parts[-1] in edge_stopwords:
            continue
        kept.add(form)
    return kept
def build_viet_syllables(all_sentences, min_count=50):
    """Return the set of lowercased syllables seen at least `min_count` times.

    These frequent syllables let the foreign-word pass tell genuine
    Vietnamese material (like "kinh doanh") apart from truly foreign
    tokens (like "Max Planck").
    """
    freq = Counter(
        syl.lower()
        for sent in all_sentences
        for syl in sent["syllables"]
    )
    return {syl for syl, count in freq.items() if count >= min_count}
# ============================================================================
# Fix passes
# ============================================================================
def fix_cross_boundary(syllables, tags):
    """Pass 1: split cross-boundary merges.

    A lowercase→uppercase transition inside a multi-syllable word (e.g.
    "tố tụng Người") is strong evidence that two words were glued
    together, so a new word is opened at every such transition. Words
    whose syllables are all uppercase-initial ("Việt Nam") contain no
    transition and are untouched; words whose lowercased form appears in
    CROSS_BOUNDARY_WHITELIST (proper names, titles) are skipped outright.

    Returns (new_tags, list of change descriptions).
    """
    new_tags = list(tags)
    changes = []
    # Word spans as half-open (start, end) pairs derived from B-W positions.
    inner_starts = [i for i, t in enumerate(tags) if t == "B-W" and i > 0]
    spans = zip([0] + inner_starts, inner_starts + [len(tags)])

    def _is_transition(prev, curr):
        # Previous syllable does NOT begin uppercase, current one does.
        return bool(curr) and curr[0].isupper() and bool(prev) and not prev[0].isupper()

    for start, end in spans:
        if end - start < 2:
            continue  # single-syllable word
        split_points = [
            j for j in range(start + 1, end)
            if _is_transition(syllables[j - 1], syllables[j])
        ]
        if not split_points:
            continue
        word_lower = " ".join(s.lower() for s in syllables[start:end])
        if word_lower in CROSS_BOUNDARY_WHITELIST:
            continue
        word_before = " ".join(syllables[start:end])
        for j in split_points:
            new_tags[j] = "B-W"
        word_parts = bio_to_words(syllables[start:end], new_tags[start:end])
        changes.append(f"split \"{word_before}\" → {' + '.join(repr(p) for p in word_parts)}")
    return new_tags, changes
def fix_split_long_tokens(syllables, tags, vocab):
    """Pass 1.5: split 5+ syllable tokens using vocab decomposition.

    Each overlong word is decomposed greedily left-to-right, always
    preferring the longest vocab match (4, then 3, then 2 syllables) at
    the current position; syllables matching nothing become their own
    single-syllable words.

    Returns (new_tags, list of change descriptions).
    """
    new_tags = list(tags)
    changes = []
    # Word spans as half-open (start, end) pairs derived from B-W positions.
    inner_starts = [i for i, t in enumerate(tags) if t == "B-W" and i > 0]
    spans = zip([0] + inner_starts, inner_starts + [len(tags)])
    for start, end in spans:
        if end - start < 5:
            continue
        # Greedy longest-match decomposition into (seg_start, seg_end) pairs.
        segments = []
        pos = start
        while pos < end:
            for width in range(min(4, end - pos), 1, -1):
                form = " ".join(s.lower() for s in syllables[pos:pos + width])
                if form in vocab:
                    break
            else:
                width = 1  # nothing matched: emit a single-syllable word
            segments.append((pos, pos + width))
            pos += width
        # Only rewrite the span if it actually decomposed.
        if len(segments) < 2:
            continue
        original_word = " ".join(syllables[start:end])
        for seg_start, seg_end in segments:
            new_tags[seg_start] = "B-W"
            for j in range(seg_start + 1, seg_end):
                new_tags[j] = "I-W"
        pieces = [" ".join(syllables[a:b]) for a, b in segments]
        changes.append(
            f"split \"{original_word}\" → "
            f"{' + '.join(repr(p) for p in pieces)}"
        )
    return new_tags, changes
def fix_merge_compounds(syllables, tags, merge_terms=None):
    """Pass 2: merge always-split compounds.

    Scans left-to-right; at each B-W position, the next N syllables are
    matched against the merge list (longest entries first,
    case-insensitive). A match requires every covered syllable to start
    its own word (tag B-W) AND the word beginning at the last covered
    syllable to end at the span boundary. Without the boundary check,
    matching ("vụ", "án") against text segmented "vụ | án mạng" would
    set "án" to I-W and silently produce the 3-syllable word
    "vụ án mạng" — a new segmentation error.

    Args:
        syllables: list of syllable strings.
        tags: list of BIO tag strings.
        merge_terms: optional set of lowercase syllable tuples; defaults
            to the module-level MERGE_TERMS (backward compatible).

    Returns (new_tags, list of change descriptions).
    """
    if merge_terms is None:
        merge_terms = MERGE_TERMS
        lengths = set(_MERGE_BY_LENGTH)
        max_len = MERGE_MAX_LEN
    else:
        lengths = {len(t) for t in merge_terms}
        max_len = max(lengths, default=1)
    new_tags = list(tags)
    changes = []
    n = len(syllables)
    i = 0
    while i < n:
        if new_tags[i] != "B-W":
            i += 1
            continue
        matched = False
        # Longest match first so 3-syllable terms beat their 2-syllable prefixes.
        for length in range(min(max_len, n - i), 1, -1):
            if length not in lengths:
                continue
            # Every syllable in the span must begin a word...
            if any(new_tags[j] != "B-W" for j in range(i + 1, i + length)):
                continue
            # ...and the last covered word must not extend past the span
            # (fix: a trailing I-W syllable was previously absorbed into
            # the merged compound).
            if i + length < n and new_tags[i + length] == "I-W":
                continue
            candidate = tuple(s.lower() for s in syllables[i:i + length])
            if candidate in merge_terms:
                # Demote the inner B-W tags so the span becomes one word.
                for j in range(i + 1, i + length):
                    new_tags[j] = "I-W"
                merged = " ".join(syllables[i:i + length])
                changes.append(f"merge \"{merged}\"")
                i += length
                matched = True
                break
        if not matched:
            i += 1
    return new_tags, changes
def _is_latin_no_vietnamese(s):
"""Check if a string is purely Latin-script without Vietnamese diacritics.
Returns True for ASCII Latin (a-z, A-Z, 0-9, hyphen) and common Latin
extensions BUT NOT Vietnamese-specific characters (ă, â, đ, ê, ô, ơ, ư
and their tone marks).
"""
# Vietnamese diacritics pattern: any character with Vietnamese-specific marks
vietnamese_chars = re.compile(
r'[àáảãạăắằẳẵặâấầẩẫậèéẻẽẹêếềểễệìíỉĩịòóỏõọôốồổỗộơớờởỡợ'
r'ùúủũụưứừửữựỳýỷỹỵđÀÁẢÃẠĂẮẰẲẴẶÂẤẦẨẪẬÈÉẺẼẸÊẾỀỂỄỆÌÍỈĨỊ'
r'ÒÓỎÕỌÔỐỒỔỖỘƠỚỜỞỠỢÙÚỦŨỤƯỨỪỬỮỰỲÝỶỸỴĐ]'
)
if vietnamese_chars.search(s):
return False
# Must contain at least one Latin letter
return bool(re.search(r'[a-zA-Z]', s))
# Known foreign proper names that should stay merged (whitelist).
# Entries are single lowercase words; fix_foreign_words() checks candidate
# tokens against this set before splitting them.
# NOTE(review): the lookup there uses the space-joined, lowercased form of a
# multi-syllable token (e.g. "cecelia ahern"), which single-word entries can
# never equal — confirm the intended matching granularity.
FOREIGN_NAME_WHITELIST: set = {
    "beethoven", "homer", "odysseus", "cecelia", "ahern", "holly",
    "hideoshi", "gurth", "euler", "hilbert", "rydberg", "bohr",
    "frankael-zermelo", "giambattista", "valli", "dachau",
    "habsburg", "newton", "einstein", "darwin", "shakespeare",
}
def fix_foreign_words(syllables, tags, viet_syllables):
    """Pass 2.5: split foreign word merges.

    Detects multi-syllable tokens where ALL syllables are Latin-script only
    (no Vietnamese diacritics) AND none of the syllables are common
    Vietnamese syllables. Each such foreign syllable becomes its own word.

    Args:
        syllables: list of syllable strings.
        tags: list of BIO tag strings.
        viet_syllables: set of common Vietnamese syllables (lowercase) for
            filtering out false positives like "kinh doanh".

    Returns (new_tags, list of change descriptions).
    """
    new_tags = list(tags)
    changes = []
    # Reconstruct word spans as half-open (start, end) pairs.
    word_spans = []
    current_start = 0
    for i in range(len(tags)):
        if tags[i] == "B-W" and i > 0:
            word_spans.append((current_start, i))
            current_start = i
    word_spans.append((current_start, len(tags)))
    for start, end in word_spans:
        if end - start < 2:
            continue
        span = syllables[start:end]
        # Every syllable must look foreign (Latin script, no diacritics).
        if not all(_is_latin_no_vietnamese(s) for s in span):
            continue
        # Any common Vietnamese syllable disqualifies the whole token.
        if any(s.lower() in viet_syllables for s in span):
            continue
        # Whitelist of names that should stay merged. Fix: entries are
        # single lowercase words, but spans here always have >= 2 syllables,
        # so the whole-token lookup ("cecelia ahern") alone could never
        # match — additionally keep tokens whose every syllable is listed.
        lowered = [s.lower() for s in span]
        if " ".join(lowered) in FOREIGN_NAME_WHITELIST:
            continue
        if all(s in FOREIGN_NAME_WHITELIST for s in lowered):
            continue
        # Split: make each syllable its own word.
        word_before = " ".join(span)
        for j in range(start + 1, end):
            new_tags[j] = "B-W"
        changes.append(
            f"split-foreign \"{word_before}\" → "
            f"{' + '.join(repr(p) for p in span)}"
        )
    return new_tags, changes
# Vietnamese institutional compound prefixes that should NOT be split by
# the name-boundary pass (Pass 2.75 in fix_proper_name_boundary).
# Entries are lowercased FIRST syllables: any multi-syllable word starting
# with one of these + a lowercase continuation is likely a legitimate
# compound, not a proper-name boundary error.
NAME_BOUNDARY_WHITELIST_S1: set = {
    "ủy",  # Ủy ban (nhân dân / thường vụ / ...)
    "viện",  # Viện kiểm sát / Viện nghiên cứu
    "tổng",  # Tổng giám đốc / Tổng thư ký / ...
    "nhà",  # Nhà khoa học / Nhà xuất bản / Nhà đầu tư
    "phòng",  # Phòng thí nghiệm
    "cảng",  # Cảng hàng không
    "xuất",  # Xuất nhập khẩu
    "sách",  # Sách (title compounds)
    "thuế",  # Thuế thu nhập
    "cây",  # Cây lương thực
    "nói",  # Nói tóm lại
    "bộ",  # Bộ luật dân sự / Bộ Tài chính
    "đại",  # Đại hội đồng
    "lò",  # Lò phản ứng
    "ngay",  # Ngay lập tức
    "việc",  # Việc làm ăn
    "vùng",  # Vùng kinh tế
    "sân",  # Sân vận động
    "tiểu",  # Tiểu văn hóa
    "trang",  # Trang thiết bị
    "tết",  # Tết dương lịch
    "thuyết",  # Thuyết sinh vật học
    "điểm",  # Điểm nóng chảy
    "lý",  # Lý thuyết
    "hệ",  # Hệ tiên đề
}
def fix_proper_name_boundary(syllables, tags, vocab):
    """Pass 2.75: split proper name boundary merges.

    Looks inside 3+ syllable words for an uppercase→lowercase transition
    where the lowercase tail (or at least its leading two syllables) is a
    known vocab word — e.g. "Tống tiêu diệt" → "Tống" + "tiêu diệt".
    Words opening with an institutional prefix
    (NAME_BOUNDARY_WHITELIST_S1) are never touched.

    Returns (new_tags, list of change descriptions).
    """
    new_tags = list(tags)
    changes = []
    # Word spans as half-open (start, end) pairs derived from B-W positions.
    inner_starts = [i for i, t in enumerate(tags) if t == "B-W" and i > 0]
    for start, end in zip([0] + inner_starts, inner_starts + [len(tags)]):
        if end - start < 3:
            continue  # need at least a name plus a multi-syllable tail
        # Legitimate institutional compounds ("Ủy ban ...") also start with
        # an uppercase syllable followed by lowercase — skip them.
        if syllables[start].lower() in NAME_BOUNDARY_WHITELIST_S1:
            continue
        split_pos = None
        for j in range(start + 1, end):
            prev = syllables[j - 1]
            curr = syllables[j]
            if not (prev and prev[0].isupper()):
                continue
            if not curr or curr[0].isupper():
                continue
            # Upper→lower transition found: accept it when the full tail,
            # or its leading bigram, is a known vocabulary word.
            tail = " ".join(s.lower() for s in syllables[j:end])
            if tail in vocab:
                split_pos = j
                break
            if end - j >= 2:
                bigram = " ".join(s.lower() for s in syllables[j:j + 2])
                if bigram in vocab:
                    split_pos = j
                    break
        if split_pos is None:
            continue
        whole = " ".join(syllables[start:end])
        new_tags[split_pos] = "B-W"
        # Exactly two pieces result from a single split point.
        left = " ".join(syllables[start:split_pos])
        right = " ".join(syllables[split_pos:end])
        changes.append(
            f"split-name-boundary \"{whole}\" → "
            f"{' + '.join(repr(p) for p in (left, right))}"
        )
    return new_tags, changes
def validate_sentence(syllables, tags):
    """Pass 3: validate BIO invariants.

    Returns a list of human-readable error descriptions; an empty list
    means the sentence is valid. Empty sentences are trivially valid.
    """
    if not syllables:
        return []
    problems = []
    if tags[0] != "B-W":
        problems.append(f"sentence starts with {tags[0]} instead of B-W")
    problems.extend(
        f"position {i}: invalid tag '{tag}'"
        for i, tag in enumerate(tags)
        if tag not in ("B-W", "I-W")
    )
    return problems
# ============================================================================
# Report generation
# ============================================================================
def _append_summary(lines, all_stats):
    """Append the per-file summary table (one row per file plus a TOTAL row)."""
    lines.append("## Summary")
    lines.append("")
    lines.append("| File | Cross-boundary | Long token | Compound merges | Foreign splits | Name boundary | Validation errors |")
    lines.append("|------|---------------:|-----------:|----------------:|---------------:|--------------:|------------------:|")
    totals = [0] * 6
    for fname, stats in all_stats.items():
        row = [
            stats.get("n_cross_boundary", 0),
            stats.get("n_split_long", 0),
            stats.get("n_merge", 0),
            stats.get("n_foreign", 0),
            stats.get("n_name_boundary", 0),
            stats.get("n_validation_errors", 0),
        ]
        totals = [t + v for t, v in zip(totals, row)]
        cells = " | ".join(f"{v:,}" for v in row)
        lines.append(f"| {fname} | {cells} |")
    total_cells = " | ".join(f"**{t:,}**" for t in totals)
    lines.append(f"| **TOTAL** | {total_cells} |")
    lines.append("")
def _append_merge_frequency(lines, all_stats):
    """Append the merge-term frequency table aggregated over all files."""
    lines.append("## Merge Frequency by Term")
    lines.append("")
    merge_counts = Counter()
    for stats in all_stats.values():
        merge_counts += stats["merge_term_counts"]
    lines.append("| Term | Count |")
    lines.append("|:-----|------:|")
    for term, count in merge_counts.most_common():
        lines.append(f"| {term} | {count:,} |")
    lines.append("")
def _append_examples(lines, all_stats, title, key, limit):
    """Append one example section, truncated to `limit` entries per file."""
    lines.append(f"## {title}")
    lines.append("")
    for fname, stats in all_stats.items():
        examples = stats.get(key, [])
        if not examples:
            continue
        lines.append(f"### {fname}")
        lines.append("")
        lines.extend(f"- {ex}" for ex in examples[:limit])
        if len(examples) > limit:
            lines.append(f"- ... and {len(examples) - limit} more")
        lines.append("")
def generate_report(all_stats, output_path=None):
    """Generate a markdown report of all changes.

    Args:
        all_stats: {filename: stats_dict} as produced by process_file().
        output_path: if given, the report is also written to this path.

    Returns the full report as one markdown string.
    """
    lines = [
        "# WS Fix Report",
        "",
        "Fixes applied by `src/fix_ws_errors.py` to UDD-1.1 word segmentation BIO files.",
        "",
    ]
    _append_summary(lines, all_stats)
    _append_merge_frequency(lines, all_stats)
    # Cross-boundary examples were historically capped at 20, the rest at 30.
    _append_examples(lines, all_stats, "Cross-Boundary Split Examples",
                     "cross_boundary_examples", 20)
    _append_examples(lines, all_stats, "Long Token Split Examples",
                     "split_long_examples", 30)
    _append_examples(lines, all_stats, "Foreign Word Split Examples",
                     "foreign_examples", 30)
    _append_examples(lines, all_stats, "Name Boundary Split Examples",
                     "name_boundary_examples", 30)
    report = "\n".join(lines)
    if output_path:
        with open(output_path, "w", encoding="utf-8") as f:
            f.write(report)
        print(f"\nReport written to {output_path}")
    return report
# ============================================================================
# Main
# ============================================================================
def process_file(filepath, vocab=None, viet_syllables=None, sentences=None, dry_run=False):
    """Process a single BIO file: apply fixes, optionally write back.

    Runs the fix passes in their numbered order (1 → 1.5 → 2 → 2.5 → 2.75 → 3)
    on every sentence, mutating each sentence's "tags" in place. Syllable
    counts must be invariant: passes only retag, never add/remove syllables.

    Args:
        filepath: Path to BIO file.
        vocab: Set of known 2-4 syllable words for long-token splitting.
            If None, Pass 1.5 and 2.75 are skipped.
        viet_syllables: Set of common Vietnamese syllables for foreign word
            filtering. If None, Pass 2.5 is skipped.
        sentences: Pre-parsed sentences (avoids re-parsing if already loaded).
        dry_run: If True, report changes without modifying files.

    Returns (sentences, stats_dict).
    """
    print(f"\nProcessing {filepath}...")
    if sentences is None:
        sentences = parse_bio_file(filepath)
    print(f" Loaded {len(sentences):,} sentences")
    # Before/after word counts come from counting B-W tags.
    total_syllables_before = sum(len(s["syllables"]) for s in sentences)
    total_words_before = sum(
        sum(1 for t in s["tags"] if t == "B-W") for s in sentences
    )
    n_cross_boundary = 0
    n_split_long = 0
    n_merge = 0
    n_foreign = 0
    n_name_boundary = 0
    n_validation_errors = 0
    cross_boundary_examples = []
    split_long_examples = []
    foreign_examples = []
    name_boundary_examples = []
    merge_term_counts = Counter()
    for sent in sentences:
        syls = sent["syllables"]
        # Pass 1: Cross-boundary splits
        tags, cb_changes = fix_cross_boundary(syls, sent["tags"])
        n_cross_boundary += len(cb_changes)
        for ch in cb_changes:
            cross_boundary_examples.append(f"[{sent['sent_id']}] {ch}")
        # Pass 1.5: Split long tokens (5+ syllables)
        if vocab is not None:
            tags, split_changes = fix_split_long_tokens(syls, tags, vocab)
            n_split_long += len(split_changes)
            for ch in split_changes:
                split_long_examples.append(f"[{sent['sent_id']}] {ch}")
        # Pass 2: Merge compounds
        tags, merge_changes = fix_merge_compounds(syls, tags)
        n_merge += len(merge_changes)
        for ch in merge_changes:
            # Extract the merged term for counting
            # Format: 'merge "term"'
            term = ch.split('"')[1] if '"' in ch else ch
            merge_term_counts[term.lower()] += 1
        # Pass 2.5: Split foreign word merges
        if viet_syllables is not None:
            tags, fw_changes = fix_foreign_words(syls, tags, viet_syllables)
            n_foreign += len(fw_changes)
            for ch in fw_changes:
                foreign_examples.append(f"[{sent['sent_id']}] {ch}")
        # Pass 2.75: Split proper name boundary merges
        if vocab is not None:
            tags, nb_changes = fix_proper_name_boundary(syls, tags, vocab)
            n_name_boundary += len(nb_changes)
            for ch in nb_changes:
                name_boundary_examples.append(f"[{sent['sent_id']}] {ch}")
        # Pass 3: Validate (warnings only; tags are kept even if invalid)
        errors = validate_sentence(syls, tags)
        n_validation_errors += len(errors)
        if errors:
            print(f" WARN [{sent['sent_id']}]: {'; '.join(errors)}")
        sent["tags"] = tags
    total_syllables_after = sum(len(s["syllables"]) for s in sentences)
    total_words_after = sum(
        sum(1 for t in s["tags"] if t == "B-W") for s in sentences
    )
    print(f" Cross-boundary splits: {n_cross_boundary:,}")
    print(f" Long token splits: {n_split_long:,}")
    print(f" Compound merges: {n_merge:,}")
    print(f" Foreign word splits: {n_foreign:,}")
    print(f" Name boundary splits: {n_name_boundary:,}")
    print(f" Validation errors: {n_validation_errors:,}")
    # NOTE(review): the before/after counts below print with no separator
    # between them — a "→" may have been lost; verify the intended format.
    print(f" Words: {total_words_before:,}{total_words_after:,} "
          f"(Δ{total_words_after - total_words_before:+,})")
    # Hard invariant: passes retag syllables but never add or drop them.
    assert total_syllables_before == total_syllables_after, \
        f"Syllable count changed: {total_syllables_before}{total_syllables_after}"
    print(f" Syllables: {total_syllables_before:,} (unchanged)")
    if not dry_run:
        write_bio_file(sentences, filepath)
        print(f" Written: {filepath}")
        # Also regenerate CoNLL-U
        conllu_path = filepath.replace(".txt", ".conllu")
        write_conllu(sentences, conllu_path)
        print(f" Written: {conllu_path}")
    stats = {
        "n_cross_boundary": n_cross_boundary,
        "n_split_long": n_split_long,
        "n_merge": n_merge,
        "n_foreign": n_foreign,
        "n_name_boundary": n_name_boundary,
        "n_validation_errors": n_validation_errors,
        "cross_boundary_examples": cross_boundary_examples,
        "split_long_examples": split_long_examples,
        "foreign_examples": foreign_examples,
        "name_boundary_examples": name_boundary_examples,
        "merge_term_counts": merge_term_counts,
        "words_before": total_words_before,
        "words_after": total_words_after,
    }
    return sentences, stats
def main():
    """CLI entry point: load all splits, build shared stats, run fix passes."""
    parser = argparse.ArgumentParser(
        description="Fix known word segmentation errors in UDD-1.1 BIO files."
    )
    parser.add_argument(
        "--dry-run", action="store_true",
        help="Report changes without modifying files"
    )
    args = parser.parse_args()
    # Repository root = parent of the directory containing this script.
    base_dir = dirname(dirname(__file__))
    bio_files = [
        join(base_dir, f"udd-ws-v1.1-{split}.txt")
        for split in ("train", "dev", "test")
    ]
    # Check all files exist
    for path in bio_files:
        if not isfile(path):
            print(f"ERROR: {path} not found", file=sys.stderr)
            sys.exit(1)
    if args.dry_run:
        print("=== DRY RUN — no files will be modified ===")
    # Phase 1: Parse all files
    all_sentences_by_file = {}
    for path in bio_files:
        print(f"Loading {path}...")
        all_sentences_by_file[path] = parse_bio_file(path)
        print(f" {len(all_sentences_by_file[path]):,} sentences")
    # Phase 2: Build vocab from all sentences.
    # Vocab and syllable set span train+dev+test so every split is fixed
    # against the same reference statistics.
    all_sents = [s for sents in all_sentences_by_file.values() for s in sents]
    vocab = build_split_vocab(all_sents)
    # NOTE(review): the ">= 5" / ">= 50" wording below mirrors the defaults
    # of build_split_vocab / build_viet_syllables — keep them in sync.
    print(f"\nBuilt split vocab: {len(vocab):,} entries "
          f"(2-4 syllable words with count >= 5)")
    viet_syllables = build_viet_syllables(all_sents)
    print(f"Built Vietnamese syllable set: {len(viet_syllables):,} entries "
          f"(syllables with count >= 50)")
    # Phase 3: Process each file
    all_stats = {}
    for path in bio_files:
        # NOTE(review): assumes POSIX separators; os.path.basename would also
        # handle Windows paths.
        fname = path.rsplit("/", 1)[-1]
        _, stats = process_file(
            path,
            vocab=vocab,
            viet_syllables=viet_syllables,
            sentences=all_sentences_by_file[path],
            dry_run=args.dry_run,
        )
        all_stats[fname] = stats
    # Generate report (written to disk normally; printed on a dry run)
    report_path = join(base_dir, "WS_FIX_REPORT.md")
    if not args.dry_run:
        generate_report(all_stats, report_path)
    else:
        report = generate_report(all_stats)
        print("\n" + report)
    # Final summary
    total_splits = sum(s["n_cross_boundary"] for s in all_stats.values())
    total_long = sum(s["n_split_long"] for s in all_stats.values())
    total_merges = sum(s["n_merge"] for s in all_stats.values())
    total_foreign = sum(s.get("n_foreign", 0) for s in all_stats.values())
    total_name_boundary = sum(s.get("n_name_boundary", 0) for s in all_stats.values())
    total_errors = sum(s["n_validation_errors"] for s in all_stats.values())
    print(f"\n{'='*50}")
    print(f"TOTAL: {total_splits:,} cross-boundary splits, "
          f"{total_long:,} long token splits, "
          f"{total_merges:,} compound merges, "
          f"{total_foreign:,} foreign word splits, "
          f"{total_name_boundary:,} name boundary splits, "
          f"{total_errors:,} validation errors")
    if args.dry_run:
        print("(dry run — no files modified)")
if __name__ == "__main__":
    main()