| | |
| | |
| | |
| | |
| | """Fix known word segmentation errors in UDD-1.1 BIO files. |
| | |
Six fix passes:
| | 1. Split cross-boundary merges (uppercase mid-token signals) |
| | 1.5 Split long tokens (5+ syllables) via vocab-based greedy decomposition |
| | 2. Merge always-split compounds (dictionary compounds + inconsistent forms) |
| | 2.5 Split foreign word merges (Latin-script tokens without Vietnamese diacritics) |
| | 2.75 Split proper name boundary merges (uppercase→lowercase transitions within words) |
| | 3. Validate BIO invariants |
| | |
| | Usage: |
| | uv run src/fix_ws_errors.py # Fix all splits, write report |
| | uv run src/fix_ws_errors.py --dry-run # Report only, no file changes |
| | """ |
| |
|
import argparse
import re
import sys
from collections import Counter, defaultdict
from os.path import basename, dirname, isfile, join
| |
|
| | |
| | |
| | |
| |
|
| | |
| | |
| | |
# Compounds the corpus consistently writes as separate words but which should
# be single words. Entries are tuples of lowercased syllables; Pass 2 matches
# case-insensitively, longest candidate first.
MERGE_TERMS = {
    # Legal / procedural vocabulary (e.g. "vụ án" = court case,
    # "phạt tù" = prison sentence) — presumably from the legal subcorpus.
    ("vụ", "án"),
    ("phạt", "tù"),
    ("hủy", "bỏ"),
    ("chữa", "bệnh"),
    ("lời", "khai"),
    ("kèm", "theo"),
    ("ghi", "rõ"),
    ("trả", "lại"),
    ("khám", "bệnh"),
    ("rút", "gọn"),
    ("giấy", "chứng", "nhận"),
    ("tù", "chung", "thân"),
    ("quá", "hạn"),
    ("làm", "chủ"),
    ("ô", "nhiễm", "môi", "trường"),
    # Court / commerce terms (e.g. "phiên tòa" = court session).
    ("phiên", "tòa"),
    ("hàng", "hóa"),
    ("chủ", "tọa"),
    ("bị", "hại"),
    ("tiền", "công"),
    ("thuê", "khoán"),
    ("hòa", "giải"),
    ("bốc", "hàng"),
    # Miscellaneous / literary compounds.
    ("ủy", "ban"),
    ("lính", "thú"),
    ("mu", "rùa"),
    ("trêu", "ghẹo"),
    ("sương", "mai"),
    ("mái", "nhà"),
    ("nghiến", "răng"),
    ("nheo", "nheo"),
    ("dơn", "dớt"),
    ("xua", "tay"),
    ("nói", "gở"),
    ("bơi", "chó"),
    ("người", "thương"),
    ("chăn", "lợn"),
    ("khay", "trà"),
    ("đồng", "tự"),
    ("tại", "ngũ"),
    ("hành", "chánh"),
    ("lượng", "tử"),
    ("tích", "lũy"),
    ("siêu", "máy", "tính"),
    ("đường", "thẳng"),
    ("đầm", "đuôi", "cá"),
    ("như", "điên"),
    ("tẩy", "chay"),
}

# Terms bucketed by syllable count so Pass 2 can try the longest window first.
_MERGE_BY_LENGTH = defaultdict(list)
for _term in MERGE_TERMS:
    _MERGE_BY_LENGTH[len(_term)].append(_term)
# Upper bound for Pass 2's match window (longest term in the table).
MERGE_MAX_LEN = max(len(t) for t in MERGE_TERMS)
| |
|
| | |
| | |
| | |
# Lowercased forms of multi-syllable words that legitimately contain a
# lowercase→uppercase transition (institution names, law titles, a person
# name); Pass 1 must NOT split these even though they trip its heuristic.
CROSS_BOUNDARY_WHITELIST = {
    "xã hội chủ nghĩa việt nam",
    "bộ tư pháp",
    "mặt trận tổ quốc việt nam",
    "đảng cộng sản việt nam",
    "tổng liên đoàn lao động",
    "hội đồng trọng tài",
    "chủ tịch",
    "đại lý",
    "nguyễn sinh hùng",
    "luật bảo hiểm xã hội",
    "luật bảo vệ",
    "bộ luật lao động",
    "pháp lệnh dân số",
    "bộ tài nguyên và môi trường",
    # National Assembly session labels: "khóa" + Roman numeral.
    "khóa xiii",
    "khóa xv",
    "khóa xiv",
    "khóa xii",
    "khóa xi",
}
| |
|
| |
|
| | |
| | |
| | |
| |
|
def parse_bio_file(filepath):
    """Parse a BIO file into a list of sentences.

    Each sentence is a dict with keys: sent_id, text, syllables, tags.
    Comment lines other than sent_id/text are ignored; a blank line
    terminates the current sentence, and a trailing sentence without a
    terminating blank line is flushed at EOF.
    """
    def _empty():
        return {"sent_id": "", "text": "", "syllables": [], "tags": []}

    sentences = []
    current = _empty()

    with open(filepath, "r", encoding="utf-8") as f:
        for raw in f:
            line = raw.rstrip("\n")
            if line.startswith("# sent_id = "):
                current["sent_id"] = line.split("= ", 1)[1]
            elif line.startswith("# text = "):
                current["text"] = line.split("= ", 1)[1]
            elif line.startswith("#"):
                # Other comment lines carry no data.
                continue
            elif not line:
                # Sentence boundary: flush any accumulated tokens.
                if current["syllables"]:
                    sentences.append(dict(current))
                    current = _empty()
            else:
                cols = line.split("\t")
                if len(cols) == 2:
                    current["syllables"].append(cols[0])
                    current["tags"].append(cols[1])

    # Flush a final sentence that lacked a terminating blank line.
    if current["syllables"]:
        sentences.append(dict(current))

    return sentences
| |
|
| |
|
def write_bio_file(sentences, filepath):
    """Serialize sentences back to BIO format (syllable TAB tag per line)."""
    with open(filepath, "w", encoding="utf-8") as f:
        for sent in sentences:
            block = [
                f"# sent_id = {sent['sent_id']}",
                f"# text = {sent['text']}",
            ]
            block.extend(
                f"{syl}\t{tag}"
                for syl, tag in zip(sent["syllables"], sent["tags"])
            )
            # Trailing blank line separates sentences.
            f.write("\n".join(block) + "\n\n")
| |
|
| |
|
def bio_to_words(syllables, tags):
    """Convert syllable-level BIO tags to a list of space-joined words."""
    words = []
    n = min(len(syllables), len(tags))
    start = 0
    # Each B-W after position 0 closes the word that began at `start`.
    for i in range(1, n):
        if tags[i] == "B-W":
            words.append(" ".join(syllables[start:i]))
            start = i
    if n > 0:
        words.append(" ".join(syllables[start:n]))
    return words
| |
|
| |
|
def write_conllu(sentences, filepath):
    """Write sentences as word-level CoNLL-U (underscore placeholders only)."""
    with open(filepath, "w", encoding="utf-8") as f:
        for sent in sentences:
            f.write(f"# sent_id = {sent['sent_id']}\n")
            f.write(f"# text = {sent['text']}\n")
            tokens = bio_to_words(sent["syllables"], sent["tags"])
            for idx, word in enumerate(tokens, start=1):
                # ID, FORM, then 8 unannotated columns.
                f.write("\t".join([str(idx), word] + ["_"] * 8) + "\n")
            f.write("\n")
| |
|
| |
|
| | |
| | |
| | |
| |
|
def build_split_vocab(all_sentences, min_count=5):
    """Collect 2-4 syllable word forms for decomposing over-long tokens.

    Every 2-4 syllable word in the dataset is counted case-insensitively;
    forms seen at least `min_count` times are kept, except those whose first
    or last syllable is a boundary function word (e.g. "tài nguyên và").
    """
    edge_stopwords = {"và", "hoặc"}

    freq = Counter()
    for sent in all_sentences:
        for word in bio_to_words(sent["syllables"], sent["tags"]):
            parts = word.lower().split()
            if 2 <= len(parts) <= 4:
                freq[" ".join(parts)] += 1

    kept = set()
    for form, count in freq.items():
        if count < min_count:
            continue
        parts = form.split()
        if parts[0] in edge_stopwords or parts[-1] in edge_stopwords:
            continue
        kept.add(form)
    return kept
| |
|
| |
|
def build_viet_syllables(all_sentences, min_count=50):
    """Return lowercased syllables occurring at least `min_count` times.

    The resulting set lets Pass 2.5 tell genuine Vietnamese multi-syllable
    words (like "kinh doanh") apart from truly foreign tokens (like
    "Max Planck").
    """
    freq = Counter(
        syl.lower()
        for sent in all_sentences
        for syl in sent["syllables"]
    )
    return {syl for syl, count in freq.items() if count >= min_count}
| |
|
| |
|
| | |
| | |
| | |
| |
|
def fix_cross_boundary(syllables, tags):
    """Pass 1: Split cross-boundary merges.

    A lowercase→uppercase transition inside a multi-syllable word (e.g.
    "tố tụng Người") signals two words accidentally merged across a
    boundary; proper names like "Việt Nam" (uppercase throughout) show no
    such transition and are untouched. Words whose lowercased form is in
    CROSS_BOUNDARY_WHITELIST are skipped.

    Returns (new_tags, list of change descriptions).
    """
    new_tags = list(tags)
    changes = []

    # Half-open (start, end) word spans derived from B-W positions.
    starts = [i for i, t in enumerate(tags) if t == "B-W" and i > 0]
    spans = list(zip([0] + starts, starts + [len(tags)]))

    def _is_transition(j):
        prev, curr = syllables[j - 1], syllables[j]
        return bool(curr) and curr[0].isupper() and bool(prev) and not prev[0].isupper()

    for start, end in spans:
        if end - start < 2:
            continue

        transitions = [j for j in range(start + 1, end) if _is_transition(j)]
        if not transitions:
            continue

        lowered = " ".join(s.lower() for s in syllables[start:end])
        if lowered in CROSS_BOUNDARY_WHITELIST:
            continue

        original = " ".join(syllables[start:end])
        # Start a new word at every transition point.
        for j in transitions:
            new_tags[j] = "B-W"

        pieces = bio_to_words(syllables[start:end], new_tags[start:end])
        changes.append(f"split \"{original}\" → {' + '.join(repr(p) for p in pieces)}")

    return new_tags, changes
| |
|
| |
|
def fix_split_long_tokens(syllables, tags, vocab):
    """Pass 1.5: Split 5+ syllable tokens via greedy vocab decomposition.

    For each word with 5+ syllables, apply greedy left-to-right longest
    match against `vocab`, trying 4-, 3-, then 2-syllable windows; any
    syllable that matches nothing becomes a single-syllable word.

    Returns (new_tags, list of change descriptions).
    """
    new_tags = list(tags)
    changes = []

    # Half-open (start, end) word spans derived from B-W positions.
    starts = [i for i, t in enumerate(tags) if t == "B-W" and i > 0]
    spans = list(zip([0] + starts, starts + [len(tags)]))

    for start, end in spans:
        if end - start < 5:
            continue

        original = " ".join(syllables[start:end])
        pieces = []
        cursor = start
        while cursor < end:
            # Longest match first: 4 → 3 → 2 syllables.
            for width in range(min(4, end - cursor), 1, -1):
                form = " ".join(
                    s.lower() for s in syllables[cursor:cursor + width]
                )
                if form in vocab:
                    pieces.append((cursor, cursor + width))
                    cursor += width
                    break
            else:
                # No vocab hit: emit a one-syllable word.
                pieces.append((cursor, cursor + 1))
                cursor += 1

        if len(pieces) <= 1:
            continue

        # Retag the span according to the decomposition.
        for p_start, p_end in pieces:
            new_tags[p_start] = "B-W"
            for j in range(p_start + 1, p_end):
                new_tags[j] = "I-W"

        parts = bio_to_words(syllables[start:end], new_tags[start:end])
        changes.append(
            f"split \"{original}\" → "
            f"{' + '.join(repr(p) for p in parts)}"
        )

    return new_tags, changes
| |
|
| |
|
def fix_merge_compounds(syllables, tags):
    """Pass 2: Merge always-split compounds.

    Walks the tags left to right. At each B-W position, tries window sizes
    from longest to shortest; a window qualifies only if every covered
    syllable starts its own word (all B-W). A case-insensitive hit in
    MERGE_TERMS turns the trailing B-W tags into I-W, fusing the compound.

    Returns (new_tags, list of change descriptions).
    """
    new_tags = list(tags)
    changes = []
    n = len(syllables)
    i = 0

    while i < n:
        if new_tags[i] != "B-W":
            i += 1
            continue

        advance = 1  # default step when nothing matches
        for length in range(min(MERGE_MAX_LEN, n - i), 1, -1):
            if length not in _MERGE_BY_LENGTH:
                continue

            window = range(i, i + length)
            # All covered syllables must currently begin a word.
            if any(new_tags[j] != "B-W" for j in window if j > i):
                continue

            candidate = tuple(syllables[j].lower() for j in window)
            if candidate in MERGE_TERMS:
                for j in window:
                    if j > i:
                        new_tags[j] = "I-W"
                merged = " ".join(syllables[j] for j in window)
                changes.append(f"merge \"{merged}\"")
                advance = length
                break

        i += advance

    return new_tags, changes
| |
|
| |
|
| | def _is_latin_no_vietnamese(s): |
| | """Check if a string is purely Latin-script without Vietnamese diacritics. |
| | |
| | Returns True for ASCII Latin (a-z, A-Z, 0-9, hyphen) and common Latin |
| | extensions BUT NOT Vietnamese-specific characters (ă, â, đ, ê, ô, ơ, ư |
| | and their tone marks). |
| | """ |
| | |
| | vietnamese_chars = re.compile( |
| | r'[àáảãạăắằẳẵặâấầẩẫậèéẻẽẹêếềểễệìíỉĩịòóỏõọôốồổỗộơớờởỡợ' |
| | r'ùúủũụưứừửữựỳýỷỹỵđÀÁẢÃẠĂẮẰẲẴẶÂẤẦẨẪẬÈÉẺẼẸÊẾỀỂỄỆÌÍỈĨỊ' |
| | r'ÒÓỎÕỌÔỐỒỔỖỘƠỚỜỞỠỢÙÚỦŨỤƯỨỪỬỮỰỲÝỶỸỴĐ]' |
| | ) |
| | if vietnamese_chars.search(s): |
| | return False |
| | |
| | return bool(re.search(r'[a-zA-Z]', s)) |
| |
|
| |
|
| | |
# Lowercased, space-joined tokens that Pass 2.5 must keep as a single word.
# NOTE(review): Pass 2.5 only examines spans of 2+ syllables and compares the
# space-joined lowercased span against this set, so single-token entries like
# "beethoven" can apparently never match — confirm whether these are vestigial
# or whether multi-syllable forms were intended.
FOREIGN_NAME_WHITELIST = {
    "beethoven", "homer", "odysseus", "cecelia", "ahern", "holly",
    "hideoshi", "gurth", "euler", "hilbert", "rydberg", "bohr",
    "frankael-zermelo", "giambattista", "valli", "dachau",
    "habsburg", "newton", "einstein", "darwin", "shakespeare",
}
| |
|
| |
|
def fix_foreign_words(syllables, tags, viet_syllables):
    """Pass 2.5: Split foreign word merges.

    A multi-syllable token is split into one word per syllable when every
    syllable is Latin-script without Vietnamese diacritics AND no syllable
    is a common Vietnamese one (so words like "kinh doanh" survive).
    Tokens whose lowercased joined form is in FOREIGN_NAME_WHITELIST are
    kept whole.

    Args:
        syllables: list of syllable strings.
        tags: list of BIO tag strings.
        viet_syllables: set of common Vietnamese syllables (lowercase) used
            to filter out false positives.

    Returns (new_tags, list of change descriptions).
    """
    new_tags = list(tags)
    changes = []

    # Half-open (start, end) word spans derived from B-W positions.
    starts = [i for i, t in enumerate(tags) if t == "B-W" and i > 0]
    spans = zip([0] + starts, starts + [len(tags)])

    for start, end in spans:
        if end - start < 2:
            continue

        chunk = syllables[start:end]
        # Every syllable must look foreign (pure Latin, no diacritics).
        if not all(_is_latin_no_vietnamese(s) for s in chunk):
            continue
        # Any common Vietnamese syllable disqualifies the token.
        if any(s.lower() in viet_syllables for s in chunk):
            continue

        joined = " ".join(chunk)
        if joined.lower() in FOREIGN_NAME_WHITELIST:
            continue

        # Promote every non-initial syllable to its own word.
        for j in range(start + 1, end):
            new_tags[j] = "B-W"

        changes.append(
            f"split-foreign \"{joined}\" → {' + '.join(repr(p) for p in chunk)}"
        )

    return new_tags, changes
| |
|
| |
|
| | |
| | |
| | |
| | |
| | |
# Lowercased FIRST syllables of tokens that Pass 2.75 must leave alone:
# legitimate compounds (e.g. institutional names starting "ủy ...", "bộ ...")
# whose internal uppercase→lowercase transitions are not merge errors.
# "S1" presumably abbreviates "syllable 1" — the check keys on the token's
# first syllable only.
NAME_BOUNDARY_WHITELIST_S1 = {
    "ủy",
    "viện",
    "tổng",
    "nhà",
    "phòng",
    "cảng",
    "xuất",
    "sách",
    "thuế",
    "cây",
    "nói",
    "bộ",
    "đại",
    "lò",
    "ngay",
    "việc",
    "vùng",
    "sân",
    "tiểu",
    "trang",
    "tết",
    "thuyết",
    "điểm",
    "lý",
    "hệ",
}
| |
|
| |
|
def fix_proper_name_boundary(syllables, tags, vocab):
    """Pass 2.75: Split proper name boundary merges.

    Looks for an uppercase→lowercase transition inside a 3+ syllable token
    where the lowercase remainder — or at least its first two syllables —
    is a known vocab word. Catches cases like "Tống tiêu diệt" →
    "Tống" + "tiêu diệt" where a proper name absorbed the following
    verb/noun. Tokens whose first syllable is in NAME_BOUNDARY_WHITELIST_S1
    (institutional compounds) are skipped.

    Returns (new_tags, list of change descriptions).
    """
    new_tags = list(tags)
    changes = []

    # Half-open (start, end) word spans derived from B-W positions.
    starts = [i for i, t in enumerate(tags) if t == "B-W" and i > 0]
    spans = zip([0] + starts, starts + [len(tags)])

    for start, end in spans:
        if end - start < 3:
            continue
        if syllables[start].lower() in NAME_BOUNDARY_WHITELIST_S1:
            continue

        cut = None
        for j in range(start + 1, end):
            prev, curr = syllables[j - 1], syllables[j]
            is_case_drop = (
                prev and prev[0].isupper() and curr and not curr[0].isupper()
            )
            if not is_case_drop:
                continue
            # Accept the cut if the whole tail is a known word...
            tail = " ".join(s.lower() for s in syllables[j:end])
            if tail in vocab:
                cut = j
                break
            # ...or if at least its first two syllables form one.
            if end - j >= 2:
                head2 = " ".join(s.lower() for s in syllables[j:j + 2])
                if head2 in vocab:
                    cut = j
                    break

        if cut is None:
            continue

        original = " ".join(syllables[start:end])
        new_tags[cut] = "B-W"
        pieces = bio_to_words(syllables[start:end], new_tags[start:end])
        changes.append(
            f"split-name-boundary \"{original}\" → "
            f"{' + '.join(repr(p) for p in pieces)}"
        )

    return new_tags, changes
| |
|
| |
|
def validate_sentence(syllables, tags):
    """Pass 3: Validate BIO invariants.

    Checks that a non-empty sentence starts with B-W and that every tag is
    either B-W or I-W. Returns a list of error descriptions (empty if the
    sentence is valid).
    """
    if not syllables:
        return []

    errors = []
    if tags[0] != "B-W":
        errors.append(f"sentence starts with {tags[0]} instead of B-W")
    errors.extend(
        f"position {i}: invalid tag '{tag}'"
        for i, tag in enumerate(tags)
        if tag not in ("B-W", "I-W")
    )
    return errors
| |
|
| |
|
| | |
| | |
| | |
| |
|
def generate_report(all_stats, output_path=None):
    """Build the markdown report of all changes; optionally write it.

    Returns the report text. When `output_path` is given, the report is
    also written there.
    """
    out = [
        "# WS Fix Report",
        "",
        "Fixes applied by `src/fix_ws_errors.py` to UDD-1.1 word segmentation BIO files.",
        "",
        "## Summary",
        "",
        "| File | Cross-boundary | Long token | Compound merges | Foreign splits | Name boundary | Validation errors |",
        "|------|---------------:|-----------:|----------------:|---------------:|--------------:|------------------:|",
    ]

    # Per-file summary rows plus a running total.
    totals = [0, 0, 0, 0, 0, 0]
    for fname, stats in all_stats.items():
        row = [
            stats["n_cross_boundary"],
            stats["n_split_long"],
            stats["n_merge"],
            stats.get("n_foreign", 0),
            stats.get("n_name_boundary", 0),
            stats["n_validation_errors"],
        ]
        totals = [t + v for t, v in zip(totals, row)]
        cells = " | ".join(f"{v:,}" for v in row)
        out.append(f"| {fname} | {cells} |")
    bold_cells = " | ".join(f"**{v:,}**" for v in totals)
    out.append(f"| **TOTAL** | {bold_cells} |")
    out.append("")

    # Aggregate merge counts across files, most frequent first.
    out.append("## Merge Frequency by Term")
    out.append("")
    merge_counts = Counter()
    for stats in all_stats.values():
        merge_counts += stats["merge_term_counts"]
    out.append("| Term | Count |")
    out.append("|:-----|------:|")
    out.extend(
        f"| {term} | {count:,} |" for term, count in merge_counts.most_common()
    )
    out.append("")

    def _example_section(title, key, limit, strict):
        # `strict` mirrors whether the stats key is accessed directly
        # (KeyError on absence) or via .get with an empty default.
        out.append(f"## {title}")
        out.append("")
        for fname, stats in all_stats.items():
            examples = stats[key] if strict else stats.get(key, [])
            if not examples:
                continue
            out.append(f"### {fname}")
            out.append("")
            out.extend(f"- {ex}" for ex in examples[:limit])
            if len(examples) > limit:
                out.append(f"- ... and {len(examples) - limit} more")
            out.append("")

    _example_section("Cross-Boundary Split Examples",
                     "cross_boundary_examples", 20, strict=True)
    _example_section("Long Token Split Examples",
                     "split_long_examples", 30, strict=True)
    _example_section("Foreign Word Split Examples",
                     "foreign_examples", 30, strict=False)
    _example_section("Name Boundary Split Examples",
                     "name_boundary_examples", 30, strict=False)

    report = "\n".join(out)

    if output_path:
        with open(output_path, "w", encoding="utf-8") as f:
            f.write(report)
        print(f"\nReport written to {output_path}")

    return report
| |
|
| |
|
| | |
| | |
| | |
| |
|
def process_file(filepath, vocab=None, viet_syllables=None, sentences=None, dry_run=False):
    """Process a single BIO file: apply fixes, optionally write back.

    Passes run in a fixed order (1 → 1.5 → 2 → 2.5 → 2.75 → validation),
    each consuming the tags produced by the previous pass.

    Args:
        filepath: Path to BIO file.
        vocab: Set of known 2-4 syllable words for long-token splitting.
            If None, Pass 1.5 and 2.75 are skipped.
        viet_syllables: Set of common Vietnamese syllables for foreign word
            filtering. If None, Pass 2.5 is skipped.
        sentences: Pre-parsed sentences (avoids re-parsing if already loaded).
        dry_run: If True, report changes without modifying files.

    Returns (sentences, stats_dict).
    """
    print(f"\nProcessing {filepath}...")
    if sentences is None:
        sentences = parse_bio_file(filepath)
    print(f" Loaded {len(sentences):,} sentences")

    # Baseline counts; the syllable total must be invariant under all passes.
    total_syllables_before = sum(len(s["syllables"]) for s in sentences)
    total_words_before = sum(
        sum(1 for t in s["tags"] if t == "B-W") for s in sentences
    )

    # Per-pass change counters, example collections, and merge-term tallies.
    n_cross_boundary = 0
    n_split_long = 0
    n_merge = 0
    n_foreign = 0
    n_name_boundary = 0
    n_validation_errors = 0
    cross_boundary_examples = []
    split_long_examples = []
    foreign_examples = []
    name_boundary_examples = []
    merge_term_counts = Counter()

    for sent in sentences:
        syls = sent["syllables"]

        # Pass 1: split cross-boundary merges (lowercase→uppercase signals).
        tags, cb_changes = fix_cross_boundary(syls, sent["tags"])
        n_cross_boundary += len(cb_changes)
        for ch in cb_changes:
            cross_boundary_examples.append(f"[{sent['sent_id']}] {ch}")

        # Pass 1.5: decompose 5+ syllable tokens (needs the split vocab).
        if vocab is not None:
            tags, split_changes = fix_split_long_tokens(syls, tags, vocab)
            n_split_long += len(split_changes)
            for ch in split_changes:
                split_long_examples.append(f"[{sent['sent_id']}] {ch}")

        # Pass 2: merge always-split compounds.
        tags, merge_changes = fix_merge_compounds(syls, tags)
        n_merge += len(merge_changes)
        for ch in merge_changes:
            # Change strings look like: merge "vụ án" — pull out the quoted
            # term for the per-term frequency table in the report.
            term = ch.split('"')[1] if '"' in ch else ch
            merge_term_counts[term.lower()] += 1

        # Pass 2.5: split foreign-word merges (needs syllable frequency set).
        if viet_syllables is not None:
            tags, fw_changes = fix_foreign_words(syls, tags, viet_syllables)
            n_foreign += len(fw_changes)
            for ch in fw_changes:
                foreign_examples.append(f"[{sent['sent_id']}] {ch}")

        # Pass 2.75: split proper-name boundary merges (needs the vocab).
        if vocab is not None:
            tags, nb_changes = fix_proper_name_boundary(syls, tags, vocab)
            n_name_boundary += len(nb_changes)
            for ch in nb_changes:
                name_boundary_examples.append(f"[{sent['sent_id']}] {ch}")

        # Pass 3: validate BIO invariants on the final tag sequence.
        errors = validate_sentence(syls, tags)
        n_validation_errors += len(errors)
        if errors:
            print(f" WARN [{sent['sent_id']}]: {'; '.join(errors)}")

        sent["tags"] = tags

    total_syllables_after = sum(len(s["syllables"]) for s in sentences)
    total_words_after = sum(
        sum(1 for t in s["tags"] if t == "B-W") for s in sentences
    )

    print(f" Cross-boundary splits: {n_cross_boundary:,}")
    print(f" Long token splits: {n_split_long:,}")
    print(f" Compound merges: {n_merge:,}")
    print(f" Foreign word splits: {n_foreign:,}")
    print(f" Name boundary splits: {n_name_boundary:,}")
    print(f" Validation errors: {n_validation_errors:,}")
    print(f" Words: {total_words_before:,} → {total_words_after:,} "
          f"(Δ{total_words_after - total_words_before:+,})")
    # All passes only retag word boundaries; the syllable stream itself must
    # be byte-identical before and after.
    assert total_syllables_before == total_syllables_after, \
        f"Syllable count changed: {total_syllables_before} → {total_syllables_after}"
    print(f" Syllables: {total_syllables_before:,} (unchanged)")

    if not dry_run:
        write_bio_file(sentences, filepath)
        print(f" Written: {filepath}")

        # Word-level CoNLL-U companion file alongside the BIO file.
        conllu_path = filepath.replace(".txt", ".conllu")
        write_conllu(sentences, conllu_path)
        print(f" Written: {conllu_path}")

    stats = {
        "n_cross_boundary": n_cross_boundary,
        "n_split_long": n_split_long,
        "n_merge": n_merge,
        "n_foreign": n_foreign,
        "n_name_boundary": n_name_boundary,
        "n_validation_errors": n_validation_errors,
        "cross_boundary_examples": cross_boundary_examples,
        "split_long_examples": split_long_examples,
        "foreign_examples": foreign_examples,
        "name_boundary_examples": name_boundary_examples,
        "merge_term_counts": merge_term_counts,
        "words_before": total_words_before,
        "words_after": total_words_after,
    }

    return sentences, stats
| |
|
| |
|
def main():
    """CLI entry point: load all splits, build shared resources, run fixes.

    The split vocab and Vietnamese-syllable set are built over ALL splits
    (train/dev/test) before any file is processed, so every file is fixed
    against the same statistics.
    """
    parser = argparse.ArgumentParser(
        description="Fix known word segmentation errors in UDD-1.1 BIO files."
    )
    parser.add_argument(
        "--dry-run", action="store_true",
        help="Report changes without modifying files"
    )
    args = parser.parse_args()

    base_dir = dirname(dirname(__file__))
    bio_files = [
        join(base_dir, f"udd-ws-v1.1-{split}.txt")
        for split in ("train", "dev", "test")
    ]

    # Fail fast if any split file is missing.
    for path in bio_files:
        if not isfile(path):
            print(f"ERROR: {path} not found", file=sys.stderr)
            sys.exit(1)

    if args.dry_run:
        print("=== DRY RUN — no files will be modified ===")

    # Load everything up front: the shared vocab/syllable statistics must be
    # computed over all splits before any single file is fixed.
    all_sentences_by_file = {}
    for path in bio_files:
        print(f"Loading {path}...")
        all_sentences_by_file[path] = parse_bio_file(path)
        print(f" {len(all_sentences_by_file[path]):,} sentences")

    all_sents = [s for sents in all_sentences_by_file.values() for s in sents]
    vocab = build_split_vocab(all_sents)
    print(f"\nBuilt split vocab: {len(vocab):,} entries "
          f"(2-4 syllable words with count >= 5)")
    viet_syllables = build_viet_syllables(all_sents)
    print(f"Built Vietnamese syllable set: {len(viet_syllables):,} entries "
          f"(syllables with count >= 50)")

    all_stats = {}
    for path in bio_files:
        # basename() instead of rsplit("/"): os.path.join uses the platform
        # separator, so splitting on "/" breaks on Windows.
        fname = basename(path)
        _, stats = process_file(
            path,
            vocab=vocab,
            viet_syllables=viet_syllables,
            sentences=all_sentences_by_file[path],
            dry_run=args.dry_run,
        )
        all_stats[fname] = stats

    # Write the report to disk normally; in dry-run mode print it instead.
    report_path = join(base_dir, "WS_FIX_REPORT.md")
    if not args.dry_run:
        generate_report(all_stats, report_path)
    else:
        report = generate_report(all_stats)
        print("\n" + report)

    # Grand totals across all files.
    total_splits = sum(s["n_cross_boundary"] for s in all_stats.values())
    total_long = sum(s["n_split_long"] for s in all_stats.values())
    total_merges = sum(s["n_merge"] for s in all_stats.values())
    total_foreign = sum(s.get("n_foreign", 0) for s in all_stats.values())
    total_name_boundary = sum(s.get("n_name_boundary", 0) for s in all_stats.values())
    total_errors = sum(s["n_validation_errors"] for s in all_stats.values())
    print(f"\n{'='*50}")
    print(f"TOTAL: {total_splits:,} cross-boundary splits, "
          f"{total_long:,} long token splits, "
          f"{total_merges:,} compound merges, "
          f"{total_foreign:,} foreign word splits, "
          f"{total_name_boundary:,} name boundary splits, "
          f"{total_errors:,} validation errors")
    if args.dry_run:
        print("(dry run — no files modified)")
| |
|
| |
|
# Allow both direct execution and import without side effects.
if __name__ == "__main__":
    main()
| |
|