# UDD-1 / src/fetch_uvw_data.py
# Snapshot from HuggingFace repo, commit f5d0a0d:
# "Expand UDD-1 to 40K sentences across 5 domains" (rain1024)
"""
Fetch data from HuggingFace dataset undertheseanlp/UVW-2026
- Get articles with quality_score >= 5
- Segment sentences using underthesea
- Get first 8000 sentences
"""
import re
from os.path import dirname, join
from datasets import load_dataset
from underthesea import sent_tokenize, text_normalize
def clean_text(text):
    """Normalize Unicode and strip markdown/wiki formatting from *text*.

    Applies underthesea's text normalization, then removes markdown
    headers, emphasis markers, horizontal rules, and inline links
    (keeping the link text), collapses blank lines, and trims
    whitespace around every remaining line.
    """
    # Unicode normalization via underthesea.
    text = text_normalize(text)
    # Ordered (pattern, replacement, flags) substitutions; order matters
    # only in that link text must survive before line trimming.
    substitutions = (
        (r'^#+\s+', '', re.MULTILINE),          # markdown headers
        (r'\*+', '', 0),                        # bold/italic markers
        (r'^-+$', '', re.MULTILINE),            # horizontal rules
        (r'\[([^\]]+)\]\([^)]+\)', r'\1', 0),   # [text](url) -> text
        (r'\n{2,}', '\n', 0),                   # collapse blank lines
    )
    for pattern, repl, flags in substitutions:
        text = re.sub(pattern, repl, text, flags=flags)
    # Trim leading/trailing whitespace on each line.
    return '\n'.join(line.strip() for line in text.split('\n'))
def is_valid_sentence(sent):
    """Check whether *sent* is usable for UD annotation.

    Returns a ``(valid, stripped_sentence)`` pair.  A sentence is
    rejected when it is empty, shorter than 20 or longer than 300
    characters, lacks Vietnamese diacritics, is mostly uppercase, or
    looks like a Wikipedia artifact: stub marker, category/navigation
    line, infobox remnant, reference mark, URL, number-heavy table
    row, or bullet/list item.
    """
    sent = sent.strip()
    rejected = (False, sent)

    # Empty, or outside the accepted length window.
    if not sent or len(sent) < 20 or len(sent) > 300:
        return rejected
    # Must contain at least one accented Vietnamese character.
    if not re.search(r'[àáảãạăắằẳẵặâấầẩẫậèéẻẽẹêếềểễệìíỉĩịòóỏõọôốồổỗộơớờởỡợùúủũụưứừửữựỳýỷỹỵđ]', sent, re.IGNORECASE):
        return rejected
    # Mostly-uppercase text is a header or title, not prose.
    if sum(c.isupper() for c in sent) > len(sent) * 0.5:
        return rejected
    # Wikipedia stub markers.
    if re.search(r'(bài sơ khai|sơ khai về|cần được mở rộng|Thể loại:)', sent):
        return rejected
    # Category / navigation section openers.
    if re.match(r'^(Thể loại|Danh sách|Xem thêm|Tham khảo|Liên kết ngoài|Chú thích)', sent):
        return rejected
    # Infobox remnants: pipe-separated cells or key=value pairs.
    if sent.count('|') > 2:
        return rejected
    if sent.count('=') > 1 and re.search(r'\w+=\w+', sent):
        return rejected
    # Reference fragments such as [1] or [cần dẫn nguồn].
    if re.search(r'\[\d+\]', sent) or re.search(r'\[cần', sent):
        return rejected
    # URLs.
    if re.search(r'(http|www\.|\.com|\.org)', sent, re.IGNORECASE):
        return rejected
    # Number-heavy strings are usually table rows.
    if sum(c.isdigit() for c in sent) > len(sent) * 0.3:
        return rejected
    # Bulleted or dashed list items.
    if re.match(r'^[\*\-•]\s', sent):
        return rejected

    return True, sent
# Number of sentences to collect and write to the output file.
TARGET_COUNT = 8000
def fetch_and_process():
    """Download UVW-2026, filter and segment it, and write sentences_uvw.txt.

    Pipeline: load the train split from HuggingFace, keep articles with
    quality_score >= 5 (missing scores count as 0), clean and
    sentence-tokenize each article, keep valid sentences until
    TARGET_COUNT are collected, then write them tab-numbered to
    ``sentences_uvw.txt`` next to the ``src`` directory and print a
    five-sentence sample.
    """
    print("Loading UVW-2026 dataset from HuggingFace...")
    dataset = load_dataset("undertheseanlp/UVW-2026", split="train")
    print(f"Total articles in dataset: {len(dataset)}")

    print("Filtering articles by quality_score >= 5...")
    # A missing/None quality_score is treated as 0 and filtered out.
    articles = [row for row in dataset if (row.get("quality_score") or 0) >= 5]
    print(f"High-quality articles: {len(articles)}")

    print("Segmenting sentences...")
    collected = []
    for article_no, article in enumerate(articles, 1):
        body = clean_text(article["content"])
        for raw in sent_tokenize(body):
            ok, cleaned = is_valid_sentence(raw.strip())
            if ok:
                collected.append(cleaned)
        # Stop reading articles once we have enough sentences.
        if len(collected) >= TARGET_COUNT:
            print(f"Processed {article_no} articles")
            break

    selected = collected[:TARGET_COUNT]
    print(f"Total sentences collected: {len(selected)}")

    # One "<index>\t<sentence>" line per sentence, 1-based.
    out_path = join(dirname(dirname(__file__)), "sentences_uvw.txt")
    with open(out_path, "w", encoding="utf-8") as handle:
        handle.writelines(f"{n}\t{s}\n" for n, s in enumerate(selected, 1))
    print(f"Saved to: {out_path}")

    print("\nSample sentences:")
    for n, s in enumerate(selected[:5], 1):
        print(f" {n}. {s[:80]}...")
# Run the pipeline only when executed as a script, not when imported.
if __name__ == "__main__":
    fetch_and_process()