"""
Fetch data from HuggingFace dataset undertheseanlp/UVW-2026
- Get articles with quality_score >= 5
- Segment sentences using underthesea
- Get first 8000 sentences
"""
import re
from os.path import dirname, join
from datasets import load_dataset
from underthesea import sent_tokenize, text_normalize
def clean_text(text):
    """Strip markdown/wiki formatting from *text* and normalize it.

    Uses underthesea's text_normalize for Unicode/tone normalization,
    then removes markdown artifacts and trims each line.
    """
    # Normalize Unicode first so the regexes below see canonical text.
    text = text_normalize(text)
    # Markdown-stripping substitutions, applied in order.
    substitutions = (
        (r'^#+\s+', '', re.MULTILINE),         # markdown headers
        (r'\*+', '', 0),                       # bold/italic markers
        (r'^-+$', '', re.MULTILINE),           # horizontal rules
        (r'\[([^\]]+)\]\([^)]+\)', r'\1', 0),  # links -> keep link text
        (r'\n{2,}', '\n', 0),                  # collapse blank-line runs
    )
    for pattern, repl, flags in substitutions:
        text = re.sub(pattern, repl, text, flags=flags)
    # Trim leading/trailing whitespace on every remaining line.
    return '\n'.join(line.strip() for line in text.split('\n'))
def is_valid_sentence(sent):
    """Decide whether *sent* is suitable for UD annotation.

    Returns a ``(keep, stripped_sentence)`` tuple; the second element is
    always the whitespace-stripped sentence, accepted or not.
    """
    sent = sent.strip()

    def _rejected():
        # Empty, or outside the useful length band (20..300 chars).
        if not sent or len(sent) < 20 or len(sent) > 300:
            return True
        # Must contain at least one Vietnamese diacritic character.
        if re.search(
            r'[àáảãạăắằẳẵặâấầẩẫậèéẻẽẹêếềểễệìíỉĩịòóỏõọôốồổỗộơớờởỡợùúủũụưứừửữựỳýỷỹỵđ]',
            sent, re.IGNORECASE,
        ) is None:
            return True
        # Mostly uppercase: likely a header or title.
        if sum(c.isupper() for c in sent) > 0.5 * len(sent):
            return True
        # Wikipedia stub markers and navigation/category boilerplate.
        if re.search(r'(bài sơ khai|sơ khai về|cần được mở rộng|Thể loại:)', sent):
            return True
        if re.match(r'^(Thể loại|Danh sách|Xem thêm|Tham khảo|Liên kết ngoài|Chú thích)', sent):
            return True
        # Infobox leftovers: many pipes, or repeated key=value pairs.
        if sent.count('|') > 2:
            return True
        if re.search(r'\w+=\w+', sent) and sent.count('=') > 1:
            return True
        # Reference fragments like [1] or [cần dẫn nguồn].
        if re.search(r'\[\d+\]', sent) or re.search(r'\[cần', sent):
            return True
        # Sentences containing URLs.
        if re.search(r'(http|www\.|\.com|\.org)', sent, re.IGNORECASE):
            return True
        # Digit-heavy text (data tables, statistics).
        if sum(c.isdigit() for c in sent) > 0.3 * len(sent):
            return True
        # Bullet or dash list items.
        if re.match(r'^[\*\-•]\s', sent):
            return True
        return False

    return (not _rejected(), sent)
TARGET_COUNT = 8000
def fetch_and_process():
    """Download UVW-2026, keep high-quality articles, and export sentences.

    Collects up to TARGET_COUNT cleaned, validated sentences and writes
    them numbered and tab-separated to sentences_uvw.txt in the parent
    directory of this script's directory.
    """
    # Pull the corpus from the HuggingFace hub.
    print("Loading UVW-2026 dataset from HuggingFace...")
    dataset = load_dataset("undertheseanlp/UVW-2026", split="train")
    print(f"Total articles in dataset: {len(dataset)}")

    # Keep only articles with quality_score >= 5 (missing score counts as 0).
    print("Filtering articles by quality_score >= 5...")
    articles = [a for a in dataset if (a.get("quality_score") or 0) >= 5]
    print(f"High-quality articles: {len(articles)}")

    # Accumulate valid sentences article by article until the target is hit.
    print("Segmenting sentences...")
    collected = []
    for article_no, article in enumerate(articles, start=1):
        body = clean_text(article["content"])
        for candidate in sent_tokenize(body):
            keep, cleaned = is_valid_sentence(candidate.strip())
            if keep:
                collected.append(cleaned)
        # Check only between articles so each article is fully processed.
        if len(collected) >= TARGET_COUNT:
            print(f"Processed {article_no} articles")
            break

    # Truncate to exactly the target size.
    selected = collected[:TARGET_COUNT]
    print(f"Total sentences collected: {len(selected)}")

    # Write one numbered, tab-separated sentence per line.
    out_path = join(dirname(dirname(__file__)), "sentences_uvw.txt")
    with open(out_path, "w", encoding="utf-8") as fh:
        fh.writelines(f"{i}\t{s}\n" for i, s in enumerate(selected, 1))
    print(f"Saved to: {out_path}")

    # Preview the first few sentences, truncated to 80 characters.
    print("\nSample sentences:")
    for i, s in enumerate(selected[:5], 1):
        print(f" {i}. {s[:80]}...")
# Script entry point: run the full fetch/clean/segment/export pipeline.
if __name__ == "__main__":
    fetch_and_process()