"""
Fetch data from HuggingFace dataset undertheseanlp/UVB-v0.1
- Get 8,000 high-quality sentences from fiction books
- Get 8,000 high-quality sentences from non-fiction books
"""
| |
|
| | import re |
| | from os.path import dirname, join |
| |
|
| | from datasets import load_dataset |
| | from underthesea import sent_tokenize, text_normalize |
| |
|
| |
|
| | |
# Genre tags that mark a book as fiction (consumed by classify_book).
FICTION_GENRES = {
    "Fiction",
    "Novels",
    "Romance",
    "Fantasy",
    "Science Fiction",
    "Mystery",
    "Thriller",
    "Horror",
    "Historical Fiction",
    "Literary Fiction",
    "Adventure",
    "Crime",
    "Suspense",
    "Drama",
    "Short Stories",
}
| |
|
| | |
# Genre tags that mark a book as non-fiction (consumed by classify_book).
# Both "Non Fiction" and "Nonfiction" spellings occur in the dataset.
NON_FICTION_GENRES = {
    "Non Fiction",
    "Nonfiction",
    "History",
    "Biography",
    "Autobiography",
    "Self Help",
    "Psychology",
    "Philosophy",
    "Science",
    "Politics",
    "Economics",
    "Business",
    "Education",
    "Travel",
    "Memoir",
    "Essays",
    "Reference",
    "Health",
    "Religion",
    "Spirituality",
}
| |
|
| |
|
def clean_text(text):
    """Normalize *text* and strip lightweight markdown formatting.

    Runs underthesea's text_normalize first, then removes markdown
    headers, emphasis markers, horizontal rules and link syntax, and
    finally collapses blank lines and trims each remaining line.
    """
    text = text_normalize(text)
    # Markdown headers ("# Title", "## Title", ...) at line start.
    text = re.sub(r'^#+\s+', '', text, flags=re.MULTILINE)
    # Emphasis markers (*, **, ***).
    text = re.sub(r'\*+', '', text)
    # Horizontal rules made only of dashes.
    text = re.sub(r'^-+$', '', text, flags=re.MULTILINE)
    # Collapse links [label](url) down to just their label.
    text = re.sub(r'\[([^\]]+)\]\([^)]+\)', r'\1', text)
    # Squeeze runs of blank lines into a single newline.
    text = re.sub(r'\n{2,}', '\n', text)
    # Trim surrounding whitespace on every line.
    return '\n'.join(line.strip() for line in text.split('\n'))
| |
|
| |
|
def is_high_quality_sentence(sent):
    """Decide whether *sent* is clean enough for UD annotation.

    Returns a ``(keep, stripped)`` pair: ``keep`` is True only when the
    whitespace-stripped sentence passes every heuristic below, and
    ``stripped`` is always returned so callers can reuse the cleaned text.
    """
    sent = sent.strip()
    if not sent:
        return False, sent

    n_chars = len(sent)
    words = sent.split()

    # Length bounds, in characters and in whitespace tokens.
    if not (30 <= n_chars <= 250) or not (5 <= len(words) <= 40):
        return False, sent

    # Must look like a real sentence: capitalized start, terminal punctuation.
    if not sent[0].isupper() or sent.rstrip()[-1] not in '.!?…"»':
        return False, sent

    # Reject shouty text: more than 30% uppercase characters.
    if sum(c.isupper() for c in sent) > n_chars * 0.3:
        return False, sent

    # Require at least one Vietnamese diacritic so only Vietnamese prose passes.
    if re.search(r'[àáảãạăắằẳẵặâấầẩẫậèéẻẽẹêếềểễệìíỉĩịòóỏõọôốồổỗộơớờởỡợùúủũụưứừửữựỳýỷỹỵđ]', sent, re.IGNORECASE) is None:
        return False, sent

    # Number-heavy lines (tables, page numbers) are not useful prose.
    if sum(c.isdigit() for c in sent) > n_chars * 0.15:
        return False, sent

    # Headings and list markers ("Chương ...", "1.", "(a)") are not sentences.
    if re.match(r'^(Chương|Phần|Mục|Điều|\d+\.|\([a-z]\))', sent):
        return False, sent

    # Web artifacts: URLs, emails, domains.
    if re.search(r'(http|www\.|@|\.com|\.vn)', sent, re.IGNORECASE):
        return False, sent

    # Punctuation-dense lines are usually fragments or layout residue.
    if sum(c in '.,;:!?-–—()[]{}""\'\'«»' for c in sent) > len(words) * 1.5:
        return False, sent

    # An ellipsis anywhere except the very end signals a truncated fragment.
    if '...' in sent[:-5]:
        return False, sent

    # Many quote marks suggest multi-speaker dialogue, hard to annotate.
    if sent.count('"') + sent.count('"') + sent.count('"') > 4:
        return False, sent

    return True, sent
| |
|
| |
|
def classify_book(genres):
    """Label a book "fiction" or "non-fiction" from its genre tags.

    Returns None when *genres* is empty or overlaps neither genre set.
    When tags hit both sets, the side with more hits wins; ties go to
    non-fiction.
    """
    if not genres:
        return None

    tagged = set(genres)
    fiction_hits = len(tagged & FICTION_GENRES)
    non_fiction_hits = len(tagged & NON_FICTION_GENRES)

    if fiction_hits == 0 and non_fiction_hits == 0:
        return None
    if fiction_hits > non_fiction_hits:
        return "fiction"
    # Covers both "more non-fiction hits" and the tie case.
    return "non-fiction"
| |
|
| |
|
def extract_sentences_from_book(content, max_sentences=500):
    """Return up to *max_sentences* high-quality sentences from raw book text.

    The content is cleaned, split with underthesea's sent_tokenize, and each
    candidate is filtered through is_high_quality_sentence in order.
    """
    kept = []
    for raw in sent_tokenize(clean_text(content)):
        accepted, cleaned = is_high_quality_sentence(raw)
        if not accepted:
            continue
        kept.append(cleaned)
        if len(kept) >= max_sentences:
            break
    return kept
| |
|
| |
|
def _collect_sentences(books, target):
    """Greedily take up to *target* sentences from quality-sorted *books*.

    Books are consumed in order; each contributes at most the per-book cap
    enforced by extract_sentences_from_book. Progress is printed per book.
    """
    collected = []
    for i, book in enumerate(books):
        if len(collected) >= target:
            break
        sentences = extract_sentences_from_book(book["content"])
        # Only take what is still needed so we never overshoot the target.
        collected.extend(sentences[:target - len(collected)])
        print(f"  [{i+1}/{len(books)}] {book['title'][:50]} - {len(sentences)} sentences (total: {len(collected)})")
    return collected


def fetch_and_process():
    """Fetch UVB-v0.1, select high-quality books, export 16,000 sentences.

    Pipeline:
      1. Load the dataset and split books into fiction / non-fiction using
         classify_book on each book's genre tags.
      2. Rank each side by a popularity-weighted Goodreads score.
      3. Extract up to 8,000 clean sentences per side (best books first).
      4. Write ``sentences_uvb.txt`` (TSV: index, source, sentence) into the
         parent directory of this script, then print a few samples.
    """
    print("Loading UVB-v0.1 dataset from HuggingFace...")
    ds = load_dataset("undertheseanlp/UVB-v0.1", split="train")

    print(f"Total books in dataset: {len(ds)}")

    fiction_books = []
    non_fiction_books = []

    for book in ds:
        genres = book.get("genres", [])
        rating = book.get("goodreads_rating", 0) or 0
        num_ratings = book.get("goodreads_num_ratings", 0) or 0

        # Popularity-weighted rating: min() caps the multiplier at 10 so
        # very popular books cannot dominate purely on rating volume.
        quality_score = rating * min(num_ratings / 100, 10)

        book_type = classify_book(genres)
        book_info = {
            "title": book["title"],
            "content": book["content"],
            "rating": rating,
            "num_ratings": num_ratings,
            "quality_score": quality_score,
            "genres": genres,
        }

        if book_type == "fiction":
            fiction_books.append(book_info)
        elif book_type == "non-fiction":
            non_fiction_books.append(book_info)

    print(f"Fiction books: {len(fiction_books)}")
    print(f"Non-fiction books: {len(non_fiction_books)}")

    # Best books first so the sentence budget is spent on quality titles.
    fiction_books.sort(key=lambda x: x["quality_score"], reverse=True)
    non_fiction_books.sort(key=lambda x: x["quality_score"], reverse=True)

    print("\nExtracting sentences from fiction books...")
    fiction_sentences = _collect_sentences(fiction_books, 8000)

    print("\nExtracting sentences from non-fiction books...")
    non_fiction_sentences = _collect_sentences(non_fiction_books, 8000)

    print(f"\nFiction sentences collected: {len(fiction_sentences)}")
    print(f"Non-fiction sentences collected: {len(non_fiction_sentences)}")

    all_sentences = fiction_sentences[:8000] + non_fiction_sentences[:8000]
    print(f"Total sentences: {len(all_sentences)}")

    output_dir = dirname(dirname(__file__))
    output_file = join(output_dir, "sentences_uvb.txt")

    with open(output_file, "w", encoding="utf-8") as f:
        for i, sent in enumerate(all_sentences, 1):
            # The fiction slice comes first in all_sentences, so the index
            # alone determines which corpus a line came from.
            source = "fiction" if i <= len(fiction_sentences[:8000]) else "non-fiction"
            f.write(f"{i}\t{source}\t{sent}\n")

    print(f"\nSaved to: {output_file}")

    print("\nSample fiction sentences:")
    for i, sent in enumerate(fiction_sentences[:3], 1):
        print(f"  {i}. {sent[:100]}...")

    print("\nSample non-fiction sentences:")
    for i, sent in enumerate(non_fiction_sentences[:3], 1):
        print(f"  {i}. {sent[:100]}...")
| |
|
| |
|
# Script entry point: run the full fetch-and-export pipeline.
if __name__ == "__main__":
    fetch_and_process()
| |
|