| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | """Cleaned Indonesian split of the KoPI corpus.""" |
| | import json |
| | import glob |
| | import gzip |
| | import textwrap |
| | import datasets |
| | import zstandard as zstd |
| | logger = datasets.logging.get_logger(__name__) |
| |
|
| | _CITATION = """ |
| | |
| | """ |
| | _DESCRIPTION = """\ |
| | |
| | """ |
| | _HOMEPAGE = "https://huggingface.co/datasets/munggok/KoPI" |
| | _LICENSE = "CC0" |
| | _BASE_URL = { |
| | "train":"https://huggingface.co/datasets/munggok/KoPI/resolve/main/data/kopi-{index:012d}.json.zst", |
| | "val":"https://huggingface.co/datasets/munggok/KoPI/resolve/main/data/kopi-val-{index:012d}.json.zst" |
| |
|
| | } |
| | _CONFIGS = { |
| | "tiny": {"train": 10, "validation": 1}, |
| | "small": {"train": 30, "validation": 2}, |
| | "medium": {"train": 55, "validation": 2}, |
| | "large": {"train": 75, "validation": 3}, |
| | "full": {"train": 107, "validation": 4} |
| | } |
class KoPIConfig(datasets.BuilderConfig):
    """BuilderConfig for the cleaned KoPI corpus."""

    def __init__(self, **kwargs):
        """Create a KoPI configuration.

        Args:
            **kwargs: keyword arguments forwarded unchanged to
                ``datasets.BuilderConfig`` (name, version, description, ...).
        """
        super().__init__(**kwargs)
class KoPI(datasets.GeneratorBasedBuilder):
    """KoPI corpus: cleaned Indonesian web text served as JSON-lines shards.

    Each example carries ``text``, ``url``, ``timestamp`` and ``meta``
    (all strings). Configurations differ only in how many shards they load
    (see ``_CONFIGS``).
    """

    BUILDER_CONFIGS = [
        KoPIConfig(
            name="tiny",
            version=datasets.Version("1.0.0"),
            description=textwrap.dedent(
                """\
                Tiny version only using 10 shard
                """
            ),
        ),
        KoPIConfig(
            name="small",
            version=datasets.Version("1.0.0"),
            description=textwrap.dedent(
                """\
                small version only using 30 shard
                """
            ),
        ),
        KoPIConfig(
            name="medium",
            version=datasets.Version("1.0.0"),
            # Fixed: previously said "medion ... 50 shard"; the medium config
            # actually loads 55 train shards (see _CONFIGS).
            description=textwrap.dedent(
                """\
                medium version only using 55 shard
                """
            ),
        ),
        KoPIConfig(
            name="large",
            version=datasets.Version("1.0.0"),
            description=textwrap.dedent(
                """\
                large version only using 75 shard
                """
            ),
        ),
        KoPIConfig(
            name="full",
            version=datasets.Version("1.0.0"),
            description=textwrap.dedent(
                """\
                The full cleaned version of KoPI corpus.
                Estimated size of compressed files: 53GB
                """
            ),
        ),
    ]

    def _info(self):
        """Return the dataset metadata: four string features, no labels."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "timestamp": datasets.Value("string"),
                    "meta": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the shards selected by the active config and declare splits.

        Train shards are numbered 1..107 and validation shards 108..111 on the
        Hub; only the first N of each (per ``_CONFIGS``) are downloaded.
        """
        shard_counts = _CONFIGS[self.config.name]
        train_urls = [
            _BASE_URL["train"].format(index=i + 1)
            for i in range(shard_counts["train"])
        ]
        validation_urls = [
            # Validation shard numbering continues after the 107 train shards.
            _BASE_URL["val"].format(index=i + 108)
            for i in range(shard_counts["validation"])
        ]
        train_downloaded_files = dl_manager.download(train_urls)
        validation_downloaded_files = dl_manager.download(validation_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths": train_downloaded_files},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepaths": validation_downloaded_files},
            ),
        ]

    def _generate_examples(self, filepaths):
        """Yield ``(id, example)`` pairs from zstd-compressed JSON-lines shards.

        Args:
            filepaths: local paths of downloaded ``.json.zst`` shards.
        """
        id_ = 0
        for filepath in filepaths:
            logger.info("Generating examples from %s", filepath)
            # Nested `with` guarantees the raw file handle is closed even if
            # the zstd stream wrapper does not close it on our behalf.
            with open(filepath, "rb") as raw, zstd.open(
                raw, "rt", encoding="utf-8"
            ) as f:
                for line in f:
                    if line:
                        example = json.loads(line)
                        meta = example.get("meta")
                        yield id_, {
                            "text": example["text"],
                            "url": example["url"],
                            "timestamp": example["timestamp"],
                            # Preserve original behavior: a missing or null
                            # meta field becomes the literal string "None".
                            "meta": meta if meta is not None else "None",
                        }
                        id_ += 1
| |
|