"""TODO: Add a description here.""" |

import gzip

import datasets


logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@InProceedings{huggingface:dataset,
  title  = {A great new dataset},
  author = {huggingface, Inc.},
  year   = {2020}
}
"""

_VERSION = "1.1.0"

_DESCRIPTION = """\
BSBasque dataset. The text is extracted from the following domains:

https://www.berria.eus
https://eu.wikipedia.org
https://goiena.eus
https://www.argia.eus
https://goierri.hitza.eus
"""

_HOMEPAGE = "to.be.announced.eus"

_LICENSE = "CC BY-SA 4.0"

_BASE_DATA_URL_STR = "https://ixa2.si.ehu.es/~ccpsoeta/bsbasque/"

_BASE_CHECKSUM_FILE_NAME = "bsbasque_sha256.txt"
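# Note: the checksum file is assumed to list one data file per line, with the
# file name as the first tab-separated field (e.g. "part-000.txt.gz\t<sha256>");
# this layout is inferred from how _split_generators() parses it below, not
# from the published file itself.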


class BsBasqueConfig(datasets.BuilderConfig):
    """BuilderConfig for the BsBasque corpus."""

    def __init__(self, **kwargs):
        """BuilderConfig for BsBasque.

        Args:
            **kwargs: Keyword arguments forwarded to super.
        """
        # The corpus ships as a single fixed configuration, so the name and
        # description are hard-coded rather than taken from the caller.
        name = "bsbasque"
        description = "BsBasque dataset"
        super().__init__(name=name, description=description, **kwargs)
        self.base_data_url = _BASE_DATA_URL_STR


class BSBasque(datasets.GeneratorBasedBuilder):
    """BSBasque: a Basque text corpus extracted from several news domains and the Basque Wikipedia."""

    BUILDER_CONFIGS = [BsBasqueConfig(version=datasets.Version(_VERSION))]
    BUILDER_CONFIG_CLASS = BsBasqueConfig

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {"id": datasets.Value("int64"), "text": datasets.Value("string")}
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download the checksum file, which lists the corpus data files.
        checksum_url = self.config.base_data_url + _BASE_CHECKSUM_FILE_NAME
        checksum_file = dl_manager.download(checksum_url)
        with open(checksum_file, encoding="utf-8") as f:
            # Strip each line before filtering and splitting: a bare "\n" is
            # truthy, so the original `if line` never skipped blank lines, and
            # un-stripped lines would leak newlines into the file names.
            data_filenames = [line.strip().split("\t")[0] for line in f if line.strip()]
        data_urls = [self.config.base_data_url + data_filename for data_filename in data_filenames]
        downloaded_files = dl_manager.download(data_urls)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": downloaded_files}),
        ]

    def _generate_examples(self, filepaths):
        """Yields examples in raw text form, one per line, iterating over all the files."""
        id_ = 0
        for filepath in filepaths:
            logger.info("generating examples from = %s", filepath)
            with gzip.open(filepath, "rt", encoding="utf-8") as f:
                for line in f:
                    # `"".join(line)` in the original rebuilt an already
                    # materialized string character by character; a plain
                    # rstrip() is equivalent and cheaper.
                    yield id_, {"id": id_, "text": line.rstrip()}
                    id_ += 1
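

if __name__ == "__main__":
    # Minimal local smoke test; not part of the original script. This sketch
    # assumes a `datasets` version that still supports loading from a local
    # script path, and network access to the ixa2.si.ehu.es host above.
    from datasets import load_dataset

    ds = load_dataset(__file__, split="train")
    print(ds[0])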