Tasks: Text Retrieval
Sub-tasks: entity-linking-retrieval
Languages: Chinese
Size: 1M - 10M
ArXiv: 2207.13005
License:
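Before the script itself, a minimal usage sketch (assuming the data files named in _URLS below sit alongside this loading script in the HIT-TMG/Hansel repository; recent versions of the datasets library may additionally require trust_remote_code=True for script-based datasets):

from datasets import load_dataset

# "wiki" builds train/validation splits from Wikipedia hyperlinks;
# "hansel-few-shot" and "hansel-zero-shot" each build a single test split.
wiki = load_dataset("HIT-TMG/Hansel", "wiki")
fs_test = load_dataset("HIT-TMG/Hansel", "hansel-few-shot", split="test")
print(fs_test[0]["mention"], fs_test[0]["gold_id"])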
# coding=utf-8
"""Hansel: A Chinese Few-Shot and Zero-Shot Entity Linking Benchmark"""
import json

import datasets
_HANSEL_CITATION = """\
@misc{xu2022hansel,
  title = {Hansel: A Chinese Few-Shot and Zero-Shot Entity Linking Benchmark},
  author = {Xu, Zhenran and Shan, Zifei and Li, Yuxin and Hu, Baotian and Qin, Bing},
  publisher = {arXiv},
  year = {2022},
  url = {https://arxiv.org/abs/2207.13005}
}
"""
_HANSEL_DESCRIPTION = """\
Hansel is a high-quality, human-annotated Chinese entity linking (EL) dataset for testing the ability of Chinese EL systems to generalize to tail and emerging entities.
The test set contains few-shot (FS) and zero-shot (ZS) slices, comprises 10K examples in total, and uses Wikidata as the corresponding knowledge base.
The training and validation sets are derived from Wikipedia hyperlinks and are useful for large-scale pretraining of Chinese EL systems.
"""
# Data files are referenced by paths relative to this repository, so the
# download manager resolves them against the dataset repo itself.
_URLS = {
    "train": "hansel-train.jsonl",
    "val": "hansel-val.jsonl",
    "hansel-fs": "hansel-few-shot-v1.jsonl",
    "hansel-zs": "hansel-zero-shot-v1.jsonl",
}

logger = datasets.logging.get_logger(__name__)
class HanselConfig(datasets.BuilderConfig):
    """BuilderConfig for Hansel."""

    def __init__(self, features, data_url, citation, url, **kwargs):
        """BuilderConfig for Hansel.

        Args:
          features: `list[string]`, list of the features that will appear in the
            feature dict.
          data_url: `string`, url to download the data files from.
          citation: `string`, citation for the dataset.
          url: `string`, url for information about the dataset.
          **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.features = features
        self.data_url = data_url
        self.citation = citation
        self.url = url
class Hansel(datasets.GeneratorBasedBuilder):
    """The Hansel benchmark."""

    BUILDER_CONFIGS = [
        HanselConfig(
            name="wiki",
            description=_HANSEL_DESCRIPTION,
            features=["id", "text", "start", "end", "mention", "gold_id"],
            data_url="https://huggingface.co/datasets/HIT-TMG/Hansel/blob/main/",
            citation=_HANSEL_CITATION,
            url="https://github.com/HITsz-TMG/Hansel",
        ),
        HanselConfig(
            name="hansel-few-shot",
            description=_HANSEL_DESCRIPTION,
            features=["id", "text", "start", "end", "mention", "gold_id", "source", "domain"],
            data_url="https://huggingface.co/datasets/HIT-TMG/Hansel/blob/main/",
            citation=_HANSEL_CITATION,
            url="https://github.com/HITsz-TMG/Hansel",
        ),
        HanselConfig(
            name="hansel-zero-shot",
            description=_HANSEL_DESCRIPTION,
            features=["id", "text", "start", "end", "mention", "gold_id", "source", "domain"],
            data_url="https://huggingface.co/datasets/HIT-TMG/Hansel/blob/main/",
            citation=_HANSEL_CITATION,
            url="https://github.com/HITsz-TMG/Hansel",
        ),
    ]
    def _info(self):
        # All features are strings except the mention span offsets, which are
        # character offsets into "text".
        features = {feature: datasets.Value("string") for feature in self.config.features}
        features["start"] = datasets.Value("int64")
        features["end"] = datasets.Value("int64")
        return datasets.DatasetInfo(
            description=self.config.description,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation,
        )
    def _split_generators(self, dl_manager):
        # Download only the files the selected config actually needs, and
        # match config names exactly rather than by substring.
        if self.config.name == "hansel-few-shot":
            data_file = dl_manager.download_and_extract(_URLS["hansel-fs"])
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={"data_file": data_file, "split": datasets.Split.TEST},
                ),
            ]
        if self.config.name == "hansel-zero-shot":
            data_file = dl_manager.download_and_extract(_URLS["hansel-zs"])
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={"data_file": data_file, "split": datasets.Split.TEST},
                ),
            ]
        downloaded_files = dl_manager.download_and_extract({"train": _URLS["train"], "val": _URLS["val"]})
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_file": downloaded_files["train"], "split": datasets.Split.TRAIN},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data_file": downloaded_files["val"], "split": datasets.Split.VALIDATION},
            ),
        ]
    def _generate_examples(self, data_file, split):
        logger.info("generating examples from = %s", data_file)
        with open(data_file, encoding="utf-8") as f:
            for idx, line in enumerate(f):
                # Each line is a self-contained JSON object whose keys match
                # the features declared in _info().
                example = json.loads(line)
                yield idx, example
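For illustration, a hypothetical input line in the schema the script expects (the field values below are invented; real files follow the features declared in _info(), with start/end as character offsets into text and gold_id a Wikidata QID):

import json

line = '{"id": "demo-0", "text": "我在苹果工作。", "start": 2, "end": 4, "mention": "苹果", "gold_id": "Q312", "source": "news", "domain": "technology"}'
record = json.loads(line)
# The mention span is recoverable by slicing the text with the offsets.
assert record["text"][record["start"]:record["end"]] == record["mention"]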