| import datasets | |
| import os | |
| import logging | |
| import lm_dataformat | |
| from tqdm import tqdm | |
# Human-readable dataset description surfaced via DatasetInfo / the dataset
# card. The string content is runtime-visible, so it is kept byte-identical.
_DESC = """
BinhVQ news corpus version 2021 (~20M records)
https://github.com/binhvq/news-corpus
Preprocessed:
- Read mongo dump and export to jsonl
- Clean content with Beautifulsoup
- Concatenate title, sapo and content
- Remove exact match sha256
- Shuffle and split train / val (0.01)
**IMPORTANT**: Please run `pip install lm_dataformat` before load this dataset
"""
# Hub location of the dataset files (not referenced elsewhere in this
# chunk — presumably kept for documentation; verify before removing).
_REPO_URL = "https://huggingface.co/datasets/imthanhlv/binhvq_dedup/tree/main/"
# Per-split archive names, passed to dl_manager.download() in
# _split_generators below.
_URLS = {
    "train": "train.jsonl.zst",
    "val": "val.jsonl.zst"
}
# Guard for the optional lm_dataformat dependency used by _generate_examples.
# Raising ImportError (rather than print + exit()) gives callers a proper,
# catchable failure and works even where the `exit` site-builtin is absent.
# NOTE(review): the unconditional `import lm_dataformat` at the top of the
# file fires before this guard, making it unreachable on a missing package —
# consider removing the top-level import so this message can actually surface.
try:
    import lm_dataformat
except ImportError as err:
    raise ImportError(
        "Can't import lm_dataformat, please run pip install lm_dataformat and try again"
    ) from err
class BinhvqConfig(datasets.BuilderConfig):
    """Builder configuration for the BinhVQ news corpus dataset."""

    def __init__(self, **kwargs):
        """Forward every keyword argument to ``datasets.BuilderConfig``."""
        super().__init__(**kwargs)
class Binhvq(datasets.GeneratorBasedBuilder):
    """Dataset builder for the BinhVQ deduplicated Vietnamese news corpus.

    Downloads the pre-split train/validation ``.jsonl.zst`` archives listed
    in ``_URLS`` and yields one example per document with a single ``text``
    field.
    """

    BUILDER_CONFIGS = [
        BinhvqConfig(
            name="text",
            version=datasets.Version("1.0.0", ""),
            description="Text",
        ),
    ]

    def _info(self):
        """Return dataset metadata: one string feature named ``text``."""
        return datasets.DatasetInfo(
            description=_DESC,
            features=datasets.Features({"text": datasets.Value("string")}),
            homepage="https://github.com/binhvq/news-corpus",
        )

    def _split_generators(self, dl_manager):
        """Download both archives and declare the train/validation splits.

        Each split generator passes the local path of its downloaded
        archive to ``_generate_examples`` via ``gen_kwargs``.
        """
        downloaded_files = dl_manager.download(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["val"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(id, {"text": doc})`` pairs from a ``.jsonl.zst`` archive.

        Ids are the 0-based position of each document within the file.
        """
        # info level + lazy %-args: this is progress information, not a
        # warning, and the message need not be formatted unless emitted.
        logging.info("Generating examples from %s", filepath)
        reader = lm_dataformat.Reader(filepath)
        # NOTE(review): filepath is passed to both Reader() and
        # read_jsonl_zst(), mirroring the original code; read_jsonl_zst
        # appears to take the file argument directly — confirm against
        # the lm_dataformat API whether the Reader argument is redundant.
        for _id, doc in enumerate(reader.read_jsonl_zst(filepath)):
            yield _id, {"text": doc}