Update wit.py
wit.py CHANGED
```diff
@@ -1,8 +1,7 @@
 import json
 
 import datasets
-from datasets import
-
+from datasets import Features, Sequence, Value
 
 _CITATION = """\n@article{srinivasan2021wit,
     title={WIT: Wikipedia-based Image Text Dataset for Multimodal Multilingual Machine Learning},
@@ -11,52 +10,52 @@ _CITATION = """\n@article{srinivasan2021wit,
     year={2021}
 }
 """
-
 _DESCRIPTION = """\nWikipedia-based Image Text (WIT) Dataset is a large multimodal multilingual dataset. WIT is composed of a curated set
 of 37.6 million entity rich image-text examples with 11.5 million unique images across 108 Wikipedia languages. Its
 size enables WIT to be used as a pretraining dataset for multimodal machine learning models.
 """
-
 _HOMEPAGE = "https://github.com/google-research-datasets/wit"
-
 _URL = "https://storage.googleapis.com/huggingface-nlp/datasets/wit/"
 _URLS = {
-
-    'train': [_URL + f"part-{'%05d' % i}-48a6f07e-bb86-4735-aac7-883349f41a28-c000.json.gz" for i in range(10)]
+    'train': [_URL + f"part-{'%05d' % i}-48a6f07e-bb86-4735-aac7-883349f41a28-c000.json.gz" for i in range(400)]
 }
 
+
 class Wit(datasets.GeneratorBasedBuilder):
     """WIT: Wikipedia-based Image Text Dataset for Multimodal Multilingual Machine Learning"""
 
     def _info(self):
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
-            features=Features(
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            features=Features(
+                {
+                    'b64_bytes': Value('string'),
+                    'embedding': Sequence(Value('float64')),
+                    'image_url': Value('string'),
+                    'metadata_url': Value('string'),
+                    'original_height': Value('int32'),
+                    'original_width': Value('int32'),
+                    'mime_type': Value('string'),
+                    'caption_attribution_description': Value('string'),
+                    'wit_features': Sequence(
+                        {
+                            "language": Value('string'),
+                            "page_url": Value('string'),
+                            "attribution_passes_lang_id": Value("string"),
+                            "caption_alt_text_description": Value('string'),
+                            "caption_reference_description": Value('string'),
+                            "caption_title_and_reference_description": Value('string'),
+                            "context_page_description": Value('string'),
+                            "context_section_description": Value('string'),
+                            "hierarchical_section_title": Value('string'),
+                            "is_main_image": Value('string'),
+                            "page_changed_recently": Value('string'),
+                            "page_title": Value('string'),
+                            "section_title": Value('string'),
+                        }
+                    ),
+                }
+            ),
             homepage=_HOMEPAGE,
             citation=_CITATION,
         )
```
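One subtlety of the schema added above: in `datasets`, a `Sequence` wrapped around a dict is stored as a dict of lists rather than a list of dicts, so consumers see `wit_features` in columnar form. A minimal sketch, assuming a reduced two-field version of the schema (the values here are illustrative, not from the dataset):

```python
from datasets import Dataset, Features, Sequence, Value

# Reduced, illustrative version of the wit_features schema.
features = Features(
    {
        "image_url": Value("string"),
        "wit_features": Sequence({"language": Value("string"), "page_title": Value("string")}),
    }
)

ds = Dataset.from_dict(
    {
        "image_url": ["https://example.org/cat.jpg"],  # hypothetical URL
        # Sequence over a dict is columnar: one parallel list per field.
        "wit_features": [{"language": ["en", "fr"], "page_title": ["Cat", "Chat"]}],
    },
    features=features,
)

print(ds[0]["wit_features"])  # {'language': ['en', 'fr'], 'page_title': ['Cat', 'Chat']}
```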
```diff
@@ -78,7 +77,22 @@ class Wit(datasets.GeneratorBasedBuilder):
             line = line.strip()
             row_data = json.loads(line, encoding='utf-8')
             for feature in row_data['wit_features']:
+
+                # If a feature is missing from the feature dict, add it as None
                 for fname in wit_feature_names:
                     if fname not in feature:
                         feature[fname] = None
+
+                # Move values duplicated across wit_features entries up into row_data to avoid unnecessary duplication
+                extra_wit_feature_keys = [k for k in feature.keys() if k not in wit_feature_names]
+                for k in extra_wit_feature_keys:
+                    data = feature.pop(k)
+                    if isinstance(data, list):
+                        data = data[0]
+                    row_data[k] = data
+
+            # Check row_data for missing keys, adding None for most, but -1 for int features to avoid failures
+            missing_keys = [x for x in self.info.features.keys() if x not in row_data]
+            for missing_key in missing_keys:
+                row_data[missing_key] = None if missing_key not in ['original_height', 'original_width'] else -1
             yield str(i), row_data
```
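To see what the new generator logic does, here is a self-contained sketch (not the dataset script itself) that runs the same normalization on a toy row; `wit_feature_names` is assumed to hold the 13 per-caption keys declared in the schema. Note in passing that the `encoding` argument to `json.loads` in the context line above is ignored on Python 3 and was removed in Python 3.9; plain `json.loads(line)` behaves identically.

```python
import json

# Assumed: the 13 per-caption keys declared under 'wit_features' in _info.
wit_feature_names = [
    "language", "page_url", "attribution_passes_lang_id",
    "caption_alt_text_description", "caption_reference_description",
    "caption_title_and_reference_description", "context_page_description",
    "context_section_description", "hierarchical_section_title",
    "is_main_image", "page_changed_recently", "page_title", "section_title",
]

# Toy row: one caption with most keys absent, plus an image-level key
# ('image_url') nested inside wit_features as a single-element list.
line = '{"wit_features": [{"language": "en", "page_title": "Cat", "image_url": ["https://example.org/cat.jpg"]}]}'
row_data = json.loads(line)

for feature in row_data["wit_features"]:
    # Fill in every declared per-caption key the row lacks.
    for fname in wit_feature_names:
        if fname not in feature:
            feature[fname] = None
    # Hoist non-caption keys up to the row level, unwrapping single-element lists.
    for k in [k for k in feature.keys() if k not in wit_feature_names]:
        data = feature.pop(k)
        row_data[k] = data[0] if isinstance(data, list) else data

print(row_data["image_url"])                         # https://example.org/cat.jpg
print(row_data["wit_features"][0]["section_title"])  # None
```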
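Finally, a hypothetical usage sketch for the updated script. The local path is an assumption, and streaming avoids materializing all 400 train shards up front:

```python
from datasets import load_dataset

# Assumes the script above is saved locally as wit.py.
ds = load_dataset("./wit.py", split="train", streaming=True)

row = next(iter(ds))
print(row["image_url"])
print(row["wit_features"]["language"])  # per-caption values come back as parallel lists
```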