nateraw committed
Commit 33268ec
1 Parent(s): 3e5a6cd

Create wit.py

Files changed (1)
  1. wit.py +87 -0
wit.py ADDED
@@ -0,0 +1,87 @@
+ import json
+
+ import datasets
+ from datasets import Value, Sequence, Features
+
+
+ _CITATION = """\
+ @article{srinivasan2021wit,
+   title={WIT: Wikipedia-based Image Text Dataset for Multimodal Multilingual Machine Learning},
+   author={Srinivasan, Krishna and Raman, Karthik and Chen, Jiecao and Bendersky, Michael and Najork, Marc},
+   journal={arXiv preprint arXiv:2103.01913},
+   year={2021}
+ }
+ """
+
+ _DESCRIPTION = """\
+ Wikipedia-based Image Text (WIT) Dataset is a large multimodal multilingual dataset. WIT is composed of a curated set
+ of 37.6 million entity-rich image-text examples with 11.5 million unique images across 108 Wikipedia languages. Its
+ size enables WIT to be used as a pretraining dataset for multimodal machine learning models.
+ """
+
+ _HOMEPAGE = "https://github.com/google-research-datasets/wit"
+
+ _URL = "https://storage.googleapis.com/huggingface-nlp/datasets/wit/"
+ _URLS = {
+     # TODO - This should be in range(400). Haven't mirrored all the files yet.
+     'train': [_URL + f"part-{i:05d}-48a6f07e-bb86-4735-aac7-883349f41a28-c000.json.gz" for i in range(2)]
+ }
+
+ class Wit(datasets.GeneratorBasedBuilder):
+     """WIT: Wikipedia-based Image Text Dataset for Multimodal Multilingual Machine Learning"""
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=Features({
+                 'b64_bytes': Value('string'),
+                 'embedding': Sequence(Value('float64')),
+                 'image_url': Value('string'),
+                 'metadata_url': Value('string'),
+                 'wit_features': Sequence({
+                     "language": Value('string'),
+                     "page_url": Value('string'),
+                     "image_url": Value('string'),
+                     "attribution_passes_lang_id": Value('string'),
+                     "caption_alt_text_description": Value('string'),
+                     "caption_attribution_description": Value('string'),
+                     "caption_reference_description": Value('string'),
+                     "caption_title_and_reference_description": Value('string'),
+                     "context_page_description": Value('string'),
+                     "context_section_description": Value('string'),
+                     "hierarchical_section_title": Value('string'),
+                     "is_main_image": Value('string'),
+                     "mime_type": Value('string'),
+                     "original_height": Value('string'),
+                     "original_width": Value('string'),
+                     "page_changed_recently": Value('string'),
+                     "page_title": Value('string'),
+                     "section_title": Value('string'),
+                 })
+             }),
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         downloaded_files = dl_manager.download_and_extract(_URLS)
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": downloaded_files["train"]}),
+         ]
+
+     def _generate_examples(self, filepaths):
+         """Yields examples."""
+         wit_feature_names = self.info.features['wit_features'].feature.keys()
+         # Keep a running index across files so example keys stay unique
+         # even when more than one shard is read.
+         idx = 0
+         for filepath in filepaths:
+             with open(filepath, "rb") as f:
+                 for line in f:
+                     # json.loads accepts bytes directly, so no explicit
+                     # decoding is needed here.
+                     row_data = json.loads(line.strip())
+                     # Backfill any wit_features keys missing from this row so
+                     # every example matches the declared schema.
+                     for feature in row_data['wit_features']:
+                         for fname in wit_feature_names:
+                             if fname not in feature:
+                                 feature[fname] = None
+                     yield idx, row_data
+                     idx += 1