IlyasMoutawwakil (HF Staff) committed · verified
Commit 771f293 · 1 Parent(s): 4d6d44b

Mirror from hf-internal-testing/tiny-xlm-roberta
.gitattributes CHANGED
@@ -1,35 +1,17 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin.* filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
 *.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
 *.h5 filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tar.gz filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
 *.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
 *.model filter=lfs diff=lfs merge=lfs -text
 *.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
 *.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
 *.pt filter=lfs diff=lfs merge=lfs -text
 *.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+model.safetensors filter=lfs diff=lfs merge=lfs -text
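The rewritten pattern list above is what routes files through Git LFS: anything matching a pattern is stored as a small pointer plus an LFS blob, everything else as a regular git blob. A minimal sketch of that routing logic, using Python's `fnmatch` (the simple `*.ext` patterns here behave like shell globs; this snippet is illustrative, not part of the commit):

```python
from fnmatch import fnmatch

# subset of the patterns kept in the new .gitattributes
lfs_patterns = ["*.bin", "*.bin.*", "*.h5", "*.onnx", "*.model", "model.safetensors"]

for name in ["pytorch_model.bin", "sentencepiece.bpe.model", "model.safetensors", "config.json"]:
    routed = any(fnmatch(name, pat) for pat in lfs_patterns)
    print(f"{name}: {'LFS' if routed else 'regular git blob'}")
# config.json matches nothing, so it stays a plain text file in the repo
```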
README.md ADDED
@@ -0,0 +1,4 @@
+---
+{}
+---
+This is a tiny random {mname_tiny} model to be used for basic testing
config.json ADDED
@@ -0,0 +1,35 @@
+{
+  "architectures": [
+    "XLMRobertaForCausalLM"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "d_ff": 256,
+  "d_kv": 8,
+  "d_model": 64,
+  "eos_token_id": 2,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 256,
+  "initializer_range": 0.02,
+  "intermediate_size": 256,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 64,
+  "model_type": "xlm-roberta",
+  "num_attention_heads": 2,
+  "num_decoder_layers": 2,
+  "num_heads": 2,
+  "num_hidden_layers": 2,
+  "num_layers": 2,
+  "output_past": true,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "relative_attention_num_buckets": 32,
+  "torch_dtype": "float32",
+  "transformers_version": "4.52.0.dev0",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 5002
+}
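This config is what makes the checkpoint tiny: 2 hidden layers, 2 attention heads, hidden size 256, and a 5002-entry vocabulary (the `d_ff`/`d_kv`/`num_layers`-style keys are leftovers from the template script and are not XLM-RoBERTa architecture parameters). A hedged sketch of loading it, assuming the repo id from the commit message:

```python
from transformers import AutoConfig, AutoModelForCausalLM

repo = "hf-internal-testing/tiny-xlm-roberta"  # repo id assumed from the commit message

config = AutoConfig.from_pretrained(repo)
assert config.num_hidden_layers == 2 and config.hidden_size == 256

# random tiny weights: useful for exercising code paths, not for output quality
model = AutoModelForCausalLM.from_pretrained(repo)
print(f"{model.num_parameters():,} parameters")  # a few million, vs hundreds of millions for xlm-roberta-base
```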
generation_config.json ADDED
@@ -0,0 +1,7 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 0,
+  "eos_token_id": 2,
+  "pad_token_id": 1,
+  "transformers_version": "4.52.0.dev0"
+}
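The generation defaults just restate the special-token ids from the model config (`bos=0`, `eos=2`, `pad=1`). A small sketch of reading them back, under the same assumed repo id:

```python
from transformers import GenerationConfig

# repo id assumed from the commit message
gen = GenerationConfig.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
print(gen.bos_token_id, gen.eos_token_id, gen.pad_token_id)  # expected: 0 2 1
```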
make-tiny-xlm-roberta.py ADDED
@@ -0,0 +1,142 @@
+#!/usr/bin/env python
+# coding: utf-8
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script creates a tiny random model
+#
+# It will then be used as "hf-internal-testing/tiny-xlm-roberta"
+
+# ***To build from scratch***
+#
+# 1. clone sentencepiece into a parent dir
+#    git clone https://github.com/google/sentencepiece
+#
+# 2. create a new repo at https://huggingface.co/new
+#    make sure to choose 'hf-internal-testing' as the Owner
+#
+# 3. clone
+#    git clone https://huggingface.co/hf-internal-testing/tiny-xlm-roberta
+#    cd tiny-xlm-roberta
+#
+# 4. start with some pre-existing script from one of the https://huggingface.co/hf-internal-testing/ tiny model repos, e.g.
+#    wget https://huggingface.co/hf-internal-testing/tiny-albert/raw/main/make-tiny-albert.py
+#    chmod a+x ./make-tiny-albert.py
+#    mv ./make-tiny-albert.py ./make-tiny-xlm-roberta.py
+#
+# 5. automatically rename things from the old names to the new ones
+#    perl -pi -e 's|Albert|XLMRoberta|g' make-tiny-xlm-roberta.py
+#    perl -pi -e 's|albert|xlm-roberta|g' make-tiny-xlm-roberta.py
+#
+# 6. edit and re-run this script while fixing it up
+#    ./make-tiny-xlm-roberta.py
+#
+# 7. add/commit/push
+#    git add *
+#    git commit -m "new tiny model"
+#    git push
+
+# ***To update***
+#
+# 1. clone the existing repo
+#    git clone https://huggingface.co/hf-internal-testing/tiny-xlm-roberta
+#    cd tiny-xlm-roberta
+#
+# 2. edit and re-run this script after making whatever changes are needed
+#    ./make-tiny-xlm-roberta.py
+#
+# 3. commit/push
+#    git commit -m "new tiny model"
+#    git push
+
+import os
+import sys
+
+# workaround for the fast tokenizer protobuf issue, and it's much faster too!
+os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
+
+from transformers import XLMRobertaTokenizerFast, XLMRobertaConfig, XLMRobertaForCausalLM
+
+mname_orig = "xlm-roberta-base"
+mname_tiny = "tiny-xlm-roberta"
+
+### Tokenizer
+
+# Shrink the orig vocab to keep things small
+vocab_keep_items = 5000
+tmp_dir = f"/tmp/{mname_tiny}"
+vocab_orig_path = f"{tmp_dir}/sentencepiece.bpe.model"
+vocab_short_path = f"{tmp_dir}/spiece-short.model"
+if 1:  # set to 0 to skip this after running once, to speed things up during tune-up
+    # HACK: need the sentencepiece source to get sentencepiece_model_pb2, as it doesn't get installed
+    sys.path.append("../sentencepiece/python/src/sentencepiece")
+    import sentencepiece_model_pb2 as model
+    tokenizer_orig = XLMRobertaTokenizerFast.from_pretrained(mname_orig)
+    tokenizer_orig.save_pretrained(tmp_dir)
+    with open(vocab_orig_path, 'rb') as f: data = f.read()
+    # adapted from https://blog.ceshine.net/post/trim-down-sentencepiece-vocabulary/
+    m = model.ModelProto()
+    m.ParseFromString(data)
+    print(f"Shrinking vocab from original {len(m.pieces)} dict items")
+    for i in range(len(m.pieces) - vocab_keep_items): _ = m.pieces.pop()
+    print(f"new dict {len(m.pieces)}")
+    with open(vocab_short_path, 'wb') as f: f.write(m.SerializeToString())
+    m = None
+
+tokenizer_fast_tiny = XLMRobertaTokenizerFast(vocab_file=vocab_short_path)
+
+### Config
+
+config_tiny = XLMRobertaConfig.from_pretrained(mname_orig)
+print(config_tiny)
+# remember to update this to the actual config, as each model is different, and then shrink the numbers
+config_tiny.update(dict(
+    vocab_size=vocab_keep_items+12,
+    d_ff=256,
+    d_kv=8,
+    d_model=64,
+    hidden_size=256,
+    intermediate_size=256,
+    max_position_embeddings=64,
+    num_attention_heads=2,
+    num_decoder_layers=2,
+    num_heads=2,
+    num_hidden_layers=2,
+    num_layers=2,
+    relative_attention_num_buckets=32,
+))
+print("New config", config_tiny)
+
+### Model
+
+model_tiny = XLMRobertaForCausalLM(config_tiny)
+print(f"{mname_tiny}: num of params {model_tiny.num_parameters()}")
+model_tiny.resize_token_embeddings(len(tokenizer_fast_tiny))
+
+# Test
+inputs = tokenizer_fast_tiny("hello", return_tensors="pt")
+outputs = model_tiny(**inputs)
+print("Test with fast tokenizer:", len(outputs.logits[0]))
+
+# Save
+model_tiny.half()  # makes it smaller
+model_tiny.save_pretrained(".")
+tokenizer_fast_tiny.save_pretrained(".")
+
+readme = "README.md"
+if not os.path.exists(readme):
+    with open(readme, "w") as f:
+        f.write(f"This is a {mname_tiny} random model to be used for basic testing.\n")
+
+print(f"Generated {mname_tiny}")
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a325e5b573602ec69abc919b817e7a22c66cc54cf2823ff79c7f024caa947251
+size 8646808
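Only this three-line LFS pointer is stored in git history; the ~8.6 MB weight blob itself lives in LFS storage and is fetched on checkout. To confirm a downloaded copy matches the pointer, the recorded oid and size can be checked directly (the path is assumed to be a fully downloaded, i.e. smudged, file):

```python
import hashlib
from pathlib import Path

path = Path("model.safetensors")  # assumed local, fully downloaded copy

digest = hashlib.sha256(path.read_bytes()).hexdigest()
print(digest == "a325e5b573602ec69abc919b817e7a22c66cc54cf2823ff79c7f024caa947251")
print(path.stat().st_size == 8646808)  # size field from the pointer
```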
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb0bccafb4bee811f2138956ea9e94596e1bfdfc868b5364d7b678fac4b2d559
+size 4334436
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef67a60b933d0da430d4b839301301ada0179b0c71102b0eef4567386faa1588
+size 309222
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "sep_token": "</s>", "cls_token": "<s>", "unk_token": "<unk>", "pad_token": "<pad>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "sp_model_kwargs": {}, "tokenizer_class": "XLMRobertaTokenizer"}