Upload tokenizer
- special_tokens_map.json +15 -1
- tokenizer.json +7 -7
- tokenizer_config.json +21 -1
special_tokens_map.json
CHANGED
@@ -1 +1,15 @@
-{
+{
+  "bos_token": "[CLS]",
+  "cls_token": "[CLS]",
+  "eos_token": "[SEP]",
+  "mask_token": {
+    "content": "[MASK]",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<pad>",
+  "sep_token": "[SEP]",
+  "unk_token": "<unk>"
+}
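The rewritten map follows the ALBERT convention of reusing [CLS]/[SEP] as BOS/EOS while keeping SentencePiece's <pad>/<unk>. A minimal sketch of how these fields surface after loading; the repo id below is a placeholder, not the actual one:

# A minimal sketch, assuming the files in this commit were pushed to a Hub
# repo; "your-namespace/your-albert-tokenizer" is a placeholder id.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("your-namespace/your-albert-tokenizer")

print(tok.bos_token, tok.cls_token)  # both "[CLS]"
print(tok.eos_token, tok.sep_token)  # both "[SEP]"
print(tok.pad_token, tok.unk_token)  # "<pad>" "<unk>"
# mask_token was saved with lstrip=True, so whitespace to its left is
# absorbed into the token when "[MASK]" appears mid-sentence.
print(tok.mask_token)                # "[MASK]"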
tokenizer.json
CHANGED
@@ -179,19 +179,19 @@
       ],
       [
         "t",
-        -0.
+        -0.9314656811857444
       ],
       [
-        "
-        -1.
+        "▁",
+        -1.6502370843623957
       ],
       [
-        "
-        -1.
+        "x",
+        -1.6502370843623972
       ],
       [
-        "
-        -1.
+        "e",
+        -1.6502370843623972
       ]
     ]
 }
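Each entry in this hunk is a [piece, log_probability] pair from the SentencePiece unigram vocabulary backing the tokenizer; the "▁" character marks a word-initial piece. A quick way to inspect these entries, assuming tokenizer.json sits in the working directory and uses the Unigram layout shown above ("model" -> "vocab" as [piece, score] pairs):

# Inspect the tail of the unigram vocab edited in this hunk.
import json

with open("tokenizer.json", encoding="utf-8") as f:
    data = json.load(f)

for piece, log_prob in data["model"]["vocab"][-4:]:
    print(f"{piece!r}: {log_prob}")
# Expected tail, per this hunk:
#   't': -0.9314656811857444
#   '▁': -1.6502370843623957   <- word-boundary marker
#   'x': -1.6502370843623972
#   'e': -1.6502370843623972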
tokenizer_config.json
CHANGED
@@ -1 +1,21 @@
-{
+{
+  "bos_token": "[CLS]",
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "eos_token": "[SEP]",
+  "keep_accents": false,
+  "mask_token": {
+    "__type": "AddedToken",
+    "content": "[MASK]",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "<pad>",
+  "remove_space": true,
+  "sep_token": "[SEP]",
+  "tokenizer_class": "AlbertTokenizer",
+  "unk_token": "<unk>"
+}
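Note that model_max_length here is not a real limit: 1000000000000000019884624838656 is the sentinel transformers writes when no maximum length was configured at save time (int(1e30), the nearest double to 10^30). Truncation will not kick in until a real limit is set. A sketch, again with a placeholder repo id:

# The huge model_max_length is a "no limit recorded" sentinel; set a real one
# before relying on truncation. Repo id is a placeholder.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("your-namespace/your-albert-tokenizer")
assert tok.model_max_length == int(1e30)  # sentinel, not a usable limit

tok.model_max_length = 512  # ALBERT checkpoints typically use 512 positions
ids = tok("a long input ...", truncation=True)["input_ids"]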