patrickvonplaten committed
Commit 919b2aa · 1 Parent(s): 01e34b6

upload
Browse files

- config.json +1 -2
- create_model_files.py +48 -0
- preprocessor_config.json +9 -0
- pytorch_model.bin +1 -1
- run.sh +2 -0
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- vocab.json +1 -0
config.json CHANGED
@@ -1,5 +1,4 @@
 {
-  "_name_or_path": "./wav2vec2_tiny_random/",
   "activation_dropout": 0.1,
   "apply_spec_augment": true,
   "architectures": [
@@ -48,5 +47,5 @@
   "num_hidden_layers": 2,
   "pad_token_id": 0,
   "transformers_version": "4.6.0.dev0",
-  "vocab_size":
+  "vocab_size": 12
 }
create_model_files.py ADDED
@@ -0,0 +1,48 @@
+#!/usr/bin/env python3
+from transformers import Wav2Vec2Processor, Wav2Vec2FeatureExtractor, Wav2Vec2CTCTokenizer, Wav2Vec2Config, Wav2Vec2Model
+import json
+
+
+conf = Wav2Vec2Config()
+
+conf.conv_dim = [64, 64]
+conf.conv_kernel = [40, 40]
+conf.conv_stride = [30, 30]
+conf.num_feat_extract_layers = 2
+
+conf.hidden_size = 64
+conf.num_conv_pos_embeddings = 64
+conf.num_hidden_layers = 2
+conf.vocab_size = 12
+conf.intermediate_size = 128
+conf.num_conv_pos_embedding_groups = 4
+conf.num_attention_heads = 2
+
+model = Wav2Vec2Model(conf)
+
+vocab = {
+    "a": 0,
+    "b": 1,
+    "c": 2,
+    "d": 3,
+    "e": 4,
+    "f": 5,
+    "g": 6,
+    "<s>": 7,
+    "</s>": 8,
+    "<unk>": 9,
+    "<pad>": 10,
+    "|": 11
+}
+
+with open("vocab.json", "w") as f:
+    f.write(json.dumps(vocab, ensure_ascii=False))
+
+
+tok = Wav2Vec2CTCTokenizer("./vocab.json")
+extract = Wav2Vec2FeatureExtractor()
+
+processor = Wav2Vec2Processor(tokenizer=tok, feature_extractor=extract)
+
+processor.save_pretrained("wav2vec2_tiny_random")
+model.save_pretrained("wav2vec2_tiny_random")
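As a rough usage sketch (not part of the commit), the saved tiny model and processor can be exercised with a dummy waveform; the directory name matches the save_pretrained calls above, everything else is illustrative:

import torch
from transformers import Wav2Vec2Model, Wav2Vec2Processor

model = Wav2Vec2Model.from_pretrained("./wav2vec2_tiny_random")
processor = Wav2Vec2Processor.from_pretrained("./wav2vec2_tiny_random")

# One second of silence at 16 kHz, just enough to run a forward pass.
dummy_audio = [0.0] * 16000
inputs = processor(dummy_audio, sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    out = model(inputs.input_values)

print(out.last_hidden_state.shape)  # last dimension is 64 (hidden_size above)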
preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+{
+  "do_normalize": true,
+  "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+  "feature_size": 1,
+  "padding_side": "right",
+  "padding_value": 0.0,
+  "return_attention_mask": false,
+  "sampling_rate": 16000
+}
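A small illustration (a sketch; the directory path is assumed) of how this preprocessor config is consumed when the feature extractor is loaded back:

from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor.from_pretrained("./wav2vec2_tiny_random")

# With do_normalize=true the raw waveform is zero-mean/unit-variance normalized;
# feature_size=1 means a single channel of float values per sample.
features = extractor([0.1, -0.2, 0.3], sampling_rate=16000, return_tensors="np")
print(features.input_values.shape)  # (1, 3)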
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:ec0c03140661697880dc1345563e692cc762a4a78c17bdce138c2180abfe7691
 size 1232269
run.sh ADDED
@@ -0,0 +1,2 @@
+#!/usr/bin/env bash
+python ../transformers/examples/research_projects/wav2vec2/run_asr.py --output_dir=output_dir --num_train_epochs=1 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --evaluation_strategy=steps --save_steps=500 --eval_steps=100 --logging_steps=50 --learning_rate=5e-4 --warmup_steps=3000 --model_name_or_path=./wav2vec2_tiny_random --dataset_name=patrickvonplaten/librispeech_asr_dummy --dataset_config_name=clean --train_split_name=validation --validation_split_name=validation --orthography=timit --preprocessing_num_workers=1 --group_by_length --freeze_feature_extractor --verbose_logging
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+{"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>", "do_lower_case": false, "word_delimiter_token": "|"}
vocab.json ADDED
@@ -0,0 +1 @@
+{"a": 0, "b": 1, "c": 2, "d": 3, "e": 4, "f": 5, "g": 6, "<s>": 7, "</s>": 8, "<unk>": 9, "<pad>": 10, "|": 11}
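Finally, a small decoding sketch (not part of the commit) showing how this vocab behaves under the CTC tokenizer; the ids and the vocab path are illustrative:

from transformers import Wav2Vec2CTCTokenizer

tok = Wav2Vec2CTCTokenizer(
    "./vocab.json",
    unk_token="<unk>",
    pad_token="<pad>",
    word_delimiter_token="|",
)

# "<pad>" (id 10) doubles as the CTC blank and is dropped, consecutive repeats
# are collapsed, and "|" (id 11) becomes the space between words.
ids = [0, 0, 1, 10, 2, 11, 3, 4]
print(tok.decode(ids))  # "abc de"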