Commit 03e9ea9 (1 parent: 94987f2)
Showing 6 changed files with 367 additions and 12 deletions.
@@ -0,0 +1,98 @@
############################################
#           Network Architecture           #
############################################
cmvn_file:
cmvn_file_type: "json"
# encoder related
encoder: conformer
encoder_conf:
    output_size: 256    # dimension of attention
    attention_heads: 4
    linear_units: 2048  # the number of units of position-wise feed forward
    num_blocks: 12      # the number of encoder blocks
    dropout_rate: 0.1   # sublayer output dropout
    positional_dropout_rate: 0.1
    attention_dropout_rate: 0.0
    input_layer: conv2d # encoder input type, you can choose conv2d, conv2d6 or conv2d8
    normalize_before: True
    cnn_module_kernel: 15
    use_cnn_module: True
    activation_type: 'swish'
    pos_enc_layer_type: 'rope_pos' # abs_pos, rel_pos, rope_pos
    selfattention_layer_type: 'rel_selfattn' # unused
    causal: true
    use_dynamic_chunk: true
    cnn_module_norm: 'layer_norm' # using nn.LayerNorm makes model converge faster
    use_dynamic_left_chunk: false
# decoder related
decoder: transformer # transformer, bitransformer
decoder_conf:
    attention_heads: 4
    linear_units: 2048
    num_blocks: 6
    r_num_blocks: 3     # only for bitransformer
    dropout_rate: 0.1   # sublayer output dropout
    positional_dropout_rate: 0.1
    self_attention_dropout_rate: 0.0
    src_attention_dropout_rate: 0.0
# hybrid CTC/attention
model_conf:
    ctc_weight: 0.3
    lsm_weight: 0.1     # label smoothing option
    reverse_weight: 0.3 # only for bitransformer
    length_normalized_loss: false
    init_type: 'kaiming_uniform' # !Warning: needed for convergence

###########################################
#                  Data                   #
###########################################
train_manifest: data/manifest.train
dev_manifest: data/manifest.dev
test_manifest: data/manifest.test

###########################################
#               Dataloader                #
###########################################
vocab_filepath: data/lang_char/vocab.txt
spm_model_prefix: ''
unit_type: 'char'
preprocess_config: conf/preprocess.yaml
feat_dim: 80
stride_ms: 10.0
window_ms: 25.0
sortagrad: 0    # Feed samples from shortest to longest; -1: enabled for all epochs, 0: disabled, other: enabled for 'other' epochs
batch_size: 32
maxlen_in: 512  # if input length > maxlen_in, batch size is automatically reduced
maxlen_out: 150 # if output length > maxlen_out, batch size is automatically reduced
minibatches: 0  # for debug
batch_count: auto
batch_bins: 0
batch_frames_in: 0
batch_frames_out: 0
batch_frames_inout: 0
num_workers: 2
subsampling_factor: 1
num_encs: 1

###########################################
#                Training                 #
###########################################
n_epoch: 240
accum_grad: 1
global_grad_clip: 5.0
dist_sampler: True
optim: adam
optim_conf:
    lr: 0.001
    weight_decay: 1.0e-6
scheduler: warmuplr
scheduler_conf:
    warmup_steps: 25000
    lr_decay: 1.0
log_interval: 100
checkpoint:
    kbest_n: 50
    latest_n: 5
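The warmuplr scheduler above ramps the learning rate up over the first 25000 steps and then decays it by inverse square root. A minimal sketch of that schedule as it is commonly defined in ESPnet/WeNet-style toolkits; the exact PaddleSpeech implementation may differ:

# Hedged sketch: a typical "warmuplr" schedule, not necessarily the
# exact PaddleSpeech code.
def warmup_lr(step: int, base_lr: float = 0.001, warmup_steps: int = 25000) -> float:
    """Linear ramp for warmup_steps, then inverse-sqrt decay."""
    step = max(step, 1)  # guard against division by zero at step 0
    return base_lr * warmup_steps ** 0.5 * min(step ** -0.5, step * warmup_steps ** -1.5)

# The peak (base_lr = 0.001) is reached exactly at step == warmup_steps,
# and has halved by step == 4 * warmup_steps:
assert abs(warmup_lr(25_000) - 0.001) < 1e-12
assert abs(warmup_lr(100_000) - 0.0005) < 1e-12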
@@ -0,0 +1,98 @@
############################################
#           Network Architecture           #
############################################
cmvn_file:
cmvn_file_type: "json"
# encoder related
encoder: conformer
encoder_conf:
    output_size: 256    # dimension of attention
    attention_heads: 4
    linear_units: 2048  # the number of units of position-wise feed forward
    num_blocks: 12      # the number of encoder blocks
    dropout_rate: 0.1   # sublayer output dropout
    positional_dropout_rate: 0.1
    attention_dropout_rate: 0.0
    input_layer: conv2d # encoder input type, you can choose conv2d, conv2d6 or conv2d8
    normalize_before: True
    cnn_module_kernel: 15
    use_cnn_module: True
    activation_type: 'swish'
    pos_enc_layer_type: 'rope_pos' # abs_pos, rel_pos, rope_pos
    selfattention_layer_type: 'rel_selfattn' # unused
    causal: true
    use_dynamic_chunk: true
    cnn_module_norm: 'layer_norm' # using nn.LayerNorm makes model converge faster
    use_dynamic_left_chunk: false
# decoder related
decoder: bitransformer # transformer, bitransformer
decoder_conf:
    attention_heads: 4
    linear_units: 2048
    num_blocks: 3
    r_num_blocks: 3     # only for bitransformer
    dropout_rate: 0.1   # sublayer output dropout
    positional_dropout_rate: 0.1
    self_attention_dropout_rate: 0.0
    src_attention_dropout_rate: 0.0
# hybrid CTC/attention
model_conf:
    ctc_weight: 0.3
    lsm_weight: 0.1     # label smoothing option
    reverse_weight: 0.3 # only for bitransformer
    length_normalized_loss: false
    init_type: 'kaiming_uniform' # !Warning: needed for convergence

###########################################
#                  Data                   #
###########################################
train_manifest: data/manifest.train
dev_manifest: data/manifest.dev
test_manifest: data/manifest.test

###########################################
#               Dataloader                #
###########################################
vocab_filepath: data/lang_char/vocab.txt
spm_model_prefix: ''
unit_type: 'char'
preprocess_config: conf/preprocess.yaml
feat_dim: 80
stride_ms: 10.0
window_ms: 25.0
sortagrad: 0    # Feed samples from shortest to longest; -1: enabled for all epochs, 0: disabled, other: enabled for 'other' epochs
batch_size: 32
maxlen_in: 512  # if input length > maxlen_in, batch size is automatically reduced
maxlen_out: 150 # if output length > maxlen_out, batch size is automatically reduced
minibatches: 0  # for debug
batch_count: auto
batch_bins: 0
batch_frames_in: 0
batch_frames_out: 0
batch_frames_inout: 0
num_workers: 2
subsampling_factor: 1
num_encs: 1

###########################################
#                Training                 #
###########################################
n_epoch: 240
accum_grad: 1
global_grad_clip: 5.0
dist_sampler: True
optim: adam
optim_conf:
    lr: 0.001
    weight_decay: 1.0e-6
scheduler: warmuplr
scheduler_conf:
    warmup_steps: 25000
    lr_decay: 1.0
log_interval: 100
checkpoint:
    kbest_n: 50
    latest_n: 5
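This second config swaps in the bitransformer decoder, which pairs 3 left-to-right blocks with 3 right-to-left blocks (r_num_blocks) whose attention loss is blended via reverse_weight, alongside the CTC branch weighted by ctc_weight. A minimal sketch of how such a hybrid CTC/attention loss is typically combined; variable names here are illustrative, not the PaddleSpeech API:

# Hedged sketch of hybrid CTC/attention loss mixing; illustrative only.
def hybrid_loss(loss_ctc: float, loss_att: float, loss_r_att: float,
                ctc_weight: float = 0.3, reverse_weight: float = 0.3) -> float:
    # Blend the left-to-right decoder loss with the right-to-left
    # (reverse) branch of the bitransformer.
    attn = (1.0 - reverse_weight) * loss_att + reverse_weight * loss_r_att
    # Interpolate between the CTC and attention objectives.
    return ctc_weight * loss_ctc + (1.0 - ctc_weight) * attn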