Add GruNonlinearityComponent(by Dan) and OutputGruNonlinearityCompone… #2712

Merged · 1 commit · Nov 27, 2018
egs/swbd/s5c/local/chain/tuning/run_tdnn_opgru_1a.sh (25 changes: 15 additions, 10 deletions)
@@ -4,31 +4,36 @@

# This is based on TDNN_LSTM_1b, but uses the NormOPGRU in place of the LSTMP
# and adds chunk-{left,right}-context-initial=0.
# For the details of OPGRU structure, please check the paper
# "Output-Gate Projected Gated Recurrent Unit for Speech Recognition"
# by Gaofeng Cheng et al,
# https://www.danielpovey.com/files/2018_interspeech_opgru.pdf

# Unlike the vanilla OPGRU, Norm-OPGRU adds a batchnorm component to its output (forward direction)
# and a renorm component to its recurrence. Experiments show that TDNN-NormOPGRU achieves results
# similar to those of TDNN-LSTMP and BLSTMP on both large and small data sets (80 ~ 2300 hours).

# ./local/chain/compare_wer_general.sh --looped tdnn_lstm_1e_sp tdnn_opgru_1a_sp
Review comment (Contributor):

It would be great if you can help me add the reference (https://www.danielpovey.com/files/2018_interspeech_opgru.pdf) to all the {O}PGRU scripts.

# System                       tdnn_lstm_1e_sp   tdnn_opgru_1a_sp
-# WER on train_dev(tg)             12.81             12.39
-# [looped:]                        12.93             12.32
-# WER on train_dev(fg)             11.92             11.39
-# [looped:]                        12.07             11.35
+# WER on train_dev(tg)             12.81             12.31
+# [looped:]                        12.93             12.26
+# WER on train_dev(fg)             11.92             11.60
+# [looped:]                        12.07             11.65
# WER on eval2000(tg)               15.6              15.1
# [looped:]                         16.0              15.1
-# WER on eval2000(fg)              14.1              13.6
+# WER on eval2000(fg)              14.1              13.5
# [looped:]                         14.5              13.5
-# Final train prob                -0.065            -0.066
-# Final valid prob                -0.087            -0.085
-# Final train prob (xent)         -0.918            -0.889
-# Final valid prob (xent)         -1.0309           -0.9837
+# Final train prob                -0.065            -0.068
+# Final valid prob                -0.087            -0.091
+# Final train prob (xent)         -0.918            -0.879
+# Final valid prob (xent)         -1.0309           -0.9667



set -e

# configs for 'chain'
-stage=12
+stage=0
train_stage=-10
get_egs_stage=-10
speed_perturb=true
egs/swbd/s5c/local/chain/tuning/run_tdnn_opgru_1b.sh (new file: 315 additions, 0 deletions)
@@ -0,0 +1,315 @@
#!/bin/bash
# Apache 2.0

# This is based on TDNN_OPGRU_1A, but uses the FastNormOPGRU in place of the NormOPGRU.
# For the details of OPGRU structure, please check the paper
# "Output-Gate Projected Gated Recurrent Unit for Speech Recognition"
# by Gaofeng Cheng et al,
# https://www.danielpovey.com/files/2018_interspeech_opgru.pdf

# Unlike the vanilla OPGRU, Norm-OPGRU adds a batchnorm component to its output (forward direction)
# and a renorm component to its recurrence. Experiments show that TDNN-NormOPGRU achieves results
# similar to those of TDNN-LSTMP and BLSTMP on both large and small data sets (80 ~ 2300 hours).
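# In xconfig terms, the change from 1a is essentially just the recurrent layer
# type; the layer dimensions stay the same.  A rough sketch of the substitution
# (the 1a line is assumed to use the same dimensions; see stage 12 below for
# the exact line used in this script, including the $gru_opts dropout options):
#   1a:  norm-opgru-layer      name=opgru1 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3
#   1b:  fast-norm-opgru-layer name=opgru1 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3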

# ./local/chain/compare_wer_general.sh --looped tdnn_opgru_1a_sp tdnn_opgru_1b_sp
# System                       tdnn_opgru_1a_sp  tdnn_opgru_1b_sp
# WER on train_dev(tg)              12.31             12.41
# [looped:]                         12.26             12.38
# WER on train_dev(fg)              11.49             11.60
# [looped:]                         11.43             11.65
# WER on eval2000(tg)               14.9              15.1
# [looped:]                         15.0              15.1
# WER on eval2000(fg)               13.5              13.7
# [looped:]                         13.5              13.7
# Final train prob                 -0.068            -0.070
# Final valid prob                 -0.091            -0.092
# Final train prob (xent)          -0.879            -0.889
# Final valid prob (xent)          -0.9667           -0.9723



set -e

# configs for 'chain'
stage=0
train_stage=-10
get_egs_stage=-10
speed_perturb=true
dir=exp/chain/tdnn_opgru_1b # Note: _sp will get added to this if $speed_perturb == true.
decode_iter=
decode_dir_affix=

# training options
leftmost_questions_truncate=-1
chunk_width=150
chunk_left_context=40
chunk_right_context=0
xent_regularize=0.025
self_repair_scale=0.00001
label_delay=5
dropout_schedule='0,0@0.20,0.5@0.50,0'  # see the note after the configuration section
# decode options
extra_left_context=50
extra_right_context=0
frames_per_chunk=
test_online_decoding=false  # set to true to also run the online-decoding test in stage 16

remove_egs=false
common_egs_dir=

affix=
# End configuration section.
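# Note on the dropout schedule above (illustration only, not used by the recipe):
# a schedule like '0,0@0.20,0.5@0.50,0' is interpreted by the nnet3 training
# scripts as a piecewise-linear function of the fraction of training data
# processed: dropout stays at 0 until 20% of training, rises linearly to 0.5 at
# 50%, and falls back to 0 by the end.  A hypothetical helper that evaluates it:
#   dropout_at() {   # usage: dropout_at 0.35   -> prints 0.25
#     echo "$1" | awk '{f=$1;
#       if (f <= 0.20)      print 0.0;
#       else if (f <= 0.50) print 0.5*(f-0.20)/0.30;
#       else                print 0.5*(1.0-f)/0.50; }'
#   }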
echo "$0 $@" # Print the command line for logging

. ./cmd.sh
. ./path.sh
. ./utils/parse_options.sh

if ! cuda-compiled; then
cat <<EOF && exit 1
This script is intended to be used with GPUs, but you have not compiled Kaldi with CUDA.
If you want to use GPUs (and have them), go to src/, and run configure and make on a machine
where "nvcc" is installed.
EOF
fi

# The iVector-extraction and feature-dumping parts are the same as the standard
# nnet3 setup, and you can skip them by setting "--stage 8" if you have already
# run those things.
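# For example (purely illustrative), to rerun only the chain-specific parts
# once the features and iVectors already exist:
#   ./local/chain/tuning/run_tdnn_opgru_1b.sh --stage 8
# or, to rebuild only the network config and rerun training (assuming the
# lattices, topology and tree from stages 9-11 already exist):
#   ./local/chain/tuning/run_tdnn_opgru_1b.sh --stage 12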

suffix=
if [ "$speed_perturb" == "true" ]; then
suffix=_sp
fi

dir=$dir${affix:+_$affix}
dir=${dir}$suffix
train_set=train_nodup$suffix
ali_dir=exp/tri4_ali_nodup$suffix
treedir=exp/chain/tri5_7d_tree$suffix
lang=data/lang_chain_2y


# if we are using the speed-perturbed data we need to generate
# alignments for it.
local/nnet3/run_ivector_common.sh --stage $stage \
--speed-perturb $speed_perturb \
--generate-alignments $speed_perturb || exit 1;


if [ $stage -le 9 ]; then
# Get the alignments as lattices (gives the chain training more freedom).
# Use the same num-jobs as the alignments.
nj=$(cat exp/tri4_ali_nodup$suffix/num_jobs) || exit 1;
steps/align_fmllr_lats.sh --nj $nj --cmd "$train_cmd" data/$train_set \
data/lang exp/tri4 exp/tri4_lats_nodup$suffix
rm exp/tri4_lats_nodup$suffix/fsts.*.gz # save space
fi


if [ $stage -le 10 ]; then
# Create a version of the lang/ directory that has one state per phone in the
# topo file. [Note: it really has two states; the first one occurs exactly
# once, and the second has zero or more repeats.]
rm -rf $lang
cp -r data/lang $lang
silphonelist=$(cat $lang/phones/silence.csl) || exit 1;
nonsilphonelist=$(cat $lang/phones/nonsilence.csl) || exit 1;
# Use our special topology... note that later on we may have to tune this
# topology.
steps/nnet3/chain/gen_topo.py $nonsilphonelist $silphonelist >$lang/topo
fi

if [ $stage -le 11 ]; then
# Build a tree using our new topology.
steps/nnet3/chain/build_tree.sh --frame-subsampling-factor 3 \
--leftmost-questions-truncate $leftmost_questions_truncate \
--context-opts "--context-width=2 --central-position=1" \
--cmd "$train_cmd" 7000 data/$train_set $lang $ali_dir $treedir
fi
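# Illustration (not part of the recipe): once the tree has been built, the
# number of context-dependent targets the network will predict can be checked
# with, e.g. (path shown for the speed-perturbed setup):
#   tree-info exp/chain/tri5_7d_tree_sp/tree | grep num-pdfs
# Stage 12 below does exactly this to set num_targets.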

if [ $stage -le 12 ]; then
echo "$0: creating neural net configs using the xconfig parser";

num_targets=$(tree-info $treedir/tree |grep num-pdfs|awk '{print $2}')
learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python)
gru_opts="dropout-per-frame=true dropout-proportion=0.0 gru-nonlinearity-options=\"max-change=0.75\""

mkdir -p $dir/configs
cat <<EOF > $dir/configs/network.xconfig
input dim=100 name=ivector
input dim=40 name=input

# please note that it is important to have an input layer with name=input
# as the layer immediately preceding the fixed-affine-layer, in order to enable
# the use of the short notation for the descriptor
fixed-affine-layer name=lda input=Append(-2,-1,0,1,2,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat

# the first splicing is moved before the lda layer, so no splicing here
relu-batchnorm-layer name=tdnn1 dim=1024
relu-batchnorm-layer name=tdnn2 input=Append(-1,0,1) dim=1024
relu-batchnorm-layer name=tdnn3 input=Append(-1,0,1) dim=1024

# check steps/libs/nnet3/xconfig/gru.py for the other options and defaults
fast-norm-opgru-layer name=opgru1 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 $gru_opts
relu-batchnorm-layer name=tdnn4 input=Append(-3,0,3) dim=1024
relu-batchnorm-layer name=tdnn5 input=Append(-3,0,3) dim=1024
fast-norm-opgru-layer name=opgru2 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 $gru_opts
relu-batchnorm-layer name=tdnn6 input=Append(-3,0,3) dim=1024
relu-batchnorm-layer name=tdnn7 input=Append(-3,0,3) dim=1024
fast-norm-opgru-layer name=opgru3 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 $gru_opts

## adding the layers for chain branch
output-layer name=output input=opgru3 output-delay=$label_delay include-log-softmax=false dim=$num_targets max-change=1.5

# adding the layers for xent branch
# This block prints the configs for a separate output that will be
# trained with a cross-entropy objective in the 'chain' models... this
# has the effect of regularizing the hidden parts of the model. We use
# 0.5 / args.xent_regularize as the learning rate factor; this factor is
# suitable because it means the xent final-layer learns at a rate
# independent of the regularization
# constant; and the 0.5 was tuned so as to make the relative progress
# similar in the xent and regular final layers.
output-layer name=output-xent input=opgru3 output-delay=$label_delay dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5

EOF
steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/
fi
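# Note (illustration only): with the default xent_regularize=0.025 above, the
# xent branch's learning-rate factor works out to 0.5 / 0.025 = 20.  The
# command used above relies on python2-style "print"; a python3-compatible
# equivalent would be, e.g.:
#   learning_rate_factor=$(python -c "print(0.5/$xent_regularize)")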

if [ $stage -le 13 ]; then
if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then
utils/create_split_dir.pl \
/export/b0{5,6,7,8}/$USER/kaldi-data/egs/swbd-$(date +'%m_%d_%H_%M')/s5c/$dir/egs/storage $dir/egs/storage
fi

steps/nnet3/chain/train.py --stage $train_stage \
--cmd "$decode_cmd" \
--feat.online-ivector-dir exp/nnet3/ivectors_${train_set} \
--feat.cmvn-opts "--norm-means=false --norm-vars=false" \
--chain.xent-regularize $xent_regularize \
--chain.leaky-hmm-coefficient 0.1 \
--chain.l2-regularize 0.00005 \
--chain.apply-deriv-weights false \
--chain.lm-opts="--num-extra-lm-states=2000" \
--trainer.num-chunk-per-minibatch 64 \
--trainer.frames-per-iter 1200000 \
--trainer.max-param-change 2.0 \
--trainer.num-epochs 4 \
--trainer.optimization.shrink-value 0.99 \
--trainer.optimization.num-jobs-initial 3 \
--trainer.optimization.num-jobs-final 16 \
--trainer.optimization.initial-effective-lrate 0.001 \
--trainer.optimization.final-effective-lrate 0.0001 \
--trainer.optimization.momentum 0.0 \
--trainer.deriv-truncate-margin 8 \
--egs.stage $get_egs_stage \
--egs.opts "--frames-overlap-per-eg 0" \
--egs.chunk-width $chunk_width \
--egs.chunk-left-context $chunk_left_context \
--egs.chunk-right-context $chunk_right_context \
--trainer.dropout-schedule $dropout_schedule \
--egs.chunk-left-context-initial 0 \
--egs.chunk-right-context-final 0 \
--egs.dir "$common_egs_dir" \
--cleanup.remove-egs $remove_egs \
--feat-dir data/${train_set}_hires \
--tree-dir $treedir \
--lat-dir exp/tri4_lats_nodup$suffix \
--dir $dir || exit 1;
fi

if [ $stage -le 14 ]; then
# Note: it might appear that this $lang directory is mismatched, and it is as
# far as the 'topo' is concerned, but this script doesn't read the 'topo' from
# the lang directory.
utils/mkgraph.sh --self-loop-scale 1.0 data/lang_sw1_tg $dir $dir/graph_sw1_tg
fi

decode_suff=sw1_tg
graph_dir=$dir/graph_sw1_tg
if [ $stage -le 15 ]; then
[ -z $extra_left_context ] && extra_left_context=$chunk_left_context;
[ -z $extra_right_context ] && extra_right_context=$chunk_right_context;
[ -z $frames_per_chunk ] && frames_per_chunk=$chunk_width;
iter_opts=
if [ ! -z $decode_iter ]; then
iter_opts=" --iter $decode_iter "
fi
for decode_set in train_dev eval2000; do
(
steps/nnet3/decode.sh --acwt 1.0 --post-decode-acwt 10.0 \
--nj 50 --cmd "$decode_cmd" $iter_opts \
--extra-left-context $extra_left_context \
--extra-right-context $extra_right_context \
--extra-left-context-initial 0 \
--extra-right-context-final 0 \
--frames-per-chunk "$frames_per_chunk" \
--online-ivector-dir exp/nnet3/ivectors_${decode_set} \
$graph_dir data/${decode_set}_hires \
$dir/decode_${decode_set}${decode_dir_affix:+_$decode_dir_affix}_${decode_suff} || exit 1;
if $has_fisher; then
steps/lmrescore_const_arpa.sh --cmd "$decode_cmd" \
data/lang_sw1_{tg,fsh_fg} data/${decode_set}_hires \
$dir/decode_${decode_set}${decode_dir_affix:+_$decode_dir_affix}_sw1_{tg,fsh_fg} || exit 1;
fi
) &
done
fi

if $test_online_decoding && [ $stage -le 16 ]; then
# note: if the features change (e.g. you add pitch features), you will have to
# change the options of the following command line.
steps/online/nnet3/prepare_online_decoding.sh \
--mfcc-config conf/mfcc_hires.conf \
$lang exp/nnet3/extractor $dir ${dir}_online

rm $dir/.error 2>/dev/null || true
for decode_set in train_dev eval2000; do
(
# note: we just give it "$decode_set" as it only uses the wav.scp, the
# feature type does not matter.
steps/online/nnet3/decode.sh --nj 50 --cmd "$decode_cmd" $iter_opts \
--acwt 1.0 --post-decode-acwt 10.0 \
$graph_dir data/${decode_set}_hires \
${dir}_online/decode_${decode_set}${decode_iter:+_$decode_iter}_sw1_tg || exit 1;
if $has_fisher; then
steps/lmrescore_const_arpa.sh --cmd "$decode_cmd" \
data/lang_sw1_{tg,fsh_fg} data/${decode_set}_hires \
${dir}_online/decode_${decode_set}${decode_iter:+_$decode_iter}_sw1_{tg,fsh_fg} || exit 1;
fi
) || touch $dir/.error &
done
wait
if [ -f $dir/.error ]; then
echo "$0: something went wrong in online decoding"
exit 1
fi
fi

if [ $stage -le 17 ]; then
rm $dir/.error 2>/dev/null || true
for decode_set in train_dev eval2000; do
(
steps/nnet3/decode_looped.sh \
--acwt 1.0 --post-decode-acwt 10.0 \
--nj 50 --cmd "$decode_cmd" $iter_opts \
--online-ivector-dir exp/nnet3/ivectors_${decode_set} \
$graph_dir data/${decode_set}_hires \
$dir/decode_${decode_set}${decode_iter:+_$decode_iter}_sw1_tg_looped || exit 1;
if $has_fisher; then
steps/lmrescore_const_arpa.sh --cmd "$decode_cmd" \
data/lang_sw1_{tg,fsh_fg} data/${decode_set}_hires \
$dir/decode_${decode_set}${decode_iter:+_$decode_iter}_sw1_{tg,fsh_fg}_looped || exit 1;
fi
) &
done
wait
if [ -f $dir/.error ]; then
echo "$0: something went wrong in looped decoding"
exit 1
fi
fi
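# Once all decodes (including the looped ones above) have finished, the WER
# summary at the top of this script can be regenerated with:
#   local/chain/compare_wer_general.sh --looped tdnn_opgru_1a_sp tdnn_opgru_1b_sp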

wait;
exit 0;