Example adapted from SeanNaren/deepspeech.pytorch — example.py (59 lines, 46 loc, 1.94 KB).
# Illustrative snippet showing how DeepSpeech training integrates with a
# TrainingState object for checkpointing/resume. This is example code:
# `cfg`, `model`, `optimizer`, `train_loader`, `train_dataset`, `loss_value`,
# `wer`, `cer`, the loggers and `checkpoint_handler` are defined elsewhere,
# and bare `...` lines mark elided code.

# Continuing from previous training state
if cfg.checkpointing.continue_from:
    # Deserialize a prior TrainingState from the checkpoint path; the model
    # stored in the state replaces any freshly-constructed one.
    state = TrainingState.load_state(state_path=to_absolute_path(cfg.checkpointing.continue_from))
    model = state.model
    if cfg.training.finetune:
        # Reset epoch/result bookkeeping so fine-tuning starts from epoch 0
        # while keeping the loaded model weights.
        state.init_finetune_states(cfg.training.epochs)
    # Restore visualization metrics
    if cfg.visualization.visdom:
        visdom_logger.load_previous_values(state.epoch, state.results)
    if cfg.visualization.tensorboard:
        tensorboard_logger.load_previous_values(state.epoch, state.results)
else:
    ...
    # Initialize new training state
    state = TrainingState(model=model)
    state.init_results_tracking(epochs=cfg.training.epochs)
...
# Wrap model and optimizer for NVIDIA Apex mixed-precision training.
model, optimizer = amp.initialize(model, optimizer,
                                  opt_level=cfg.apex.opt_level,
                                  loss_scale=cfg.apex.loss_scale)
# Load previous optimizer/Automatic Mixed Precision (AMP) states before training begins
if state.optim_state is not None:
    optimizer.load_state_dict(state.optim_state)
    # NOTE(review): assumes amp_state is always present whenever optim_state
    # is — confirm against TrainingState's save/load implementation.
    amp.load_state_dict(state.amp_state)
# Track states for optimizer/AMP
state.track_optim_state(optimizer)
state.track_amp_state(amp)
...
# Begin DeepSpeech training
for epoch in range(state.epoch, cfg.training.epochs):
    state.set_epoch(epoch=epoch)
    ...
    # start=state.training_step lets a resumed run continue mid-epoch rather
    # than replaying already-seen batches.
    for i, (data) in enumerate(train_loader, start=state.training_step):
        state.set_training_step(training_step=i)
        ...  # Training step
        state.avg_loss += loss_value  # Record loss value in state
    # Record end of epoch loss
    state.avg_loss /= len(train_dataset)
    # Record metrics for visualization
    state.add_results(epoch=epoch,
                      loss_result=state.avg_loss,
                      wer_result=wer,
                      cer_result=cer)
    # Save model state
    checkpoint_handler.save_checkpoint_model(epoch=epoch, state=state)
    state.reset_training_step()  # Reset state training step for next epoch