Commit 86758c3 (1 parent: df8cf24)

* Add DeepSpeed MoE

  Thanks to dayofthepenguin for extensive testing

  Closes #479

* Update NeoXArgs docs automatically
* pre-commit
* Update NeoXArgs docs automatically

Co-authored-by: Yang Zhang <[email protected]>
Co-authored-by: github-actions <[email protected]>
Co-authored-by: Quentin Anthony <[email protected]>

Showing 10 changed files with 434 additions and 31 deletions.
@@ -0,0 +1,103 @@
# GPT-2 pretraining setup
{
   # Have 4 experts per layer (every 2 layers by default).
   # So with 12 layers total:
   #   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
   # experts would be in layers:
   #   0, 2, 4, 6, 8, 10
   "num_experts": 4,

   # parallelism settings (you will want to change these based on your cluster setup,
   # ideally scheduling pipeline stages across the node boundaries)
   "pipe_parallel_size": 1,
   "model_parallel_size": 1,
   "moe_expert_parallel_size": 1,

   # model settings
   "num_layers": 12,
   "hidden_size": 768,
   "num_attention_heads": 12,
   "seq_length": 2048,
   "max_position_embeddings": 2048,
   "norm": "layernorm",
   "pos_emb": "rotary",
   "no_weight_tying": true,
   "gpt_j_residual": false,
   "output_layer_parallelism": "column",

   # these should provide some speedup but take a while to build; set to true if desired
   "scaled_upper_triang_masked_softmax_fusion": false,
   "bias_gelu_fusion": false,
   "rope_fusion": false,

   # init methods
   "init_method": "small_init",
   "output_layer_init_method": "wang_init",

   # optimizer settings
   "optimizer": {
     "type": "Adam",
     "params": {
       "lr": 0.0006,
       "betas": [0.9, 0.95],
       "eps": 1.0e-8,
     }
   },
   "min_lr": 0.00006,

   # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training
   "zero_optimization": {
     "stage": 1,
     "allgather_partitions": true,
     "allgather_bucket_size": 500000000,
     "overlap_comm": true,
     "reduce_scatter": true,
     "reduce_bucket_size": 500000000,
     "contiguous_gradients": true,
   },

   # batch / data settings
   "train_micro_batch_size_per_gpu": 4,
   "data_impl": "mmap",

   # activation checkpointing
   "checkpoint_activations": true,
   "checkpoint_num_layers": 1,
   "partition_activations": true,
   "synchronize_each_layer": true,

   # regularization
   "gradient_clipping": 1.0,
   "weight_decay": 0.1,
   "hidden_dropout": 0.0,
   "attention_dropout": 0.0,

   # precision settings
   "fp16": {
     "enabled": true,
     "loss_scale": 0,
     "loss_scale_window": 1000,
     "hysteresis": 2,
     "min_loss_scale": 1
   },

   # misc. training settings
   "train_iters": 320000,
   "lr_decay_iters": 320000,
   "distributed_backend": "nccl",
   "lr_decay_style": "cosine",
   "warmup": 0.01,
   "checkpoint_factor": 10000,
   "eval_interval": 1000,
   "eval_iters": 10,

   # logging
   "log_interval": 10,
   "steps_per_print": 10,
   "keep_last_n_checkpoints": 4,
   "wall_clock_breakdown": true,

   # networking
   "hostfile": "/mock_path"
}
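For reference, a minimal Python sketch of the expert placement described in the comment at the top of this config, assuming one MoE layer every fixed interval of transformer layers (interval 2 per the comment; the function name here is hypothetical, not gpt-neox source):

```python
# Minimal sketch (not gpt-neox source): which transformer layers get an
# MoE block when experts are placed every `expert_interval` layers.
def moe_layer_indices(num_layers: int, expert_interval: int = 2) -> list[int]:
    return [i for i in range(num_layers) if i % expert_interval == 0]

print(moe_layer_indices(12))  # [0, 2, 4, 6, 8, 10], matching the comment above
```

Assuming a config like this is saved under configs/, training would typically be launched through gpt-neox's launcher, e.g. `python ./deepy.py train.py configs/<your_config>.yml` (filename hypothetical).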
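The optimizer and decay settings above imply a warmup-then-cosine schedule: linear warmup over 1% of 320,000 iterations (3,200 steps) up to 6.0e-4, then cosine decay toward the min_lr of 6.0e-5. A rough sketch of that shape, illustrating the configured values rather than gpt-neox's exact scheduler implementation:

```python
import math

# Rough sketch of the schedule these settings describe; gpt-neox's actual
# scheduler may differ in edge cases (e.g. behavior past lr_decay_iters).
def lr_at(step: int,
          max_lr: float = 0.0006,      # "lr"
          min_lr: float = 0.00006,     # "min_lr"
          decay_iters: int = 320000,   # "lr_decay_iters"
          warmup_frac: float = 0.01):  # "warmup"
    warmup_iters = int(warmup_frac * decay_iters)
    if step < warmup_iters:
        return max_lr * step / warmup_iters  # linear warmup
    progress = (step - warmup_iters) / (decay_iters - warmup_iters)
    return min_lr + 0.5 * (max_lr - min_lr) * (1.0 + math.cos(math.pi * progress))

print(f"{lr_at(3200):.1e}")    # ~6.0e-4 at end of warmup
print(f"{lr_at(320000):.1e}")  # ~6.0e-5 at end of decay
```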