forked from EleutherAI/gpt-neox
Showing 1,571 changed files with 222,581 additions and 85 deletions.
@@ -0,0 +1,100 @@
{
  # parallelism settings (you will want to change these based on your cluster setup,
  # ideally scheduling pipeline stages across the node boundaries)
  "pipe-parallel-size": 1,
  "model-parallel-size": 1,

  # model settings
  "num-layers": 24,
  "hidden-size": 2048,
  "num-attention-heads": 16,
  "seq-length": 2048,
  "max-position-embeddings": 2048,
  "norm": "layernorm",
  "pos-emb": "rotary",
  "no-weight-tying": true,
  "gpt_j_residual": false,
  "output_layer_parallelism": "column",
  "attention_config": [[["flash"], 24]],

  # these should provide some speedup but take a while to build; set to true if desired
  "scaled-upper-triang-masked-softmax-fusion": false,
  "bias-gelu-fusion": false,

  # init methods
  "init_method": "small_init",
  "output_layer_init_method": "wang_init",

  # optimizer settings
  "optimizer": {
    "type": "Adam",
    "params": {
      "lr": 0.0002,
      "betas": [0.9, 0.95],
      "eps": 1.0e-8,
    }
  },
  "min_lr": 0.00002,

  # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training
  "zero_optimization": {
    "stage": 1,
    "allgather_partitions": True,
    "allgather_bucket_size": 500000000,
    "overlap_comm": True,
    "reduce_scatter": True,
    "reduce_bucket_size": 500000000,
    "contiguous_gradients": True,
  },

  # batch / data settings
  "train_micro_batch_size_per_gpu": 64,
  "data-impl": "mmap",

  # activation checkpointing
  "checkpoint-activations": true,
  "checkpoint-num-layers": 1,
  "partition-activations": true,
  "synchronize-each-layer": true,

  # regularization
  "gradient_clipping": 1.0,
  "weight-decay": 0.1,
  "hidden-dropout": 0,
  "attention-dropout": 0,

  # precision settings
  # "fp16": {
  #   "fp16": true,
  #   "enabled": true,
  #   "loss_scale": 0,
  #   "loss_scale_window": 1000,
  #   "hysteresis": 2,
  #   "min_loss_scale": 1
  # },

  "bf16": {
    "enabled": true
  },

  # misc. training settings
  "train-iters": 60000,
  "lr-decay-iters": 60000,
  "distributed-backend": "nccl",
  "lr-decay-style": "cosine",
  "warmup": 0.01,
  "checkpoint-factor": 5000,
  "eval-interval": 9999999,
  "eval-iters": 100,

  # logging
  "log-interval": 5,
  "steps_per_print": 5,
  "keep-last-n-checkpoints": 4,
  "wall_clock_breakdown": true,

  "vocab-file": "/fs/archive/share/yulan/tokenizer/yulan_v1/tokenizer.model",
  "tokenizer_type": "LlamaTokenizer",
}
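For reference, the model settings above roughly pin down the parameter count, which can be sanity-checked against the "1-3B" naming in the checkpoint paths elsewhere in this commit. A back-of-the-envelope sketch in Python; the vocabulary size is an assumption (32,000, the Llama default), since it depends on the yulan_v1 tokenizer and is not stated in this config:

# Rough parameter count implied by the model settings above.
# V is an ASSUMPTION (32,000, the Llama default); the actual
# yulan_v1 tokenizer's vocab size may differ.
L, H, V = 24, 2048, 32000
transformer = 12 * L * H**2   # ~4H^2 (attention) + ~8H^2 (MLP) per layer
embeddings = 2 * V * H        # separate input/output embeddings ("no-weight-tying": true)
print(f"{(transformer + embeddings) / 1e9:.2f}B")  # ~1.34B, consistent with a "1-3B" model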
@@ -0,0 +1,98 @@
# GPT-2 pretraining setup
{
  # parallelism settings (you will want to change these based on your cluster setup,
  # ideally scheduling pipeline stages across the node boundaries)
  "pipe-parallel-size": 1,
  "model-parallel-size": 1,

  # model settings
  "num-layers": 24,
  "hidden-size": 2048,
  "num-attention-heads": 16,
  "seq-length": 2048,
  "max-position-embeddings": 2048,
  "norm": "layernorm",
  "pos-emb": "rotary",
  "no-weight-tying": true,
  "gpt_j_residual": false,
  "output_layer_parallelism": "column",
  "attention_config": [[["flash"], 24]],

  # these should provide some speedup but take a while to build; set to true if desired
  "scaled-upper-triang-masked-softmax-fusion": false,
  "bias-gelu-fusion": false,

  # init methods
  "init_method": "small_init",
  "output_layer_init_method": "wang_init",

  # optimizer settings
  "optimizer": {
    "type": "Adam",
    "params": {
      "lr": 0.0002,
      "betas": [0.9, 0.95],
      "eps": 1.0e-8,
    }
  },
  "min_lr": 0.00002,

  # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training
  "zero_optimization": {
    "stage": 1,
    "allgather_partitions": True,
    "allgather_bucket_size": 500000000,
    "overlap_comm": True,
    "reduce_scatter": True,
    "reduce_bucket_size": 500000000,
    "contiguous_gradients": True,
  },

  # batch / data settings
  "train_micro_batch_size_per_gpu": 16,
  "data-impl": "mmap",

  # activation checkpointing
  "checkpoint-activations": true,
  "checkpoint-num-layers": 1,
  "partition-activations": true,
  "synchronize-each-layer": true,

  # regularization
  "gradient_clipping": 1.0,
  "weight-decay": 0.1,
  "hidden-dropout": 0,
  "attention-dropout": 0,

  # precision settings
  # "fp16": {
  #   "fp16": true,
  #   "enabled": true,
  #   "loss_scale": 0,
  #   "loss_scale_window": 1000,
  #   "hysteresis": 2,
  #   "min_loss_scale": 1
  # },

  "bf16": {
    "enabled": true
  },

  # misc. training settings
  "train-iters": 10,
  "lr-decay-iters": 10,
  "distributed-backend": "nccl",
  "lr-decay-style": "cosine",
  "warmup": 0.01,
  "checkpoint-factor": 1000,
  "eval-interval": 9999999,
  "eval-iters": 10,

  # logging
  "log-interval": 5,
  "steps_per_print": 5,
  "keep-last-n-checkpoints": 5,
  "wall_clock_breakdown": true,
}
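With only 10 training iterations, this variant reads as a smoke-test config. A quick sketch of how many tokens such a run touches, assuming a data-parallel degree of 1 and no gradient accumulation (neither is specified in this file):

# Tokens seen by the 10-iteration run above, under the ASSUMPTION of
# data parallelism 1 and no gradient accumulation.
micro_batch, seq_len, iters = 16, 2048, 10
print(micro_batch * seq_len * iters)  # 327680 tokens per GPU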
@@ -0,0 +1,46 @@
# Suggested data paths when using GPT-NeoX locally
{
  # "data-path": "./data/pile_00_text_document",
  # or for weighted datasets:
  # [60237255867, 19377987678, 14319702200, 4109669539, 29457192315, 7555184698, 24906226377, 12067123213]
  "train-data-paths": [
    "/fs/archive/share/yulan/tokenized_data/yulan_v1/zhihu/train_0_text_document",
    "/fs/archive/share/yulan/tokenized_data/yulan_v1/wiki_cn/train_0_text_document",
    "/fs/archive/share/yulan/tokenized_data/yulan_v1/cicg_cn/train_0_text_document",
    "/fs/archive/share/yulan/tokenized_data/yulan_v1/books_cn/train_0_text_document",
    "/fs/archive/share/yulan/tokenized_data/yulan_v1/law/train_0_text_document",
    "/fs/archive/share/yulan/tokenized_data/yulan_v1/pilecc/train_0_text_document",
    "/fs/archive/share/yulan/tokenized_data/yulan_v1/github/train_0_text_document",
    "/fs/archive/share/yulan/tokenized_data/yulan_v1/wiki_en/train_0_text_document",
    "/fs/archive/share/yulan/tokenized_data/yulan_v1/books_en/train_0_text_document",
    "/fs/archive/share/yulan/tokenized_data/yulan_v1/gutenberg/train_0_text_document",
    "/fs/archive/share/yulan/tokenized_data/yulan_v1/arxiv/train_0_text_document",
    "/fs/archive/share/yulan/tokenized_data/yulan_v1/stack_exchange/train_0_text_document",
  ],

  "test-data-paths": ["/fs/archive/share/yulan/tokenized_data/yulan_v1/wiki_en/train_0_text_document"],
  "valid-data-paths": ["/fs/archive/share/yulan/tokenized_data/yulan_v1/wiki_en/train_0_text_document"],
  "train-data-weights": [1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
  "test-data-weights": [1.],
  "valid-data-weights": [1.],

  # If weight_by_num_documents is true, builds dataset weights from a multinomial distribution
  # over groups of data according to the number of documents in each group.
  # WARNING: setting this to true will override any user-provided weights
  # "weight_by_num_documents": false,
  # "weighted_sampler_alpha": 0.3,

  "vocab-file": "/fs/archive/share/yulan/tokenizer/yulan_v1/tokenizer.model",
  "tokenizer_type": "LlamaTokenizer",

  "save": "/fs/fast/share/jarvis/checkpoints/1-3B/test_data",
  "load": "/fs/fast/share/jarvis/checkpoints/1-3B/test_data",
  "checkpoint_validation_with_forward_pass": False,

  "finetune": True,
  # "initialize_llama_cn_with_llama_word_embeddings": True, # expand vocab

  "use_wandb": True,
  "wandb_host": "https://api.wandb.ai",
  "wandb_project": "1-3B_test_data",
  "wandb_team": "jarvis_llm"
}
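The commented-out weight_by_num_documents / weighted_sampler_alpha options refer to multinomial weighting over dataset sizes. A minimal illustration of the alpha-weighting idea, as a sketch of the technique rather than GPT-NeoX's actual implementation:

# Alpha-weighted multinomial sampling: raise each dataset's share of the
# corpus to the power alpha and renormalize; alpha < 1 flattens the
# distribution toward uniform, upweighting small datasets.
def alpha_weights(sizes, alpha=0.3):
    total = sum(sizes)
    unnorm = [(s / total) ** alpha for s in sizes]
    z = sum(unnorm)
    return [u / z for u in unnorm]

# e.g. the first four token counts from the comment in the config above:
print(alpha_weights([60237255867, 19377987678, 14319702200, 4109669539]))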
@@ -0,0 +1,100 @@
{
  # parallelism settings (you will want to change these based on your cluster setup,
  # ideally scheduling pipeline stages across the node boundaries)
  "pipe-parallel-size": 1,
  "model-parallel-size": 1,

  # model settings
  "num-layers": 24,
  "hidden-size": 2048,
  "num-attention-heads": 16,
  "seq-length": 2048,
  "max-position-embeddings": 2048,
  "norm": "layernorm",
  "pos-emb": "rotary",
  "no-weight-tying": true,
  "gpt_j_residual": false,
  "output_layer_parallelism": "column",
  "attention_config": [[["flash"], 24]],

  # these should provide some speedup but take a while to build; set to true if desired
  "scaled-upper-triang-masked-softmax-fusion": false,
  "bias-gelu-fusion": false,

  # init methods
  "init_method": "small_init",
  "output_layer_init_method": "wang_init",

  # optimizer settings
  "optimizer": {
    "type": "Adam",
    "params": {
      "lr": 0.0002,
      "betas": [0.9, 0.95],
      "eps": 1.0e-8,
    }
  },
  "min_lr": 0.00002,

  # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training
  "zero_optimization": {
    "stage": 1,
    "allgather_partitions": True,
    "allgather_bucket_size": 500000000,
    "overlap_comm": True,
    "reduce_scatter": True,
    "reduce_bucket_size": 500000000,
    "contiguous_gradients": True,
  },

  # batch / data settings
  "train_micro_batch_size_per_gpu": 64,
  "data-impl": "mmap",

  # activation checkpointing
  "checkpoint-activations": true,
  "checkpoint-num-layers": 1,
  "partition-activations": true,
  "synchronize-each-layer": true,

  # regularization
  "gradient_clipping": 1.0,
  "weight-decay": 0.1,
  "hidden-dropout": 0,
  "attention-dropout": 0,

  # precision settings
  # "fp16": {
  #   "fp16": true,
  #   "enabled": true,
  #   "loss_scale": 0,
  #   "loss_scale_window": 1000,
  #   "hysteresis": 2,
  #   "min_loss_scale": 1
  # },

  "bf16": {
    "enabled": true
  },

  # misc. training settings
  "train-iters": 60000,
  "lr-decay-iters": 60000,
  "distributed-backend": "nccl",
  "lr-decay-style": "cosine",
  "warmup": 0.01,
  "checkpoint-factor": 5000,
  "eval-interval": 9999999,
  "eval-iters": 100,

  # logging
  "log-interval": 5,
  "steps_per_print": 5,
  "keep-last-n-checkpoints": 4,
  "wall_clock_breakdown": true,

  "tokenizer_type": "LlamaTokenizer",
  "vocab-file": "/fs/fast/share/jarvis/tokenizer/jarvis_tokenizer_v2/tokenizer.model",
}
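The schedule settings above (lr 2e-4, min_lr 2e-5, cosine decay over 60,000 iterations, 1% linear warmup) imply the learning-rate curve sketched below; this is the standard cosine-with-warmup schedule, not GPT-NeoX's exact code:

import math

# Sketch of the cosine LR schedule implied by the settings above:
# linear warmup for 1% of 60,000 iters (600 steps), then cosine decay
# from lr = 2e-4 down to min_lr = 2e-5.
def lr_at(step, lr=2e-4, min_lr=2e-5, total=60_000, warmup_frac=0.01):
    warmup = int(total * warmup_frac)  # 600 steps
    if step < warmup:
        return lr * step / warmup
    t = (step - warmup) / (total - warmup)
    return min_lr + 0.5 * (lr - min_lr) * (1 + math.cos(math.pi * t))

print(lr_at(600))     # 2e-4 at the end of warmup
print(lr_at(60_000))  # 2e-5 at the end of training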