pretrain_gpt_1B_santacoder.sh
#! /bin/bash
set -u # stop on unset variables
# Runs the SantaCoder 1B model
GPUS_PER_NODE=8
MASTER_ADDR=${MASTER_NODE} # Adjust: hostname/IP of the rank-0 node; MASTER_NODE must be set in the environment
MASTER_PORT=6000
NNODES=12 # Adjust
# NODE_RANK=0 # Adjust: rank of this node (0..NNODES-1); with set -u the script aborts unless NODE_RANK is exported by the launcher or uncommented here
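# Example per-node invocation (hypothetical hostname and scheduler variable; adjust to your cluster):
#   MASTER_NODE=node-0 NODE_RANK=$SLURM_NODEID bash pretrain_gpt_1B_santacoder.sh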
WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
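# With the defaults above: 12 nodes * 8 GPUs = 96 ranks. torchrun derives the world size from
# --nnodes/--nproc_per_node; WORLD_SIZE is computed here for reference only.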
DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT"
CHECKPOINT_PATH=/my/experiment/path # Adjust: Directory to store the checkpoints
DATA_PATH=/preprocessed/data/path # Adjust: Prefix of the preprocessed dataset.
TOKENIZER_FILE=/tokenizer/path # Adjust
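# Batch-size arithmetic (assuming tensor- and pipeline-parallel sizes of 1, as set below):
#   data-parallel size = 96 ranks, so micro-batch 2 * 96 = 192 samples per step,
#   matching --global-batch-size 192, i.e. no gradient accumulation.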
GPT_ARGS="\
--tensor-model-parallel-size 1 \
--pipeline-model-parallel-size 1 \
--recompute-activations \
--num-layers 24 \
--hidden-size 2048 \
--num-attention-heads 16 \
--attention-head-type multiquery \
--init-method-std 0.022 \
--seq-length 2048 \
--max-position-embeddings 2048 \
--attention-dropout 0.1 \
--hidden-dropout 0.1 \
--micro-batch-size 2 \
--global-batch-size 192 \
--lr 0.0002 \
--train-iters 3000 \
--lr-decay-iters 600000 \
--lr-decay-style cosine \
--lr-warmup-fraction 0.02 \
--weight-decay .1 \
--adam-beta2 .95 \
--clip-grad 1.0 \
--fp16 \
--log-interval 10 \
--save-interval 4000 \
--eval-interval 200 \
--eval-iters 10 \
--initial-loss-scale 65536 \
--fim-rate 0.5 \
"
TENSORBOARD_ARGS="--tensorboard-dir ${CHECKPOINT_PATH}/tensorboard"
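# torchrun launches GPUS_PER_NODE worker processes on this node; run the same command on
# every node, each with its own NODE_RANK, so all 96 ranks join the job.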
torchrun $DISTRIBUTED_ARGS \
pretrain_gpt.py \
$GPT_ARGS \
--tokenizer-type TokenizerFromFileWithFIM \
--tokenizer-file $TOKENIZER_FILE \
--save $CHECKPOINT_PATH \
--load $CHECKPOINT_PATH \
--data-path $DATA_PATH \
$TENSORBOARD_ARGS
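# --save and --load both point at CHECKPOINT_PATH, so rerunning this script should resume from
# the most recent checkpoint (written every --save-interval iterations), assuming the standard
# Megatron-LM checkpointing behaviour.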