diff --git a/example/multinode/train_grpo_multinode_32b.sh b/example/multinode/train_grpo_multinode_32b.sh
new file mode 100644
index 0000000..7add385
--- /dev/null
+++ b/example/multinode/train_grpo_multinode_32b.sh
@@ -0,0 +1,77 @@
+data_name=nq_hotpotqa_train
+
+export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+export DATA_DIR=data/${data_name} # first download the data from https://huggingface.co/datasets/PeterJinGo/nq_hotpotqa_train
+
+WAND_PROJECT="Search-R1"
+RAY_DASHBOARD_ADDRESS="http://xx.xx.xx.xx:8265" # your head node address
+N_NODES=4
+
+export BASE_MODEL='Qwen/Qwen2.5-32B'
+export EXPERIMENT_NAME=${data_name}-search-r1-grpo-qwen2.5-32b-em-multinode-${N_NODES}
+
+# set -x
+export VLLM_ATTENTION_BACKEND=XFORMERS # vllm + qwen2-7b with flash_attn has some issues
+
+# max_prompt_length = (config['training']['max_start_length'] + config['training']['max_response_length'] * (config['training']['max_turns'] - 1) + config['training']['max_obs_length'] * config['training']['max_turns'])
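+# e.g. with the values used below (max_start_length=2048, max_response_length=500, max_obs_length=500, max_turns=4)
+# this worst-case budget is 2048 + 500*3 + 500*4 = 5548 tokens; data.max_prompt_length is capped at 4096 here.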
+
+ulimit -n 65535
+
+ray job submit --address=$RAY_DASHBOARD_ADDRESS \
+    --runtime-env=verl/trainer/runtime_env.yaml \
+    --no-wait \
+    -- \
+    python3 -m verl.trainer.main_ppo \
+    data.train_files=$DATA_DIR/train.parquet \
+    data.val_files=$DATA_DIR/test.parquet \
+    data.train_data_num=null \
+    data.val_data_num=null \
+    data.train_batch_size=512 \
+    data.val_batch_size=256 \
+    data.max_prompt_length=4096 \
+    data.max_response_length=500 \
+    data.max_start_length=2048 \
+    data.max_obs_length=500 \
+    data.shuffle_train_dataloader=True \
+    algorithm.adv_estimator=grpo \
+    actor_rollout_ref.model.path=$BASE_MODEL \
+    actor_rollout_ref.model.enable_gradient_checkpointing=True \
+    actor_rollout_ref.model.use_remove_padding=True \
+    actor_rollout_ref.actor.optim.lr=2e-7 \
+    actor_rollout_ref.actor.optim.lr_warmup_steps_ratio=0.285 \
+    actor_rollout_ref.actor.use_kl_loss=True \
+    actor_rollout_ref.actor.ppo_mini_batch_size=256 \
+    actor_rollout_ref.actor.ppo_micro_batch_size=64 \
+    actor_rollout_ref.actor.fsdp_config.param_offload=false \
+    actor_rollout_ref.actor.fsdp_config.grad_offload=false \
+    actor_rollout_ref.actor.fsdp_config.optimizer_offload=false \
+    actor_rollout_ref.rollout.log_prob_micro_batch_size=128 \
+    actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
+    actor_rollout_ref.rollout.name=vllm \
+    actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \
+    actor_rollout_ref.ref.log_prob_micro_batch_size=128 \
+    actor_rollout_ref.ref.fsdp_config.param_offload=false \
+    actor_rollout_ref.actor.kl_loss_coef=0.001 \
+    actor_rollout_ref.actor.kl_loss_type=low_var_kl \
+    algorithm.no_think_rl=false \
+    actor_rollout_ref.rollout.n_agent=5 \
+    actor_rollout_ref.rollout.temperature=1 \
+    actor_rollout_ref.actor.state_masking=True \
+    trainer.logger=['wandb'] \
+    +trainer.val_only=false \
+    +trainer.val_before_train=false \
+    trainer.default_hdfs_dir=null \
+    trainer.n_gpus_per_node=8 \
+    trainer.nnodes=$N_NODES \
+    trainer.save_freq=100 \
+    trainer.test_freq=100 \
+    trainer.project_name=$WAND_PROJECT \
+    trainer.experiment_name=$EXPERIMENT_NAME \
+    trainer.total_epochs=15 \
+    trainer.total_training_steps=1005 \
+    trainer.default_hdfs_dir=null \
+    trainer.default_local_dir=verl_checkpoints/$EXPERIMENT_NAME \
+    max_turns=4 \
+    retriever.url="http://127.0.0.1:8000/retrieve" \
+    retriever.topk=3 \
+    2>&1 | tee $EXPERIMENT_NAME.log
diff --git a/example/multinode/train_grpo_multinode_72b.sh b/example/multinode/train_grpo_multinode_72b.sh
new file mode 100644
index 0000000..100e928
--- /dev/null
+++ b/example/multinode/train_grpo_multinode_72b.sh
@@ -0,0 +1,75 @@
+data_name=nq_hotpotqa_train
+
+export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+export DATA_DIR=data/${data_name} # first download the data from https://huggingface.co/datasets/PeterJinGo/nq_hotpotqa_train
+
+WAND_PROJECT="Search-R1"
+RAY_DASHBOARD_ADDRESS="http://xx.xx.xx.xx:8265" # your head node address
+N_NODES=4
+
+export BASE_MODEL='Qwen/Qwen2.5-72B'
+export EXPERIMENT_NAME=${data_name}-search-r1-grpo-qwen2.5-72b-em-multinode-${N_NODES}
+
+# set -x
+export VLLM_ATTENTION_BACKEND=XFORMERS # vllm + qwen2-7b with flash_attn has some issues
+
+ulimit -n 65535
+
+ray job submit --address=$RAY_DASHBOARD_ADDRESS \
+    --runtime-env=verl/trainer/runtime_env.yaml \
+    --no-wait \
+    -- \
+    python3 -m verl.trainer.main_ppo \
+    data.train_files=$DATA_DIR/train.parquet \
+    data.val_files=$DATA_DIR/test.parquet \
+    data.train_data_num=null \
+    data.val_data_num=null \
+    data.train_batch_size=512 \
+    data.val_batch_size=256 \
+    data.max_prompt_length=4096 \
+    data.max_response_length=500 \
+    data.max_start_length=2048 \
+    data.max_obs_length=500 \
+    data.shuffle_train_dataloader=True \
+    algorithm.adv_estimator=grpo \
+    actor_rollout_ref.model.path=$BASE_MODEL \
+    actor_rollout_ref.model.enable_gradient_checkpointing=True \
+    actor_rollout_ref.model.use_remove_padding=True \
+    actor_rollout_ref.actor.optim.lr=1e-7 \
+    actor_rollout_ref.actor.optim.lr_warmup_steps_ratio=0.285 \
+    actor_rollout_ref.actor.use_kl_loss=True \
+    actor_rollout_ref.actor.ppo_mini_batch_size=256 \
+    actor_rollout_ref.actor.ppo_micro_batch_size=32 \
+    actor_rollout_ref.actor.fsdp_config.param_offload=True \
+    actor_rollout_ref.actor.fsdp_config.grad_offload=True \
+    actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \
+    actor_rollout_ref.rollout.log_prob_micro_batch_size=32 \
+    actor_rollout_ref.rollout.tensor_model_parallel_size=4 \
+    actor_rollout_ref.rollout.name=vllm \
+    actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \
+    actor_rollout_ref.ref.log_prob_micro_batch_size=32 \
+    actor_rollout_ref.ref.fsdp_config.param_offload=True \
+    actor_rollout_ref.actor.kl_loss_coef=0.001 \
+    actor_rollout_ref.actor.kl_loss_type=low_var_kl \
+    algorithm.no_think_rl=false \
+    actor_rollout_ref.rollout.n_agent=5 \
+    actor_rollout_ref.rollout.temperature=1 \
+    actor_rollout_ref.actor.state_masking=True \
+    trainer.logger=['wandb'] \
+    +trainer.val_only=false \
+    +trainer.val_before_train=false \
+    trainer.default_hdfs_dir=null \
+    trainer.n_gpus_per_node=8 \
+    trainer.nnodes=$N_NODES \
+    trainer.save_freq=100 \
+    trainer.test_freq=100 \
+    trainer.project_name=$WAND_PROJECT \
+    trainer.experiment_name=$EXPERIMENT_NAME \
+    trainer.total_epochs=15 \
+    trainer.total_training_steps=1005 \
+    trainer.default_hdfs_dir=null \
+    trainer.default_local_dir=verl_checkpoints/$EXPERIMENT_NAME \
+    max_turns=4 \
+    retriever.url="http://127.0.0.1:8000/retrieve" \
+    retriever.topk=3 \
+    2>&1 | tee $EXPERIMENT_NAME.log
diff --git a/example/multinode/train_ppo_multinode_32b.sh b/example/multinode/train_ppo_multinode_32b.sh
new file mode 100644
index 0000000..0cc93ad
--- /dev/null
+++ b/example/multinode/train_ppo_multinode_32b.sh
@@ -0,0 +1,84 @@
+data_name=nq_hotpotqa_train
+
+export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+export DATA_DIR=data/${data_name} # first download the data from https://huggingface.co/datasets/PeterJinGo/nq_hotpotqa_train
+
+WAND_PROJECT="Search-R1"
+RAY_DASHBOARD_ADDRESS="http://xx.xx.xx.xx:8265" # your head node address
+N_NODES=4
+
+export BASE_MODEL='Qwen/Qwen2.5-32B'
+export EXPERIMENT_NAME=${data_name}-search-r1-ppo-qwen2.5-32b-em-multinode-${N_NODES}
+
+# set -x
+export VLLM_ATTENTION_BACKEND=XFORMERS
+
+ulimit -n 65535
+
+ray job submit --address=$RAY_DASHBOARD_ADDRESS \
+    --runtime-env=verl/trainer/runtime_env.yaml \
+    --no-wait \
+    -- \
+    python3 -m verl.trainer.main_ppo \
+    data.train_files=$DATA_DIR/train.parquet \
+    data.val_files=$DATA_DIR/test.parquet \
+    data.train_data_num=null \
+    data.val_data_num=null \
+    data.train_batch_size=512 \
+    data.val_batch_size=256 \
+    data.max_prompt_length=4096 \
+    data.max_response_length=500 \
+    data.max_start_length=2048 \
+    data.max_obs_length=500 \
+    data.shuffle_train_dataloader=True \
+    algorithm.adv_estimator=gae \
+    actor_rollout_ref.model.path=$BASE_MODEL \
+    actor_rollout_ref.actor.optim.lr=2e-7 \
+    actor_rollout_ref.model.enable_gradient_checkpointing=true \
+    actor_rollout_ref.model.use_remove_padding=True \
+    actor_rollout_ref.actor.optim.lr_warmup_steps_ratio=0.285 \
+    actor_rollout_ref.actor.ppo_mini_batch_size=256 \
+    actor_rollout_ref.actor.ppo_micro_batch_size=32 \
+    actor_rollout_ref.actor.fsdp_config.param_offload=False \
+    actor_rollout_ref.actor.fsdp_config.grad_offload=False \
+    actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \
+    actor_rollout_ref.rollout.log_prob_micro_batch_size=32 \
+    actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
+    actor_rollout_ref.rollout.name=vllm \
+    actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \
+    actor_rollout_ref.ref.log_prob_micro_batch_size=32 \
+    actor_rollout_ref.ref.fsdp_config.param_offload=False \
+    actor_rollout_ref.rollout.n_agent=1 \
+    actor_rollout_ref.rollout.temperature=1 \
+    actor_rollout_ref.rollout.top_p=1.0 \
+    actor_rollout_ref.actor.state_masking=true \
+    critic.optim.lr=1e-5 \
+    critic.model.use_remove_padding=True \
+    critic.optim.lr_warmup_steps_ratio=0.015 \
+    critic.model.path=$BASE_MODEL \
+    critic.model.enable_gradient_checkpointing=true \
+    critic.ppo_micro_batch_size=32 \
+    critic.model.fsdp_config.param_offload=False \
+    critic.model.fsdp_config.grad_offload=False \
+    critic.model.fsdp_config.optimizer_offload=True \
+    algorithm.kl_ctrl.kl_coef=0.001 \
+    algorithm.no_think_rl=false \
+    trainer.critic_warmup=0 \
+    trainer.logger=['wandb'] \
+    +trainer.val_only=false \
+    +trainer.val_before_train=true \
+    trainer.default_hdfs_dir=null \
+    trainer.n_gpus_per_node=8 \
+    trainer.nnodes=$N_NODES \
+    trainer.save_freq=100 \
+    trainer.test_freq=100 \
+    trainer.project_name=$WAND_PROJECT \
+    trainer.experiment_name=$EXPERIMENT_NAME \
+    trainer.total_epochs=15 \
+    trainer.total_training_steps=1005 \
+    trainer.default_hdfs_dir=null \
+    trainer.default_local_dir=verl_checkpoints/$EXPERIMENT_NAME \
+    max_turns=4 \
+    retriever.url="http://127.0.0.1:8000/retrieve" \
+    retriever.topk=3 \
+    2>&1 | tee $EXPERIMENT_NAME.log
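
All three scripts assume a Ray cluster is already running and that its dashboard is reachable at RAY_DASHBOARD_ADDRESS (Ray's dashboard listens on port 8265 by default). A minimal sketch of bringing such a cluster up before submitting the jobs, assuming Ray's default ports and the same placeholder IP as above:

    # on the head node (serves the dashboard that `ray job submit` talks to)
    ray start --head --port=6379 --dashboard-host=0.0.0.0 --dashboard-port=8265

    # on each of the remaining N_NODES-1 worker nodes
    ray start --address=xx.xx.xx.xx:6379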