LLaMA-Factory is an excellent fine-tuning tool. Below are a Dockerfile and a training script for multi-GPU fine-tuning, provided for reference.
Dockerfile
```
FROM nvidia/cuda:12.1.0-cudnn8-devel-ubuntu22.04
# python3
RUN apt-get update && apt-get install -y python3.10 python3-pip
# torch
COPY torch-2.2.0+cu121-cp310-cp310-linux_x86_64.whl torch-2.2.0+cu121-cp310-cp310-linux_x86_64.whl
RUN pip3 install torch-2.2.0+cu121-cp310-cp310-linux_x86_64.whl
# LLaMA-Factory requirements
RUN pip3 install transformers==4.37.2 datasets==2.16.1 accelerate==0.25.0 peft==0.7.1 trl==0.7.10 gradio==3.50.2 \
deepspeed modelscope ipython scipy einops sentencepiece protobuf jieba rouge-chinese nltk sse-starlette matplotlib \
--no-cache-dir -i https://pypi.tuna.tsinghua.edu.cn/simple
# unsloth
RUN apt-get install -y git
RUN pip install --upgrade pip
RUN pip install triton --no-cache-dir -i https://pypi.tuna.tsinghua.edu.cn/simple
RUN pip install "unsloth[cu121_ampere_torch220] @ git+https://github.com/unslothai/unsloth.git"
train.sh
```
docker run \
-it \
--rm \
--name llm \
--network=host \
--shm-size 32G \
--gpus all \
-v /home/[user_name]/.cache/modelscope/hub/:/root/.cache/modelscope/hub/ \
-v /home/[user_name]/LLaMA-Factory/:/LLaMA-Factory/ \
-v /home/[user_name]/.cache/huggingface/accelerate/default_config.yaml:/root/.cache/huggingface/accelerate/default_config.yaml \
-w /LLaMA-Factory \
-e USE_MODELSCOPE_HUB=1 \
llm:v1.1 \
accelerate launch src/train_bash.py \
--stage sft \
--do_train True \
--model_name_or_path ZhipuAI/chatglm3-6b \
--finetuning_type lora \
--use_unsloth True \
--template chatglm3 \
--dataset_dir data \
--dataset alpaca_gpt4_zh \
--cutoff_len 512 \
--learning_rate 5e-05 \
--num_train_epochs 2.0 \
--max_samples 8000 \
--per_device_train_batch_size 1 \
--gradient_accumulation_steps 2 \
--lr_scheduler_type cosine \
--max_grad_norm 1.0 \
--logging_steps 5 \
--save_steps 1000 \
--warmup_steps 0 \
--lora_rank 8 \
--lora_dropout 0.1 \
--lora_target query_key_value \
--output_dir saves/ChatGLM3-6B-Chat/lora/train_20240212 \
--fp16 True \
--plot_loss True
```
Notes:
--shm-size 32G and --gpus all are both required.
--use_unsloth True enables Unsloth for training acceleration.
--gradient_accumulation_steps 2 must stay consistent with gradient_accumulation_steps in the DeepSpeed config (see default_config.yaml below).
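For reference, with these settings the effective global batch size works out to per_device_train_batch_size × gradient_accumulation_steps × num_processes = 1 × 2 × 2 = 4 samples per optimizer step, where num_processes: 2 comes from the default_config.yaml below.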
default_config.yaml
```
compute_environment: LOCAL_MACHINE
debug: false
deepspeed_config:
  deepspeed_multinode_launcher: standard
  gradient_accumulation_steps: 2
  offload_optimizer_device: none
  offload_param_device: none
  zero3_init_flag: false
  zero3_save_16bit_model: false
  zero_stage: 2
distributed_type: DEEPSPEED
downcast_bf16: 'no'
gpu_ids: all
machine_rank: 0
main_training_function: main
mixed_precision: bf16
num_machines: 1
num_processes: 2
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false
```
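Since docker run mounts default_config.yaml to /root/.cache/huggingface/accelerate/default_config.yaml, accelerate launch picks it up from its default location automatically. A quick sanity-check sketch (standard accelerate CLI commands; paths follow the mounts above):

```
# Inside the container: print the accelerate environment and confirm that
# distributed_type, num_processes and mixed_precision match the yaml above.
accelerate env

# Alternatively, pass the config file explicitly instead of relying on the default path:
accelerate launch \
    --config_file /root/.cache/huggingface/accelerate/default_config.yaml \
    src/train_bash.py \
    --stage sft --do_train True   # ...plus the remaining flags from train.sh
```

num_processes: 2 assumes two visible GPUs; adjust it to match however many GPUs --gpus all actually exposes.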