"推理时候报错,说没有图中modelscope文件,怎么处理?
export CUDA_DEVICE_MAX_CONNECTIONS=1
DIR=$(pwd)
export CUDA_VISIBLE_DEVICES=1,2,3
GPUS_PER_NODE=3
NNODES=1
NODE_RANK=0
MASTER_ADDR=localhost
MASTER_PORT=6001
MODEL=""Qwen/Qwen-VL-Chat-Int4"" # Qwen/Qwen-VL-Chat-Int4 Set the path if you do not want to load from huggingface directly
DATA=""data/train_data.json""
DISTRIBUTED_ARGS="
--nproc_per_node $GPUS_PER_NODE \
--nnodes $NNODES \
--node_rank $NODE_RANK \
--master_addr $MASTER_ADDR \
--master_port $MASTER_PORT
"
torchrun $DISTRIBUTED_ARGS finetune.py \
--model_name_or_path $MODEL \
--data_path $DATA \
--fp16 True \
--fix_vit True \
--output_dir output_qlora_model \
--num_train_epochs 5 \
--per_device_train_batch_size 2 \
--per_device_eval_batch_size 1 \
--gradient_accumulation_steps 8 \
--evaluation_strategy "no" \
--save_strategy "steps" \
--save_steps 50 \
--save_total_limit 10 \
--learning_rate 1e-5 \
--weight_decay 0.1 \
--adam_beta2 0.95 \
--warmup_ratio 0.01 \
--lr_scheduler_type "cosine" \
--logging_steps 1 \
--report_to "none" \
--model_max_length 2048 \
--lazy_preprocess True \
--use_lora \
--q_lora \
--gradient_checkpointing \
--deepspeed finetune/ds_config_zero2.json
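
For reference, inference after this Q-LoRA run would normally load the adapter saved to output_qlora_model. Below is a minimal sketch following the Qwen-VL README's Q-LoRA loading pattern (peft's AutoPeftModelForCausalLM); the tokenizer is taken from the base Int4 checkpoint, and the image path and prompt are placeholders, not part of the original question:

# Minimal Q-LoRA inference sketch; paths and prompt are placeholders.
from transformers import AutoTokenizer
from peft import AutoPeftModelForCausalLM

adapter_dir = "output_qlora_model"  # --output_dir from the script above

# Tokenizer comes from the base model; the adapter dir holds only LoRA weights.
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-VL-Chat-Int4", trust_remote_code=True)
model = AutoPeftModelForCausalLM.from_pretrained(
    adapter_dir,
    device_map="auto",
    trust_remote_code=True,
).eval()

# Qwen-VL-Chat's remote code provides from_list_format() and chat().
query = tokenizer.from_list_format([
    {"image": "demo.jpeg"},            # placeholder image path
    {"text": "Describe this image."},
])
response, _ = model.chat(tokenizer, query=query, history=None)
print(response)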
Try fine-tuning by following this document instead; with it the learning_rate behaves normally and inference works as well. See the following link: https://github.com/modelscope/swift/blob/main/docs/source/Multi-Modal/qwen-vl%E6%9C%80%E4%BD%B3%E5%AE%9E%E8%B7%B5.md (This answer was compiled from the DingTalk group "ModelScope Developer Alliance Group ①".)
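
If the failure comes from files that cannot be resolved from the ModelScope hub at load time, a common workaround is to download a complete snapshot first and point MODEL / --model_name_or_path at the local directory. A minimal sketch, assuming the modelscope package is installed; the model id qwen/Qwen-VL-Chat-Int4 is assumed to be the ModelScope mirror of the checkpoint above and should be verified against the hub page:

# Download a full local snapshot from ModelScope and load from that path.
from modelscope import snapshot_download

# Model id is an assumption; verify it on modelscope.cn before use.
model_dir = snapshot_download("qwen/Qwen-VL-Chat-Int4")
print(model_dir)  # pass this directory as MODEL / --model_name_or_path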