forked from deepmodeling/Uni-Mol
-
Notifications
You must be signed in to change notification settings - Fork 0
/
inference.sh
20 lines (17 loc) · 947 Bytes
/
inference.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
#!/usr/bin/env bash
# Launch distributed Uni-Mol+ inference via torchrun.
#
# Usage:
#   data_path=... weight_path=... results_path=... ./inference.sh <valid-subset>
#
# Required environment variables:
#   data_path     - dataset directory passed to inference.py
#   weight_path   - model checkpoint to load (--path)
#   results_path  - output directory (--results-path); created if missing
# Optional (defaults shown below): MASTER_PORT, MASTER_IP, n_gpu,
#   OMPI_COMM_WORLD_SIZE, OMPI_COMM_WORLD_RANK, arch, task, batch_size
set -euo pipefail

# Fail fast with a clear message on the inputs we cannot default.
: "${data_path:?data_path must point to the dataset directory}"
: "${weight_path:?weight_path must point to the model checkpoint}"
: "${results_path:?results_path must point to the output directory}"
if [ $# -lt 1 ]; then
  printf 'Usage: %s <valid-subset>\n' "$0" >&2
  exit 2
fi

# Defaults, overridable from the environment.
MASTER_PORT=${MASTER_PORT:-10087}
MASTER_IP=${MASTER_IP:-127.0.0.1}
# Auto-detect GPU count only when not given; nvidia-smi failing here aborts
# the run, which is preferable to launching torchrun with n_gpu=0.
n_gpu=${n_gpu:-$(nvidia-smi -L | wc -l)}
OMPI_COMM_WORLD_SIZE=${OMPI_COMM_WORLD_SIZE:-1}   # number of nodes
OMPI_COMM_WORLD_RANK=${OMPI_COMM_WORLD_RANK:-0}   # rank of this node
arch=${arch:-unimol_plus_pcq_base}
task=${task:-pcq}
batch_size=${batch_size:-128}

mkdir -p -- "$results_path"

torchrun --nproc_per_node="$n_gpu" --nnodes="$OMPI_COMM_WORLD_SIZE" --node_rank="$OMPI_COMM_WORLD_RANK" --master_addr="$MASTER_IP" --master_port="$MASTER_PORT" \
  ./inference.py --user-dir ./unimol_plus/ "$data_path" --valid-subset "$1" \
  --results-path "$results_path" \
  --num-workers 8 --ddp-backend=c10d --batch-size "$batch_size" \
  --task "$task" --loss unimol_plus --arch "$arch" \
  --path "$weight_path" \
  --fp16 --fp16-init-scale 4 --fp16-scale-window 256 \
  --log-interval 50 --log-format simple --label-prob 0.0