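# default_experiment.yaml
#
# Beaker experiment spec that fine-tunes google/t5-xl-lm-adapt on the
# Natural Instructions data with DeepSpeed across 8 GPUs. A hedged usage
# sketch, assuming the standard Beaker CLI (the exact invocation is an
# assumption, not confirmed by this file):
#   beaker experiment create default_experiment.yaml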
version: v2-alpha
description: natural instructions
tasks:
  - name: instruct
    image:
      beaker: Yizhongw03/ni-exp
    command: [
      # deepspeed
      "deepspeed", "--master_port", 10086,
      # what to do
      "src/run_s2s.py",
      "--do_train", "--do_predict",
      "--predict_with_generate",
      # model
      "--model_name_or_path", "google/t5-xl-lm-adapt",
      "--max_source_length", 1024,
      "--max_target_length", 128,
      "--generation_max_length", 128,
      "--max_num_instances_per_task", 100,
      "--max_num_instances_per_eval_task", 100,
      "--add_task_name", False,
      "--add_task_definition", True,
      "--num_pos_examples", 2,
      "--num_neg_examples", 0,
      "--add_explanation", False,
      "--tk_instruct", False,
      # path
      "--data_dir", "/data/splits/default",
      "--task_dir", "/data/tasks",
      "--output_dir", "/output/",
      "--overwrite_output_dir",
      "--cache_dir", "./cache/",
      "--overwrite_cache",
      # training
      "--per_device_train_batch_size", 1,
      "--per_device_eval_batch_size", 2,
      "--gradient_accumulation_steps", 2,
      "--learning_rate", 5e-5,
      # "--max_steps", 10000,
      "--num_train_epochs", 2,
      "--lr_scheduler_type", "constant",
      "--warmup_steps", 0,
      "--logging_strategy", "steps",
      "--logging_steps", 500,
      "--evaluation_strategy", "no",
      "--save_strategy", "steps",
      "--save_steps", 2500,
      # deepspeed
      "--deepspeed", "ds_configs/stage2.config",
      "--bf16",
      # log
      "--disable_tqdm", True,
      "--report_to", "wandb",
      "--run_name", "t5-experiment"
    ]
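    # Effective training batch size: 1 per device x 2 gradient-accumulation
    # steps x 8 GPUs (gpuCount below) = 16 examples per optimizer step.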
    envVars:
      - name: CUDA_DEVICE_ORDER
        value: PCI_BUS_ID
      - name: TRANSFORMERS_CACHE
        value: ./cache/
      - name: WANDB_PROJECT
        value: NaturalInstruction
      - name: WANDB_WATCH
        value: false
      - name: WANDB_LOG_MODEL
        value: false
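    # WANDB_WATCH and WANDB_LOG_MODEL above are set to false to turn off
    # gradient histogram logging and checkpoint upload to Weights & Biases.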
    datasets:
      - mountPath: /data
        source:
          beaker: Yizhongw03/natural_instructions_release_0501
    result:
      # Beaker will capture anything that's written to this location and
      # store it in the results dataset.
      path: /output
    resources:
      gpuCount: 8
    context:
      cluster: ai2/mosaic-cirrascale
      priority: normal
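# "ds_configs/stage2.config" referenced in the command above is a DeepSpeed
# ZeRO stage-2 JSON config shipped with the repository. A minimal illustrative
# sketch (an assumption, not the repo's actual file) consistent with the
# flags used here, relying on the HF Trainer integration's "auto" values:
#   {
#     "bf16": { "enabled": "auto" },
#     "zero_optimization": { "stage": 2 },
#     "gradient_accumulation_steps": "auto",
#     "train_micro_batch_size_per_gpu": "auto"
#   }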