# test_parameters_20i_voc.yaml

experiment:
  # It contains all the information about the grids and the group of runs:
  name: LabelAnything # name of the logger platform experiment
  group: Test # name of the group of experiments for the logger platform
  continue_with_errors: False # continue with other runs even if a run fails
  start_from_grid: 0 # skip grids in the grid search
  start_from_run: 0 # skip runs from the selected grid
  search: grid
  # n_trials: 5
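  # (assumption) n_trials would presumably only be used by a sampling-based search;
  # with `search: grid` it stays commented out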
parameters:
  # Contains the parameters to build the grid.
  # Each value should be a dict or a list
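  # Illustrative example (hypothetical values, assuming standard grid expansion):
  # optimizer: [AdamW, SGD] combined with initial_lr: [0.001, 0.0001] would produce
  # 2 x 2 = 4 runs, one per combination of the listed values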
  logger:
    log_frequency: [1]
    tags: [[Test]] # list of tags to attach to the run in the logger platform
    train_image_log_frequency: [5]
    val_image_log_frequency: [5]
    experiment_save_delta: [null] # save experiment every n seconds
    tmp_dir: [tmp]
    wandb:
      offline: [False]
      entity: [cilabuniba]
      # resume: [True]
      # run_id: [pg70tf8u]
  train_params:
    loss:
      class_weighting: [True]
      components:
        # - focal:
        #     weight: 0.485
        #   dice:
        #     weight: 0.05
        #   rmi:
        #     weight: 0.485
        - focal:
            weight: 0.5
          dice:
            weight: 0.025
          prompt_contrastive:
            weight: 0.475
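          # the three active component weights sum to 1.0 (0.5 + 0.025 + 0.475)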
    seed: &seed [42] # random seed to set
    # substitution_threshold: [0.1] # threshold
    num_points: [1] # number of points per class after the substitution
    max_epochs: [4]
    compile: [False]
    initial_lr: [0.00001]
    optimizer: [AdamW]
    scheduler:
      type: [cosine]
      step_moment: [batch]
      num_warmup_steps: [1000]
      # warmup_steps: [2500]
    substitute: [False]
    accumulate_substitution: [False] # perform gradient accumulation over the substitution sub-batch
    validation_reruns: [2]
    watch_metric: [loss] # metric to watch for early stopping and the scheduler (loss or miou)
    freeze_backbone: [True]
    check_nan: [1] # check for NaN every n batches
  model:
    name: [dummy] # path to the model class, or a model name contained in EzDL or super-gradients
    # checkpoint: [checkpoints/sam_vit_b_01ec64.pth] # model parameters
    # use_sam_checkpoint: [True]
    # spatial_convs: [3]
    # class_attention: [True]
    # segment_example_logits: [True]
    # image_embed_dim: [256]
    # embed_dim: [256]
    # image_size: &image_size [1024]
    # dropout: [0.1]
  dataset: # parameters depending on the class you defined for the dataset
    datasets:
      voc5i:
        name: [voc]
        instances_path: [data/annotations/instances_voc12.json]
        img_dir: &img_dir [data/raw/VOCdevkit/VOC2012]
        split: ["train"]
        val_fold_idx: [0]
        n_folds: [4]
      val_voc5i:
        name: [coco]
        instances_path: [data/annotations/instances_voc12.json]
        img_dir: *img_dir
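        # *img_dir reuses the value bound to the &img_dir YAML anchor defined for voc5i above,
        # so both datasets point to the same image directory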
        split: ["val"]
        val_fold_idx: [0]
        n_folds: [4]
        n_shots: [2]
        n_ways: [2]
        do_subsample: [False]
        add_box_noise: [False]
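    # (assumption) `common` below provides defaults shared by the datasets above;
    # keys repeated per dataset (e.g. do_subsample in val_voc5i) presumably take precedence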
    common:
      do_subsample: [True]
      add_box_noise: [True]
      max_points_annotations: [50]
      max_points_per_annotation: [10]
      image_size: [1024]
  dataloader:
    num_workers: [0]
    possible_batch_example_nums: [[[1, 4]]]
    val_possible_batch_example_nums: [[[1, 1]]]
    prompt_types: [["point", "bbox", "mask"]]
    val_prompt_types: [["mask"]]
other_grids:
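# (assumption, based on similar EzDL-style grid configs) each entry of other_grids would hold a
# partial parameter dict merged over `parameters` to define an additional grid; none are defined here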