# @package _global_
# to execute this experiment run:
# python eval.py experiment=generation/diffusion/eval/fashion
defaults:
  - override /data: fashion.yaml
  - override /model: diffusion/diffusion_module.yaml
  - override /callbacks: generation_diffusion.yaml
  - override /trainer: ddp.yaml
  - override /logger: wandb.yaml
# all parameters below will be merged with parameters from default configurations set above
# this allows you to overwrite only specified parameters
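# for example, any single value below can also be overridden from the command line
# without editing this file (standard Hydra dotted-key syntax; the value is only illustrative):
# python eval.py experiment=generation/diffusion/eval/fashion data.batch_size=256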
seed: 12345
task_name: generation/diffusion/eval
trainer:
  devices: 1
data:
  batch_size: 512 # for 1 GPU 16G
  num_workers: 10
  train_val_test_split: [10_000, 10_000, 50_000] # test with 50,000 samples
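  # note: assuming the standard Fashion-MNIST dataset (70,000 images in total),
  # this 10k/10k/50k split uses the full dataset, reserving 50,000 samples for testing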
model:
  net:
    denoise_net:
      base_channels: 128
      n_layer_blocks: 2
      channel_multipliers: [1, 2, 2, 2]
      attention_levels: [1] # resolution 16
    sampler:
      _target_: src.models.diffusion.sampler.DDPMSampler
      n_train_steps: ${model.net.n_train_steps}
      beta_schedule: linear
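      # note: DDPMSampler is this project's own sampler class (see _target_ above);
      # beta_schedule: linear presumably follows the linear noise schedule of the
      # original DDPM paper (Ho et al., 2020)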
logger:
  wandb:
    name: ${now:%Y-%m-%d}_${now:%H-%M-%S}
    project: generation
    tags: ${tags}
    group: diffusion
    id: null # pass correct id to resume experiment! (if ckpt_path is not null)
ckpt_path: null # simply provide checkpoint path to resume training
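# for example, to run this evaluation from a saved checkpoint (the path is only a placeholder):
# python eval.py experiment=generation/diffusion/eval/fashion ckpt_path=/path/to/last.ckpt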