common.py
import argparse
import logging
import random
from collections import defaultdict
from pathlib import Path
import numpy as np
import torch

def str_to_list(string):
    """Parse a comma-separated string of floats, e.g. "0.1,0.9" -> [0.1, 0.9]."""
    return [float(s) for s in string.split(",")]

def str_or_float(value):
    """Return value parsed as a float when possible, otherwise the original string."""
    try:
        return float(value)
    except ValueError:
        return value

def str2bool(v):
    """Parse common boolean spellings ("yes"/"no", "true"/"false", "1"/"0") for argparse."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("Boolean value expected.")
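
# Illustrative usage of the converters above as argparse `type=` callables
# (a hedged sketch; these flag names are hypothetical and not defined in this file):
#
#     parser = argparse.ArgumentParser()
#     parser.add_argument("--loss-weights", type=str_to_list)  # "0.5,0.5" -> [0.5, 0.5]
#     parser.add_argument("--temperature", type=str_or_float)  # "2.0" -> 2.0, "auto" -> "auto"
#     parser.add_argument("--use-amp", type=str2bool, default=False)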

# Shared CLI arguments; add_help=False so scripts can reuse them via parents=[common_parser].
common_parser = argparse.ArgumentParser(add_help=False)
common_parser.add_argument("--data-path", type=Path, help="path to data")
common_parser.add_argument("--log_path", type=Path, help="path to log")
common_parser.add_argument("--n-epochs", type=int, default=200)
common_parser.add_argument("--n_task", type=int, default=2)
common_parser.add_argument("--batch-size", type=int, default=120, help="batch size")
common_parser.add_argument("--lr", type=float, default=1e-3, help="learning rate")
common_parser.add_argument("--method-params-lr", type=float, default=0.025, help="lr for weight method params. If None, set to args.lr. For uncertainty weighting",)
common_parser.add_argument("--gpu", type=int, default=0, help="gpu device ID")
common_parser.add_argument("--seed", type=int, default=42, help="seed value")

def count_parameters(model):
    """Count the trainable parameters of a torch module."""
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
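
# Quick sanity check (illustrative): torch.nn.Linear(10, 5) has 10 * 5 weights + 5 biases.
#
#     count_parameters(torch.nn.Linear(10, 5))  # -> 55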

def set_logger():
    """Configure root logging with timestamps at INFO level."""
    logging.basicConfig(
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
        level=logging.INFO,
    )

def set_seed(seed):
    """Seed Python, NumPy, and torch RNGs for reproducibility.

    :param seed: integer seed value
    :return: None
    """
    np.random.seed(seed)
    random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        torch.backends.cudnn.enabled = True
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True

def get_device(no_cuda=False, gpus="0"):
    """Return cuda:<gpus> when CUDA is available and not disabled, otherwise CPU."""
    return torch.device(
        f"cuda:{gpus}" if torch.cuda.is_available() and not no_cuda else "cpu"
    )
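
# Minimal self-check when run directly (an illustrative sketch, not part of the original file):
# seeds the RNGs, configures logging, and reports the parameter count of a tiny placeholder model.
if __name__ == "__main__":
    args = common_parser.parse_args()
    set_logger()
    set_seed(args.seed)
    device = get_device(gpus=args.gpu)
    model = torch.nn.Linear(10, 5).to(device)  # placeholder model: 55 trainable parameters
    logging.info(f"device: {device}, trainable parameters: {count_parameters(model)}")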