# early_stopping.py, forked from VinAIResearch/JointIDSF
import os

import numpy as np
import torch


class EarlyStopping:
    """Early stops the training if the validation loss doesn't improve after a given patience."""

    def __init__(self, patience=7, verbose=False):
        """
        Args:
            patience (int): How long to wait after the last time the validation loss improved.
                Default: 7
            verbose (bool): If True, prints a message for each validation loss improvement.
                Default: False
        """
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        self.val_loss_min = np.inf  # use np.inf; np.Inf was removed in NumPy 2.0

    def __call__(self, val_loss, model, args):
        # Loss is minimized, so negate it so that a higher score is always better;
        # other tuning metrics (accuracy/F1) are already higher-is-better.
        if args.tuning_metric == "loss":
            score = -val_loss
        else:
            score = val_loss
        if self.best_score is None:
            self.best_score = score
            self.save_checkpoint(val_loss, model, args)
        elif score < self.best_score:
            self.counter += 1
            print(f"EarlyStopping counter: {self.counter} out of {self.patience}")
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = score
            self.save_checkpoint(val_loss, model, args)
            self.counter = 0
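
    # Example trace (illustrative, not from the original file) with patience=2 and
    # tuning_metric="loss": val_loss 0.90 -> new best, checkpoint saved;
    # 0.95 -> counter 1/2; 0.93 -> counter 2/2 -> early_stop is set, since the
    # counter only resets when a new best score is reached.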

    def save_checkpoint(self, val_loss, model, args):
        """Saves the model when the validation loss decreases or the accuracy/F1 increases."""
        if self.verbose:
            if args.tuning_metric == "loss":
                print(f"Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...")
            else:
                print(
                    f"{args.tuning_metric} increased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ..."
                )
        model.save_pretrained(args.model_dir)
        torch.save(args, os.path.join(args.model_dir, "training_args.bin"))
        # Despite the name, val_loss_min tracks the best value of whichever metric is tuned.
        self.val_loss_min = val_loss

        # # Save model checkpoint (Overwrite)
        # if not os.path.exists(self.args.model_dir):
        #     os.makedirs(self.args.model_dir)
        # model_to_save = self.model.module if hasattr(self.model, 'module') else self.model
        # model_to_save.save_pretrained(self.args.model_dir)
        # # Save training arguments together with the trained model
        # torch.save(self.args, os.path.join(self.args.model_dir, 'training_args.bin'))
        # logger.info("Saving model checkpoint to %s", self.args.model_dir)
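
# Usage sketch (illustrative, not part of the original file): how this class is
# typically driven from a training loop. `train_one_epoch`, `evaluate`, and
# `dev_loader` are hypothetical helpers, and the `args` fields shown are assumptions;
# the JointIDSF trainer wires up its own equivalents. `model` must expose
# `save_pretrained` (e.g. a HuggingFace PreTrainedModel), as save_checkpoint requires.
#
#   early_stopping = EarlyStopping(patience=args.early_stopping, verbose=True)
#   for epoch in range(int(args.num_train_epochs)):
#       train_one_epoch(model)                  # hypothetical training step
#       val_loss = evaluate(model, dev_loader)  # hypothetical; returns the tuned metric
#       early_stopping(val_loss, model, args)
#       if early_stopping.early_stop:
#           break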