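"""Train a CNN classifier on the VibViz vibration dataset using mel-spectrogram features."""
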
import torch
import torchaudio
from torch import nn
from torch.utils.data import DataLoader

from vibvizdataset import VibVizDataset
from cnn import CNNNetwork

BATCH_SIZE = 12
EPOCHS = 10
LEARNING_RATE = 0.001
ANNOTATIONS_FILE = "/Users/karthik/Documents/GenHap/VibvizModel/VibVizModel/audio_wav/vibrationAnnotations-July24th2016.csv"
AUDIO_DIR = "/Users/karthik/Documents/GenHap/VibvizModel/VibVizModel/audio_wav/viblib"
SAMPLE_RATE = 22050
NUM_SAMPLES = 22050  # one second of audio at SAMPLE_RATE
def create_data_loader(train_data, batch_size):
    # Note: DataLoader defaults to shuffle=False, so batches are drawn in dataset order.
    train_dataloader = DataLoader(train_data, batch_size=batch_size)
    return train_dataloader
def train_single_epoch(model, data_loader, loss_fn, optimiser, device):
    for inputs, targets in data_loader:
        inputs, targets = inputs.to(device), targets.to(device)

        # calculate loss
        prediction = model(inputs)
        loss = loss_fn(prediction, targets)

        # backpropagate the error and update the weights
        optimiser.zero_grad()
        loss.backward()
        optimiser.step()

    # report the loss of the last batch in the epoch
    print(f"loss: {loss.item()}")
def train(model, data_loader, loss_fn, optimiser, device, epochs):
    for i in range(epochs):
        print(f"Epoch {i+1}")
        train_single_epoch(model, data_loader, loss_fn, optimiser, device)
        print("---------------------------")
    print("Finished training")
if __name__ == "__main__":
    if torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"
    print(f"Using {device}")

    # instantiate the dataset object and create the data loader
    mel_spectrogram = torchaudio.transforms.MelSpectrogram(
        sample_rate=SAMPLE_RATE,
        n_fft=1024,
        hop_length=512,
        n_mels=64
    )
    usd = VibVizDataset(ANNOTATIONS_FILE,
                        AUDIO_DIR,
                        mel_spectrogram,
                        SAMPLE_RATE,
                        NUM_SAMPLES,
                        device)
    train_dataloader = create_data_loader(usd, BATCH_SIZE)
    print(train_dataloader)
    # for x in train_dataloader:
    #     print(x)

    # construct the model and assign it to the device
    cnn = CNNNetwork().to(device)
    print(cnn)

    # initialise loss function + optimiser
    loss_fn = nn.CrossEntropyLoss()
    optimiser = torch.optim.Adam(cnn.parameters(),
                                 lr=LEARNING_RATE)

    # train the model
    train(cnn, train_dataloader, loss_fn, optimiser, device, EPOCHS)

    # save the trained model (moved to the CPU so it can be loaded without a GPU)
    torch.save(cnn.to('cpu'), 'New_cnn.pt')
    # torch.save(cnn.state_dict(), "feedforwardnet.pth")
    print("Trained CNN saved at New_cnn.pt")