model.py

import torch
import torch.nn as nn
import torch.nn.functional as F

class elu(nn.Module):
    """ELU activation with alpha = 0.2: identity for x >= 0, 0.2 * (exp(x) - 1) otherwise."""

    def __init__(self) -> None:
        super().__init__()

    def forward(self, x):
        return torch.where(x >= 0, x, 0.2 * (torch.exp(x) - 1))
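
# Illustrative sketch (added for exposition, not part of the original file):
# the custom `elu` above should agree with PyTorch's built-in F.elu when
# alpha=0.2, since both compute x for x >= 0 and alpha * (exp(x) - 1) below 0.
def _demo_elu_matches_builtin():
    x = torch.linspace(-3, 3, steps=101)
    assert torch.allclose(elu()(x), F.elu(x, alpha=0.2))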

class linear(nn.Module):
    """Fully connected layer with He (Kaiming) initialization."""

    def __init__(self, in_c, out_c) -> None:
        super().__init__()
        # Scaling the weights by sqrt(2 / in_c) keeps activation variance
        # stable under ReLU-like nonlinearities (He initialization).
        self.w = nn.Parameter(
            torch.randn(out_c, in_c) * torch.sqrt(torch.tensor(2 / in_c))
        )
        self.b = nn.Parameter(torch.randn(out_c))

    def forward(self, x):
        # F.linear computes x @ w.T + b.
        return F.linear(x, self.w, self.b)
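
# Illustrative sketch (added for exposition, not part of the original file):
# `linear` maps (batch, in_c) -> (batch, out_c), and with He initialization
# w.std() should sit near sqrt(2 / in_c), about 0.0505 for in_c = 784.
def _demo_linear_layer():
    layer = linear(28 * 28, 80)
    out = layer(torch.randn(32, 28 * 28))
    assert out.shape == (32, 80)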

class MLP_MNIST(nn.Module):
    """Three-layer MLP for MNIST: 28*28 -> 80 -> 60 -> 10."""

    def __init__(self) -> None:
        super().__init__()
        self.fc1 = linear(28 * 28, 80)
        self.fc2 = linear(80, 60)
        self.fc3 = linear(60, 10)
        self.flatten = nn.Flatten()
        self.activation = elu()

    def forward(self, x):
        x = self.flatten(x)
        x = self.fc1(x)
        x = self.activation(x)
        x = self.fc2(x)
        x = self.activation(x)
        x = self.fc3(x)
        # Note: the activation is applied after the final layer as well, so the
        # network returns ELU-transformed values rather than raw logits.
        x = self.activation(x)
        return x

class MLP_CIFAR10(nn.Module):
    """Three-layer MLP for CIFAR-10: 32*32*3 -> 80 -> 60 -> 10."""

    def __init__(self) -> None:
        super().__init__()
        self.fc1 = linear(32 * 32 * 3, 80)
        self.fc2 = linear(80, 60)
        self.fc3 = linear(60, 10)
        self.flatten = nn.Flatten()
        self.activation = elu()

    def forward(self, x):
        x = self.flatten(x)
        x = self.fc1(x)
        x = self.activation(x)
        x = self.fc2(x)
        x = self.activation(x)
        x = self.fc3(x)
        # As in MLP_MNIST, the final layer's output is also ELU-transformed.
        x = self.activation(x)
        return x

MODEL_DICT = {"mnist": MLP_MNIST, "cifar": MLP_CIFAR10}


def get_model(dataset, device):
    """Instantiate the model registered for `dataset` ("mnist" or "cifar") and move it to `device`."""
    return MODEL_DICT[dataset]().to(device)
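
# Minimal usage sketch (added for exposition, not part of the original file);
# runs a forward pass on dummy batches for both registered datasets. nn.Flatten
# collapses everything after the batch dimension, so image tensors can be fed
# directly without manual reshaping.
if __name__ == "__main__":
    device = torch.device("cpu")
    mnist_model = get_model("mnist", device)
    print(mnist_model(torch.randn(8, 1, 28, 28)).shape)  # torch.Size([8, 10])
    cifar_model = get_model("cifar", device)
    print(cifar_model(torch.randn(8, 3, 32, 32)).shape)  # torch.Size([8, 10])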