activating every layer
This commit is contained in:
parent
58f63586cd
commit
c32d9da567
|
@ -5,7 +5,6 @@ from sklearn.metrics import accuracy_score, f1_score
|
||||||
from tqdm import tqdm
|
from tqdm import tqdm
|
||||||
import math
|
import math
|
||||||
from sklearn.model_selection import train_test_split
|
from sklearn.model_selection import train_test_split
|
||||||
|
|
||||||
from model.early_stop import EarlyStop
|
from model.early_stop import EarlyStop
|
||||||
from model.transformations import FFProjection
|
from model.transformations import FFProjection
|
||||||
|
|
||||||
|
|
|
@ -19,7 +19,8 @@ class CNNProjection(nn.Module):
|
||||||
hidden_sizes=[1024],
|
hidden_sizes=[1024],
|
||||||
output_size=out_size,
|
output_size=out_size,
|
||||||
activation=nn.functional.relu,
|
activation=nn.functional.relu,
|
||||||
dropout=dropout)
|
dropout=dropout,
|
||||||
|
activate_last=True)
|
||||||
self.output_size = out_size
|
self.output_size = out_size
|
||||||
|
|
||||||
def convolve(self, x):
|
def convolve(self, x):
|
||||||
|
@ -42,8 +43,6 @@ class CNNProjection(nn.Module):
|
||||||
x = self.embed(x) # (N, W, D)
|
x = self.embed(x) # (N, W, D)
|
||||||
x = self.convolve(x) # (N, len(Ks)*Co)
|
x = self.convolve(x) # (N, len(Ks)*Co)
|
||||||
x = self.fc(x)
|
x = self.fc(x)
|
||||||
#x = F.relu(self.fc1(x)) # (N, C)
|
|
||||||
# x = self.dropout(x)
|
|
||||||
x = self.l2norm(x)
|
x = self.l2norm(x)
|
||||||
return x
|
return x
|
||||||
|
|
||||||
|
@ -52,7 +51,8 @@ class CNNProjection(nn.Module):
|
||||||
|
|
||||||
|
|
||||||
class FFProjection(nn.Module):
|
class FFProjection(nn.Module):
|
||||||
def __init__(self, input_size, hidden_sizes, output_size, activation=nn.functional.relu, dropout=0.5):
|
def __init__(self, input_size, hidden_sizes, output_size, activation=nn.functional.relu, dropout=0.5,
|
||||||
|
activate_last=False):
|
||||||
super(FFProjection, self).__init__()
|
super(FFProjection, self).__init__()
|
||||||
sizes = [input_size] + hidden_sizes + [output_size]
|
sizes = [input_size] + hidden_sizes + [output_size]
|
||||||
self.ff = nn.ModuleList([
|
self.ff = nn.ModuleList([
|
||||||
|
@ -60,11 +60,14 @@ class FFProjection(nn.Module):
|
||||||
])
|
])
|
||||||
self.activation = activation
|
self.activation = activation
|
||||||
self.dropout = nn.Dropout(p=dropout)
|
self.dropout = nn.Dropout(p=dropout)
|
||||||
|
self.activate_last = activate_last
|
||||||
|
|
||||||
def forward(self, x):
    """Run `x` through the stacked linear layers.

    Every layer's output is passed through the activation function and
    then dropout, except the final layer, which only gets the
    activation/dropout treatment when ``self.activate_last`` is set.

    :param x: input tensor fed to the first ``nn.Linear`` in ``self.ff``
    :return: projected tensor from the last layer
    """
    final_idx = len(self.ff) - 1
    for idx, layer in enumerate(self.ff):
        x = layer(x)
        # The last layer is left raw unless activate_last was requested.
        if self.activate_last or idx < final_idx:
            x = self.dropout(self.activation(x))
    return x
|
||||||
|
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue