activating every layer

This commit is contained in:
Alejandro Moreo Fernandez 2020-07-08 12:13:35 +02:00
parent 58f63586cd
commit c32d9da567
2 changed files with 10 additions and 8 deletions

View File

@@ -5,7 +5,6 @@ from sklearn.metrics import accuracy_score, f1_score
from tqdm import tqdm
import math
from sklearn.model_selection import train_test_split
from model.early_stop import EarlyStop
from model.transformations import FFProjection

View File

@@ -19,7 +19,8 @@ class CNNProjection(nn.Module):
hidden_sizes=[1024],
output_size=out_size,
activation=nn.functional.relu,
dropout=dropout)
dropout=dropout,
activate_last=True)
self.output_size = out_size
def convolve(self, x):
@@ -42,8 +43,6 @@ class CNNProjection(nn.Module):
x = self.embed(x) # (N, W, D)
x = self.convolve(x) # (N, len(Ks)*Co]
x = self.fc(x)
#x = F.relu(self.fc1(x)) # (N, C)
# x = self.dropout(x)
x = self.l2norm(x)
return x
@@ -52,7 +51,8 @@ class CNNProjection(nn.Module):
class FFProjection(nn.Module):
def __init__(self, input_size, hidden_sizes, output_size, activation=nn.functional.relu, dropout=0.5):
def __init__(self, input_size, hidden_sizes, output_size, activation=nn.functional.relu, dropout=0.5,
activate_last=False):
super(FFProjection, self).__init__()
sizes = [input_size] + hidden_sizes + [output_size]
self.ff = nn.ModuleList([
@@ -60,11 +60,14 @@ class FFProjection(nn.Module):
])
self.activation = activation
self.dropout = nn.Dropout(p=dropout)
self.activate_last = activate_last
def forward(self, x):
for linear in self.ff[:-1]:
x = self.dropout(self.activation(linear(x)))
x = self.ff[-1](x)
last_layer_idx = len(self.ff)-1
for i,linear in enumerate(self.ff):
x = linear(x)
if i < last_layer_idx or self.activate_last:
x = self.dropout(self.activation(x))
return x