diff --git a/src/main.py b/src/main.py index 18e420c..7ecc702 100644 --- a/src/main.py +++ b/src/main.py @@ -135,8 +135,8 @@ def main(opt): yte_ = cls.predict(Xte) print('end-to-end-finetuning network prediction') acc, macrof1, microf1 = evaluation(yte, yte_) - foo.write( - f'end-to-end-finetuning network prediction: acc={acc:.3f} macrof1={macrof1:.3f} microf1={microf1:.3f}\n') + foo.write(f'end-to-end-finetuning network prediction: ' + f'acc={acc:.3f} macrof1={macrof1:.3f} microf1={microf1:.3f}\n') print('training end-to-end without self-supervision init') cls, phi = instantiate_model(A, index, pad_index, device) @@ -150,6 +150,8 @@ def main(opt): yte_ = cls.predict(Xte) print('end-to-end (w/o self-supervised initialization) network prediction') acc, macrof1, microf1 = evaluation(yte, yte_) + foo.write(f'end-to-end (w/o self-supervised initialization) ' + f'network prediction: acc={acc:.3f} macrof1={macrof1:.3f} microf1={microf1:.3f}\n') svm_experiment(Xtr_svm, ytr, Xte_svm, yte, foo, 'svm-kernel') diff --git a/src/model/classifiers.py b/src/model/classifiers.py index 3fce2f1..435a772 100644 --- a/src/model/classifiers.py +++ b/src/model/classifiers.py @@ -133,7 +133,7 @@ class AuthorshipAttributionClassifier(nn.Module): early_stop = EarlyStop(patience, lower_is_better=True) criterion = SupConLoss1View().to(self.device) - optim = torch.optim.Adam(self.parameters(), lr=lr) + optim = torch.optim.Adam(self.parameters(), lr=lr) # TODO: currently optimizes ALL of self.parameters(); restrict to phi's parameters if that is the intent tr_data = IndexedDataset(X, y, self.pad_length, self.pad_index, self.device) val_data = IndexedDataset(Xval, yval, self.pad_length, self.pad_index, self.device)