From 2d6ac4af0d2fa52bb7caf07b2acd6fe69edc84da Mon Sep 17 00:00:00 2001
From: Alejandro Moreo
Date: Mon, 4 Sep 2023 12:05:25 +0200
Subject: [PATCH] testing the sensitivity of KDEy to the bandwidth

---
 distribution_matching/show_results.py       |  2 +-
 .../tweets_bandwidth_sensibility.py         | 63 +++++++++++++++++++
 distribution_matching/tweets_experiments.py |  6 +-
 3 files changed, 67 insertions(+), 4 deletions(-)
 create mode 100644 distribution_matching/tweets_bandwidth_sensibility.py

diff --git a/distribution_matching/show_results.py b/distribution_matching/show_results.py
index c2b8eeb..918d12e 100644
--- a/distribution_matching/show_results.py
+++ b/distribution_matching/show_results.py
@@ -2,7 +2,7 @@ import sys
 from pathlib import Path
 import pandas as pd
 
-result_dir = 'results_tweet_1000_mrae'
+result_dir = 'results_tweet_mae_redohyper'
 #result_dir = 'results_lequa_mrae'
 
 dfs = []
diff --git a/distribution_matching/tweets_bandwidth_sensibility.py b/distribution_matching/tweets_bandwidth_sensibility.py
new file mode 100644
index 0000000..f3564fc
--- /dev/null
+++ b/distribution_matching/tweets_bandwidth_sensibility.py
@@ -0,0 +1,63 @@
+import pickle
+import numpy as np
+from sklearn.linear_model import LogisticRegression
+import os
+import sys
+import pandas as pd
+
+import quapy as qp
+from quapy.method.aggregative import EMQ, DistributionMatching, PACC, ACC, CC, PCC, HDy, OneVsAllAggregative
+from method_kdey import KDEy
+from method_dirichlety import DIRy
+from quapy.model_selection import GridSearchQ
+from quapy.protocol import UPP
+
+SEED=1
+
+if __name__ == '__main__':
+
+    qp.environ['SAMPLE_SIZE'] = 100
+    qp.environ['N_JOBS'] = -1
+    n_bags_val = 250
+    n_bags_test = 1000
+    result_dir = f'results_tweet_sensibility'
+
+    os.makedirs(result_dir, exist_ok=True)
+
+    method = 'KDEy-MLE'
+
+    global_result_path = f'{result_dir}/{method}'
+
+    if not os.path.exists(global_result_path+'.csv'):
+        with open(global_result_path+'.csv', 'wt') as csv:
+            csv.write(f'Method\tDataset\tBandwidth\tMAE\tMRAE\tKLD\n')
+
+    with open(global_result_path+'.csv', 'at') as csv:
+        # the four SemEval datasets share the same training set, so optimizing hyperparameters four times is wasteful;
+        # this flag records whether model selection has already been carried out, so that it can be skipped afterwards
+        semeval_trained = False
+
+        for bandwidth in np.linspace(0.01, 0.2, 20):
+            for dataset in qp.datasets.TWITTER_SENTIMENT_DATASETS_TEST:
+                print('init', dataset)
+
+                local_result_path = global_result_path + '_' + dataset + f'_{bandwidth:.3f}'
+
+                with qp.util.temp_seed(SEED):
+
+                    data = qp.datasets.fetch_twitter(dataset, min_df=3, pickle=True, for_model_selection=False)
+                    quantifier = KDEy(LogisticRegression(), target='max_likelihood', val_split=10, bandwidth=bandwidth)
+                    quantifier.fit(data.training)
+                    protocol = UPP(data.test, repeats=n_bags_test)
+                    report = qp.evaluation.evaluation_report(quantifier, protocol, error_metrics=['mae', 'mrae', 'kld'], verbose=True)
+                    report.to_csv(f'{local_result_path}.dataframe')
+                    means = report.mean()
+                    csv.write(f'{method}\t{data.name}\t{bandwidth}\t{means["mae"]:.5f}\t{means["mrae"]:.5f}\t{means["kld"]:.5f}\n')
+                    csv.flush()
+
+    df = pd.read_csv(global_result_path+'.csv', sep='\t')
+
+    pd.set_option('display.max_columns', None)
+    pd.set_option('display.max_rows', None)
+    pv = df.pivot_table(index='Dataset', columns="Method", values=["MAE", "MRAE"])
+    print(pv)
diff --git a/distribution_matching/tweets_experiments.py b/distribution_matching/tweets_experiments.py
index ed3847a..8d2e542 100644
--- a/distribution_matching/tweets_experiments.py
+++ b/distribution_matching/tweets_experiments.py
@@ -20,13 +20,13 @@ if __name__ == '__main__':
     qp.environ['N_JOBS'] = -1
     n_bags_val = 250
     n_bags_test = 1000
-    optim = 'mrae'
-    result_dir = f'results_tweet_{optim}'
+    optim = 'mae'
+    result_dir = f'results_tweet_{optim}_redohyper'
 
     os.makedirs(result_dir, exist_ok=True)
 
     hyper_LR = {
-        'classifier__C': np.logspace(-4,4,9),
+        'classifier__C': np.logspace(-3,3,7),
         'classifier__class_weight': ['balanced', None]
     }
 
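
Note, not part of the patch: the new script appends one row per (bandwidth, dataset) pair to results_tweet_sensibility/KDEy-MLE.csv with columns Method, Dataset, Bandwidth, MAE, MRAE and KLD, but its final pivot_table groups only by Method, so the 20 bandwidth values are averaged together. A minimal sketch of how that same file could be inspected per bandwidth instead (the path and column names are taken from the script; the rest is illustrative, not code from the patch):

    import pandas as pd

    # tab-separated results written by tweets_bandwidth_sensibility.py
    df = pd.read_csv('results_tweet_sensibility/KDEy-MLE.csv', sep='\t')

    # one row per bandwidth, one column per dataset: how MAE evolves with the KDE bandwidth
    pv = df.pivot_table(index='Bandwidth', columns='Dataset', values='MAE')
    print(pv)
    print(pv.mean(axis=1))  # average MAE across datasets for each bandwidth value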