From bdbe933a41d2de8903a42a5878c588073a0380de Mon Sep 17 00:00:00 2001
From: Alex Moreo
Date: Wed, 25 Aug 2021 17:11:22 +0200
Subject: [PATCH] tables and plots updates

---
 TweetSentQuant/gen_plots.py  |  16 ++--
 TweetSentQuant/gen_tables.py | 159 +++++++++++++++++------------------
 TweetSentQuant/tabular.py    |   2 +-
 TweetSentQuant/util.py       |   3 +
 quapy/plot.py                |  41 +++++----
 5 files changed, 116 insertions(+), 105 deletions(-)

diff --git a/TweetSentQuant/gen_plots.py b/TweetSentQuant/gen_plots.py
index ca5015a..82818e8 100644
--- a/TweetSentQuant/gen_plots.py
+++ b/TweetSentQuant/gen_plots.py
@@ -35,10 +35,10 @@ def plot_error_by_drift(methods, error_name, logscale=False, path=None):
     method_names, true_prevs, estim_prevs, tr_prevs = gather_results(methods, error_name)
 
     method_order = ['SVM(AE)' if error_name=='ae' else 'SVM(RAE)',
-                      'PCC', 'SVM(KLD)', 'SVM(Q)', 'SVM(NKLD)', 'CC', 'HDy',
-                      'E(PACC)$_\\mathrm{Ptr}$',
-                      'E(PACC)$_\\mathrm{AE}$' if error_name=='ae' else 'E(PACC)$_\\mathrm{RAE}$',
-                      'QuaNet', 'PACC', 'ACC', 'SLD']
+                    'PCC', 'SVM(KLD)', 'SVM(Q)', 'SVM(NKLD)', 'CC', 'HDy',
+                    'E(PACC)$_\\mathrm{Ptr}$',
+                    'E(PACC)$_\\mathrm{AE}$' if error_name=='ae' else 'E(PACC)$_\\mathrm{RAE}$',
+                    'QuaNet', 'PACC', 'ACC', 'SLD']
     qp.plot.error_by_drift(
         method_names,
         true_prevs,
@@ -94,11 +94,11 @@ gao_seb_methods = ['cc', 'acc', 'pcc', 'pacc', 'sld', 'svmq', 'svmkld', 'svmnkld
 new_methods_ae = ['svmmae' , 'epaccmaeptr', 'epaccmaemae', 'hdy', 'quanet']
 new_methods_rae = ['svmmrae' , 'epaccmraeptr', 'epaccmraemrae', 'hdy', 'quanet']
 
-# plot_error_by_drift(gao_seb_methods+new_methods_ae, error_name='ae', path=plotdir)
-# plot_error_by_drift(gao_seb_methods+new_methods_rae, error_name='rae', logscale=True, path=plotdir)
+plot_error_by_drift(gao_seb_methods+new_methods_ae, error_name='ae', path=plotdir)
+plot_error_by_drift(gao_seb_methods+new_methods_rae, error_name='rae', logscale=True, path=plotdir)
 
-diagonal_plot(gao_seb_methods+new_methods_ae, error_name='ae', path=plotdir)
-diagonal_plot(gao_seb_methods+new_methods_rae, error_name='rae', path=plotdir)
+# diagonal_plot(gao_seb_methods+new_methods_ae, error_name='ae', path=plotdir)
+# diagonal_plot(gao_seb_methods+new_methods_rae, error_name='rae', path=plotdir)
 
 # binary_bias_global(gao_seb_methods+new_methods_ae, error_name='ae', path=plotdir)
 # binary_bias_global(gao_seb_methods+new_methods_rae, error_name='rae', path=plotdir)
diff --git a/TweetSentQuant/gen_tables.py b/TweetSentQuant/gen_tables.py
index e897d56..e581c44 100644
--- a/TweetSentQuant/gen_tables.py
+++ b/TweetSentQuant/gen_tables.py
@@ -10,6 +10,7 @@ from experiments import result_path
 from tabular import Table
 
 tables_path = './tables'
+results_path = './results'
 MAXTONE = 50  # sets the intensity of the maximum color reached by the worst (red) and best (green) results
 
 makedirs(tables_path, exist_ok=True)
@@ -23,8 +24,8 @@ def save_table(path, table):
         foo.write(table)
 
 
-def experiment_errors(path, dataset, method, loss):
-    path = result_path(path, dataset, method, 'm'+loss if not loss.startswith('m') else loss)
+def experiment_errors(path, dataset, method, optloss, loss):
+    path = result_path(path, dataset, method, 'm'+optloss if not optloss.startswith('m') else optloss)
     if os.path.exists(path):
         true_prevs, estim_prevs, _, _, _, _ = pickle.load(open(path, 'rb'))
         err_fn = getattr(qp.error, loss)
@@ -35,13 +36,10 @@ if __name__ == '__main__':
-    parser = argparse.ArgumentParser(description='Generate tables for Tweeter Sentiment Quantification')
-    parser.add_argument('results', metavar='RESULT_PATH', type=str,
-                        help='path to the directory where to store the results')
-    args = parser.parse_args()
 
     datasets = qp.datasets.TWITTER_SENTIMENT_DATASETS_TEST
     evaluation_measures = [qp.error.ae, qp.error.rae]
+    secondary_eval_measures = [qp.error.kld.__name__, qp.error.nkld.__name__, qp.error.se.__name__]
     gao_seb_methods = ['cc', 'acc', 'pcc', 'pacc', 'sld', 'svmq', 'svmkld', 'svmnkld']
     new_methods = ['hdy', 'quanet']
 
@@ -52,94 +50,93 @@ if __name__ == '__main__':
 
         # Tables evaluation scores for AE and RAE (two tables)
         # ----------------------------------------------------
 
-        eval_name = eval_func.__name__
-        added_methods = ['svmm' + eval_name, f'epaccm{eval_name}ptr', f'epaccm{eval_name}m{eval_name}'] + new_methods
+        main_eval_name = eval_func.__name__
+        added_methods = ['svmm' + main_eval_name, f'epaccm{main_eval_name}ptr', f'epaccm{main_eval_name}m{main_eval_name}'] + new_methods
         methods = gao_seb_methods + added_methods
         nold_methods = len(gao_seb_methods)
         nnew_methods = len(added_methods)
 
-        # fill data table
-        table = Table(benchmarks=datasets, methods=methods)
-        for dataset in datasets:
+        for eval_name in [main_eval_name] + secondary_eval_measures:
+
+            # fill data table
+            table = Table(benchmarks=datasets, methods=methods)
+            for dataset in datasets:
+                for method in methods:
+                    table.add(dataset, method, experiment_errors(results_path, dataset, method, main_eval_name, eval_name))
+
+            # write the latex table
+            tabular = """
+            \\resizebox{\\textwidth}{!}{%
+            \\begin{tabular}{|c||""" + ('c|' * nold_methods) + '|' + ('c|' * nnew_methods) + """} \hline
+              & \multicolumn{""" + str(nold_methods) + """}{c||}{Methods tested in~\cite{Gao:2016uq}} &
+              \multicolumn{""" + str(nnew_methods) + """}{c|}{Newly added methods} \\\\ \hline
+            """
+            rowreplace={dataset: nicename(dataset) for dataset in datasets}
+            colreplace={method: nicename(method, main_eval_name, side=True) for method in methods}
+
+            tabular += table.latexTabular(benchmark_replace=rowreplace, method_replace=colreplace)
+            tabular += """
+            \end{tabular}%
+            }
+            """
+
+            save_table(f'./tables/tab_results_{main_eval_name}_{eval_name}.tex', tabular)
+
+            continue
+
+            # Tables ranks for AE and RAE (two tables)
+            # ----------------------------------------------------
+            methods = gao_seb_methods
+
+            table.dropMethods(added_methods)
+
+            # fill the data table
+            ranktable = Table(benchmarks=datasets, methods=methods, missing='--')
+            for dataset in datasets:
+                for method in methods:
+                    ranktable.add(dataset, method, values=table.get(dataset, method, 'rank'))
+
+            # write the latex table
+            tabular = """
+            \\resizebox{\\textwidth}{!}{%
+            \\begin{tabular}{|c||""" + ('c|' * len(gao_seb_methods)) + """} \hline
+              & \multicolumn{""" + str(nold_methods) + """}{c|}{Methods tested in~\cite{Gao:2016uq}} \\\\ \hline
+            """
             for method in methods:
-                table.add(dataset, method, experiment_errors(args.results, dataset, method, eval_name))
+                tabular += ' & ' + nicename(method, eval_name, side=True)
+            tabular += "\\\\\hline\n"
 
-        # write the latex table
-        # tabular = """
-        # \\begin{tabularx}{\\textwidth}{|c||""" + ('Y|'*nold_methods)+ '|' + ('Y|'*nnew_methods) + """} \hline
-        #   & \multicolumn{"""+str(nold_methods)+"""}{c||}{Methods tested in~\cite{Gao:2016uq}} &
-        #   \multicolumn{"""+str(nnew_methods)+"""}{c|}{} \\\\ \hline
-        # """
-        tabular = """
-        \\resizebox{\\textwidth}{!}{%
-        \\begin{tabular}{|c||""" + ('c|' * nold_methods) + '|' + ('c|' * nnew_methods) + """} \hline
-          & \multicolumn{""" + str(nold_methods) + """}{c||}{Methods tested in~\cite{Gao:2016uq}} &
-          \multicolumn{""" + str(nnew_methods) + """}{c|}{Newly added methods} \\\\ \hline
-        """
-        rowreplace={dataset: nicename(dataset) for dataset in datasets}
-        colreplace={method: nicename(method, eval_name, side=True) for method in methods}
+            for dataset in datasets:
+                tabular += nicename(dataset) + ' '
+                for method in methods:
+                    newrank = ranktable.get(dataset, method)
+                    oldrank = gao_seb_ranks[f'{dataset}-{method}-{eval_name}']
+                    if newrank != '--':
+                        newrank = f'{int(newrank)}'
+                    color = ranktable.get_color(dataset, method)
+                    if color == '--':
+                        color = ''
+                    tabular += ' & ' + f'{newrank}' + f' ({oldrank}) ' + color
+                tabular += '\\\\\hline\n'
+            tabular += '\hline\n'
 
-        tabular += table.latexTabular(benchmark_replace=rowreplace, method_replace=colreplace)
-        tabular += """
-        \end{tabular}%
-        }
-        """
-
-        save_table(f'./tables/tab_results_{eval_name}.new.tex', tabular)
-
-        # Tables ranks for AE and RAE (two tables)
-        # ----------------------------------------------------
-        methods = gao_seb_methods
-
-        table.dropMethods(added_methods)
-
-        # fill the data table
-        ranktable = Table(benchmarks=datasets, methods=methods, missing='--')
-        for dataset in datasets:
+            tabular += 'Average '
             for method in methods:
-                ranktable.add(dataset, method, values=table.get(dataset, method, 'rank'))
-
-        # write the latex table
-        tabular = """
-        \\resizebox{\\textwidth}{!}{%
-        \\begin{tabular}{|c||""" + ('c|' * len(gao_seb_methods)) + """} \hline
-          & \multicolumn{""" + str(nold_methods) + """}{c|}{Methods tested in~\cite{Gao:2016uq}} \\\\ \hline
-        """
-        for method in methods:
-            tabular += ' & ' + nicename(method, eval_name, side=True)
-        tabular += "\\\\\hline\n"
-
-        for dataset in datasets:
-            tabular += nicename(dataset) + ' '
-            for method in methods:
-                newrank = ranktable.get(dataset, method)
-                oldrank = gao_seb_ranks[f'{dataset}-{method}-{eval_name}']
+                newrank = ranktable.get_average(method)
+                oldrank = gao_seb_ranks[f'Average-{method}-{eval_name}']
                 if newrank != '--':
-                    newrank = f'{int(newrank)}'
-                    color = ranktable.get_color(dataset, method)
+                    newrank = f'{newrank:.1f}'
+                    oldrank = f'{oldrank:.1f}'
+                color = ranktable.get_average(method, 'color')
                 if color == '--':
                     color = ''
                 tabular += ' & ' + f'{newrank}' + f' ({oldrank}) ' + color
             tabular += '\\\\\hline\n'
-            tabular += '\hline\n'
+        tabular += """
+        \end{tabular}%
+        }
+        """
 
-        tabular += 'Average '
-        for method in methods:
-            newrank = ranktable.get_average(method)
-            oldrank = gao_seb_ranks[f'Average-{method}-{eval_name}']
-            if newrank != '--':
-                newrank = f'{newrank:.1f}'
-                oldrank = f'{oldrank:.1f}'
-                color = ranktable.get_average(method, 'color')
-                if color == '--':
-                    color = ''
-                tabular += ' & ' + f'{newrank}' + f' ({oldrank}) ' + color
-            tabular += '\\\\\hline\n'
-        tabular += """
-        \end{tabular}%
-        }
-        """
-
-        save_table(f'./tables/tab_rank_{eval_name}.new.tex', tabular)
+        save_table(f'./tables/tab_rank_{main_eval_name}.{eval_name}.tex', tabular)
 
     print("[Done]")
diff --git a/TweetSentQuant/tabular.py b/TweetSentQuant/tabular.py
index cb90f3f..324be1e 100644
--- a/TweetSentQuant/tabular.py
+++ b/TweetSentQuant/tabular.py
@@ -283,7 +283,7 @@ class Table:
         return t
 
     def dropMethods(self, methods):
-        drop_index = [self.method_index[m] for m in methods]
+        drop_index = [self.method_index[m] for m in methods if m in self.method_index]
         new_methods = np.delete(self.methods, drop_index)
         new_index = {col:j for j, col in enumerate(new_methods)}
 
diff --git a/TweetSentQuant/util.py b/TweetSentQuant/util.py
index fef866e..c52d9f4 100644
--- a/TweetSentQuant/util.py
+++ b/TweetSentQuant/util.py
@@ -6,6 +6,9 @@ nice = {
     'mrae':'RAE',
     'ae':'AE',
     'rae':'RAE',
+    'kld':'KLD',
+    'nkld':'NKLD',
+    'se':'SE',
     'svmkld': 'SVM(KLD)',
     'svmnkld': 'SVM(NKLD)',
     'svmq': 'SVM(Q)',
diff --git a/quapy/plot.py b/quapy/plot.py
index 90f92e3..b902257 100644
--- a/quapy/plot.py
+++ b/quapy/plot.py
@@ -170,6 +170,12 @@ def _merge(method_names, true_prevs, estim_prevs):
     return method_order, true_prevs_, estim_prevs_
 
 
+def _set_colors(ax, n_methods):
+    NUM_COLORS = n_methods
+    cm = plt.get_cmap('tab20')
+    ax.set_prop_cycle(color=[cm(1. * i / NUM_COLORS) for i in range(NUM_COLORS)])
+
+
 def error_by_drift(method_names, true_prevs, estim_prevs, tr_prevs, n_bins=20, error_name='ae', show_std=True,
                    logscale=False,
                    title=f'Quantification error as a function of distribution shift',
@@ -184,8 +190,10 @@ def error_by_drift(method_names, true_prevs, estim_prevs, tr_prevs, n_bins=20, e
 
     # join all data, and keep the order in which the methods appeared for the first time
     data = defaultdict(lambda:{'x':np.empty(shape=(0)), 'y':np.empty(shape=(0))})
+
     if method_order is None:
         method_order = []
+
     for method, test_prevs_i, estim_prevs_i, tr_prev_i in zip(method_names, true_prevs, estim_prevs, tr_prevs):
         tr_prev_i = np.repeat(tr_prev_i.reshape(1,-1), repeats=test_prevs_i.shape[0], axis=0)
 
@@ -198,54 +206,57 @@ def error_by_drift(method_names, true_prevs, estim_prevs, tr_prevs, n_bins=20, e
         if method not in method_order:
             method_order.append(method)
 
-    print(method_order)
+    _set_colors(ax, n_methods=len(method_order))
+
     bins = np.linspace(0, 1, n_bins+1)
     binwidth = 1 / n_bins
-    min_x, max_x = None, None
-    min_y, max_y = None, None
-    cm = plt.get_cmap('tab20')
-    NUM_COLORS = len(method_order)
-    ax.set_prop_cycle(color=[cm(1. * i / NUM_COLORS) for i in range(NUM_COLORS)])
-    for i,method in enumerate(method_order):
+    min_x, max_x, min_y, max_y = None, None, None, None
+    npoints = np.zeros(len(bins), dtype=float)
+    for method in method_order:
         tr_test_drifts = data[method]['x']
         method_drifts = data[method]['y']
         if logscale:
             method_drifts=np.log(1+method_drifts)
 
         inds = np.digitize(tr_test_drifts, bins, right=True)
+
         xs, ys, ystds = [], [], []
-        for ind in range(len(bins)):
+        for p,ind in enumerate(range(len(bins))):
             selected = inds==ind
             if selected.sum() > 0:
                 xs.append(ind*binwidth-binwidth/2)
                 ys.append(np.mean(method_drifts[selected]))
                 ystds.append(np.std(method_drifts[selected]))
+                npoints[p] += len(method_drifts[selected])
 
         xs = np.asarray(xs)
        ys = np.asarray(ys)
         ystds = np.asarray(ystds)
 
-        min_x_method, max_x_method = xs.min(), xs.max()
-        min_y_method, max_y_method = ys.min(), ys.max()
+        min_x_method, max_x_method, min_y_method, max_y_method = xs.min(), xs.max(), ys.min(), ys.max()
         min_x = min_x_method if min_x is None or min_x_method < min_x else min_x
         max_x = max_x_method if max_x is None or max_x_method > max_x else max_x
+        max_y = max_y_method if max_y is None or max_y_method > max_y else max_y
         min_y = min_y_method if min_y is None or min_y_method < min_y else min_y
         max_y = max_y_method if max_y is None or max_y_method > max_y else max_y
 
-        marker = 'o' #if i < 10 else '^'
-        ax.errorbar(xs, ys, fmt='-', marker=marker, label=method, markersize=6, zorder=2, linewidth=2.5)
+        ax.errorbar(xs, ys, fmt='-', marker='o', color='w', markersize=8, linewidth=4, zorder=1)
+        ax.errorbar(xs, ys, fmt='-', marker='o', label=method, markersize=6, linewidth=2, zorder=2)
+
         if show_std:
             ax.fill_between(xs, ys-ystds, ys+ystds, alpha=0.25)
 
+    ax.bar([ind * binwidth-binwidth/2 for ind in range(len(bins))], max_y*npoints/np.max(npoints), alpha=0.15, color='g', width=binwidth, label='density')
+
     ax.set(xlabel=f'Distribution shift between training set and test sample',
            ylabel=f'{error_name.upper()} (true distribution, predicted distribution)',
            title=title)
 
     box = ax.get_position()
     ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
+    ax.axvline(0.02, 0, 1, linestyle='--', color='k')
+    ax.axvline(0.1055, 0, 1, linestyle='--', color='k')
+    ax.set_xlim(0, max_x)
     ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
-    ax.set_xlim(min_x, max_x)
-    ax.fill_between([0.02, 0.1055], min_y, max_y,
-                    facecolor='green', alpha=0.25)
 
     save_or_show(savepath)
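
------------------------------------------------------------------------
Editor's notes (illustration only; not part of the commit)

The quapy/plot.py hunks change error_by_drift() in three ways: a 'tab20'
color cycle sized to the number of methods (factored into _set_colors), a
wider white "halo" line drawn under each curve at zorder=1 so overlapping
curves stay readable, and a translucent bar overlay showing how many test
samples fall in each drift bin; the old shaded green band is replaced by
two dashed vertical lines at drift 0.02 and 0.1055. The sketch below
reproduces those ideas as a self-contained script; the method names, the
drift values, and the error model are synthetic stand-ins invented for the
example, not data from the patch.

    import numpy as np
    import matplotlib.pyplot as plt

    rng = np.random.default_rng(0)
    n_bins = 20
    bins = np.linspace(0, 1, n_bins + 1)
    binwidth = 1 / n_bins

    # synthetic per-sample drift values for three made-up methods
    methods = {name: rng.uniform(0, 0.5, 1000) for name in ['CC', 'ACC', 'SLD']}

    fig, ax = plt.subplots()
    cm = plt.get_cmap('tab20')  # same colormap _set_colors() uses
    ax.set_prop_cycle(color=[cm(i / len(methods)) for i in range(len(methods))])

    npoints = np.zeros(len(bins))   # population of each drift bin
    max_y = 0
    for name, drifts in methods.items():
        # toy error model: error grows roughly linearly with drift
        errors = drifts * rng.uniform(0.2, 0.5) + rng.normal(0, 0.02, drifts.size)
        inds = np.digitize(drifts, bins, right=True)
        xs, ys = [], []
        for ind in range(len(bins)):
            sel = inds == ind
            if sel.sum() > 0:
                xs.append(ind * binwidth - binwidth / 2)
                ys.append(errors[sel].mean())
                npoints[ind] += sel.sum()
        max_y = max(max_y, max(ys))
        # wide white line below (zorder=1), colored line on top (zorder=2)
        ax.errorbar(xs, ys, fmt='-', marker='o', color='w', markersize=8,
                    linewidth=4, zorder=1)
        ax.errorbar(xs, ys, fmt='-', marker='o', label=name, markersize=6,
                    linewidth=2, zorder=2)

    # bin-density overlay, rescaled so the tallest bar reaches the tallest curve
    ax.bar([i * binwidth - binwidth / 2 for i in range(len(bins))],
           max_y * npoints / npoints.max(), width=binwidth, alpha=0.15,
           color='g', label='density')
    ax.legend()
    plt.show()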
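
In gen_tables.py, experiment_errors() now takes both the loss the model was
optimized for (optloss, which selects the stored results file) and the loss
used for scoring (loss), so each run can be re-scored under the secondary
measures (kld, nkld, se) without re-running experiments. A minimal sketch of
that pattern follows; it assumes only that the pickled results begin with the
(true_prevs, estim_prevs, ...) tuple unpacked in experiment_errors(). The
helper name errors_for() and the SAMPLE_SIZE value are illustrative.

    import pickle
    import quapy as qp

    # kld/nkld apply smoothing derived from the sample size (value is illustrative)
    qp.environ['SAMPLE_SIZE'] = 100

    def errors_for(path, losses=('ae', 'rae', 'kld', 'nkld', 'se')):
        # one stored results file, re-scored under several error measures
        true_prevs, estim_prevs, *_ = pickle.load(open(path, 'rb'))
        scores = {}
        for loss in losses:
            err_fn = getattr(qp.error, loss)   # e.g. qp.error.kld
            scores[loss] = [err_fn(t, e) for t, e in zip(true_prevs, estim_prevs)]
        return scores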