From b823745dd132db5223fd3d6815cf3bd49b6ca8b4 Mon Sep 17 00:00:00 2001
From: Alejandro Moreo <alejandro.moreo@isti.cnr.it>
Date: Wed, 6 Dec 2023 16:55:06 +0100
Subject: [PATCH] cleaning project

---
 distribution_matching/commons.py              | 101 ++---
 .../figures/histograms_density_plot.py        |   2 +-
 .../tables/gen_tables_compact.py              | 249 +++++++++++++
 .../tables/latex/tables_compact.tex           | 107 ++++++
 distribution_matching/tables/tabular.py       | 348 ++++++++++++++++++
 distribution_matching/tweets_experiments.py   |   1 -
 quapy/model_selection.py                      |   2 +-
 quapy/plot.py                                 |   8 +-
 8 files changed, 735 insertions(+), 83 deletions(-)
 create mode 100644 distribution_matching/tables/gen_tables_compact.py
 create mode 100644 distribution_matching/tables/latex/tables_compact.tex
 create mode 100644 distribution_matching/tables/tabular.py

diff --git a/distribution_matching/commons.py b/distribution_matching/commons.py
index 716768e..6d1f1a2 100644
--- a/distribution_matching/commons.py
+++ b/distribution_matching/commons.py
@@ -6,9 +6,14 @@ from distribution_matching.method_kdey_closed_efficient_correct import KDEyclose
 from quapy.method.aggregative import EMQ, CC, PCC, DistributionMatching, PACC, HDy, OneVsAllAggregative, ACC
 from distribution_matching.method_dirichlety import DIRy
 from sklearn.linear_model import LogisticRegression
-from method_kdey_closed_efficient import KDEyclosed_efficient
+from distribution_matching.method_kdey_closed_efficient import KDEyclosed_efficient
+
+# the full list of methods tested in the paper (reported in the appendix)
+METHODS = ['ACC', 'PACC', 'HDy-OvA', 'DM-T', 'DM-HD', 'KDEy-HD', 'DM-CS', 'KDEy-CS', 'DIR', 'EMQ', 'EMQ-BCTS', 'KDEy-ML']
+
+# uncomment this alternative list to restrict the experiments to the methods shown in the body of the paper
+# (the methods left out are not competitive in performance)
+#METHODS = ['PACC', 'DM-T', 'DM-HD', 'KDEy-HD', 'DM-CS', 'KDEy-CS', 'EMQ', 'KDEy-ML']
 
-METHODS  = ['ACC', 'PACC', 'HDy-OvA', 'DM-T', 'DM-HD', 'KDEy-DMhd4', 'DM-CS', 'KDEy-closed++',  'DIR', 'EMQ', 'KDEy-ML'] #['ACC', 'PACC', 'HDy-OvA', 'DIR', 'DM', 'KDEy-DMhd3', 'KDEy-closed++', 'EMQ', 'KDEy-ML'] #, 'KDEy-DMhd2'] #, 'KDEy-DMhd2', 'DM-HD'] 'KDEy-DMjs', 'KDEy-DM', 'KDEy-ML+', 'KDEy-DMhd3+', 'EMQ-C',
 BIN_METHODS = [x.replace('-OvA', '') for x in METHODS]
 
 
@@ -17,6 +22,12 @@ hyper_LR = {
     'classifier__class_weight': ['balanced', None]
 }
 
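+# bandwidth grid shared by all KDEy variants below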
+hyper_kde = {
+    'bandwidth': np.linspace(0.01, 0.2, 20)
+}
+
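+# grid of histogram bin counts explored by the DM-* (distribution matching) methods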
+nbins_range = [2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 64]
+
 def new_method(method, **lr_kwargs):
 
     lr = LogisticRegression(**lr_kwargs)
@@ -33,33 +44,22 @@ def new_method(method, **lr_kwargs):
     elif method == 'PACC':
         param_grid = hyper_LR
         quantifier = PACC(lr)
-    elif method == 'KDEy-ML':
-        method_params = {'bandwidth': np.linspace(0.01, 0.2, 20)}
-        param_grid = {**method_params, **hyper_LR}
-        quantifier = KDEy(lr, target='max_likelihood', val_split=10)
-    elif method == 'KDEy-closed':
-        method_params = {'bandwidth': np.linspace(0.01, 0.2, 20)}
-        param_grid = {**method_params, **hyper_LR}
-        quantifier = KDEyclosed(lr, val_split=10)
-    elif method == 'KDEy-closed+':
-        method_params = {'bandwidth': np.linspace(0.01, 0.2, 20)}
-        param_grid = {**method_params, **hyper_LR}
-        quantifier = KDEyclosed_efficient(lr, val_split=10)
-    elif method == 'KDEy-closed++':
-        method_params = {'bandwidth': np.linspace(0.01, 0.2, 20)}
-        param_grid = {**method_params, **hyper_LR}
+    elif method == 'KDEy-HD':
+        param_grid = {**hyper_kde, **hyper_LR}
+        quantifier = KDEy(lr, target='min_divergence', divergence='HD', montecarlo_trials=10000, val_split=10)
+    elif method == 'KDEy-CS':
+        param_grid = {**hyper_kde, **hyper_LR}
         quantifier = KDEyclosed_efficient_corr(lr, val_split=10)
-    elif method in ['KDEy-DM']:
-        method_params = {'bandwidth': np.linspace(0.01, 0.2, 20)}
-        param_grid = {**method_params, **hyper_LR}
-        quantifier = KDEy(lr, target='min_divergence', divergence='l2', montecarlo_trials=5000, val_split=10)
+    elif method == 'KDEy-ML':
+        param_grid = {**hyper_kde, **hyper_LR}
+        quantifier = KDEy(lr, target='max_likelihood', val_split=10)
     elif method == 'DIR':
         param_grid = hyper_LR
         quantifier = DIRy(lr)
     elif method == 'EMQ':
         param_grid = hyper_LR
         quantifier = EMQ(lr)
-    elif method == 'EMQ-C':
+    elif method == 'EMQ-BCTS':
         method_params = {'exact_train_prev': [False], 'recalib': ['bcts']}
         param_grid = {**method_params, **hyper_LR}
         quantifier = EMQ(lr)
@@ -69,17 +69,9 @@ def new_method(method, **lr_kwargs):
     elif method == 'HDy-OvA':
         param_grid = {'binary_quantifier__' + key: val for key, val in hyper_LR.items()}
         quantifier = OneVsAllAggregative(HDy(lr))
-    elif method == 'DM':
-        method_params = {
-            'nbins': [4,8,16,32],
-            'val_split': [10, 0.4],
-            'divergence': ['HD', 'topsoe', 'l2']
-        }
-        param_grid = {**method_params, **hyper_LR}
-        quantifier = DistributionMatching(lr)
     elif method == 'DM-T':
         method_params = {
-            'nbins': [2,3,4,5,6,7,8,9,10,12,14,16,18,20,22,24,26,28,30,32,64],
+            'nbins': nbins_range,
             'val_split': [10],
             'divergence': ['topsoe']
         }
@@ -87,7 +79,7 @@ def new_method(method, **lr_kwargs):
         quantifier = DistributionMatching(lr)
     elif method == 'DM-HD':
         method_params = {
-            'nbins': [2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 64],
+            'nbins': nbins_range,
             'val_split': [10],
             'divergence': ['HD']
         }
@@ -95,55 +87,12 @@ def new_method(method, **lr_kwargs):
         quantifier = DistributionMatching(lr)
     elif method == 'DM-CS':
         method_params = {
-            'nbins': [2,3,4,5,6,7,8,9,10,12,14,16,18,20,22,24,26,28,30,32,64],
+            'nbins': nbins_range,
             'val_split': [10],
             'divergence': ['CS']
         }
         param_grid = {**method_params, **hyper_LR}
         quantifier = DistributionMatching(lr)
-
-    # experimental
-    elif method in ['KDEy-DMkld']:
-        method_params = {'bandwidth': np.linspace(0.01, 0.2, 20)}
-        param_grid = {**method_params, **hyper_LR}
-        quantifier = KDEy(lr, target='min_divergence', divergence='KLD', montecarlo_trials=5000, val_split=10)
-    # elif method in ['KDEy-DMhd']:
-    #     The code to reproduce this run is commented in the min_divergence target, I think it was incorrect...
-    #     method_params = {'bandwidth': np.linspace(0.01, 0.2, 20)}
-    #     param_grid = {**method_params, **hyper_LR}
-    #     quantifier = KDEy(lr, target='min_divergence', divergence='HD', montecarlo_trials=5000, val_split=10)
-    elif method in ['KDEy-DMhd2']:
-        method_params = {'bandwidth': np.linspace(0.01, 0.2, 20)}
-        param_grid = {**method_params, **hyper_LR}
-        quantifier = KDEy(lr, target='min_divergence_uniform', divergence='HD', montecarlo_trials=5000, val_split=10)
-    elif method in ['KDEy-DMjs']:
-        method_params = {'bandwidth': np.linspace(0.01, 0.2, 20)}
-        param_grid = {**method_params, **hyper_LR}
-        quantifier = KDEy(lr, target='min_divergence_uniform', divergence='JS', montecarlo_trials=5000, val_split=10)
-    elif method in ['KDEy-DMhd3']:
-        # I have realized that there was an error. I am sampling from the validation distribution (V) and not from the
-        # test distribution (T) just because the validation can be sampled in fit only once and pre-computed densities
-        # can be stored. This means that the reference distribution is V and not T. Then I have found that an
-        # f-divergence is defined as D(p||q) \int_{R^n}q(x)f(p(x)/q(x))dx = E_{x~q}[f(p(x)/q(x))], so if I am sampling
-        # V then I am computing D(T||V) (and not D(V||T) as I thought).
-        method_params = {'bandwidth': np.linspace(0.01, 0.2, 20)}
-        param_grid = {**method_params, **hyper_LR}
-        quantifier = KDEy(lr, target='min_divergence', divergence='HD', montecarlo_trials=5000, val_split=10)
-    elif method in ['KDEy-DMhd4']:
-        # This is the new version in which we apply importance sampling, i.e., we compute:
-        #   D(p_a||q) = 1/N sum_x f(p(x)/q(x)) * (q(x)/r(x))
-        # where x ~iid r, with r = p_u, and u = (1/n, 1/n, ..., 1/n) the uniform vector
-        method_params = {'bandwidth': np.linspace(0.01, 0.2, 20)}
-        param_grid = {**method_params, **hyper_LR}
-        quantifier = KDEy(lr, target='min_divergence', divergence='HD', montecarlo_trials=10000, val_split=10)
-    elif method == 'DM-HD':
-        method_params = {
-            'nbins': [4,8,16,32],
-            'val_split': [10, 0.4],
-        }
-        param_grid = {**method_params, **hyper_LR}
-        quantifier = DistributionMatching(lr, divergence='HD')
-
     else:
         raise NotImplementedError('unknown method', method)
 
diff --git a/distribution_matching/figures/histograms_density_plot.py b/distribution_matching/figures/histograms_density_plot.py
index f4d59bd..72887c6 100644
--- a/distribution_matching/figures/histograms_density_plot.py
+++ b/distribution_matching/figures/histograms_density_plot.py
@@ -53,7 +53,7 @@ for i, post_set in enumerate([post_c1, post_c2, post_c3, post_test]):
     ax = fig.add_subplot(141+i, projection='3d')
     for post, c, z in zip(post_set.T, colors, positions):
 
-        hist, bins = np.histogram(post, bins=nbins, density=True)
+        hist, bins = np.histogram(post, bins=nbins, density=True, range=[0,1])
         xs = (bins[:-1] + bins[1:])/2
 
         ax.bar(xs, hist, width=1/nbins, zs=z, zdir='y', color=c, ec=c, alpha=0.6)
diff --git a/distribution_matching/tables/gen_tables_compact.py b/distribution_matching/tables/gen_tables_compact.py
new file mode 100644
index 0000000..d8a8d9f
--- /dev/null
+++ b/distribution_matching/tables/gen_tables_compact.py
@@ -0,0 +1,249 @@
+from distribution_matching.commons import BIN_METHODS, METHODS
+import quapy as qp
+import os
+
+from tabular import Table
+import pandas as pd
+
+tables_path = '.'
+
+MAXTONE = 35  # sets the intensity of the maximum color reached by the worst (red) and best (green) results
+SHOW_STD = False
+
+NUM_ADJUSTMENT_METHODS = 2 if 'ACC' in METHODS else 1
+NUM_MAXIMUM_LIKELIHOOD_METHODS = 4 if 'DIR' in METHODS else 3
+NUM_DISTRIBUTION_MATCHING_PAIRS = 2
+NUM_DISTRIBUTION_MATCHING_METHODS = NUM_DISTRIBUTION_MATCHING_PAIRS*2 + (2 if 'HDy-OvA' in METHODS else 1)
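+# these counts determine the column layout of the LaTeX header built in make_table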
+
+qp.environ['SAMPLE_SIZE'] = 100
+
+nice_bench = {
+    'sanders': 'Sanders',
+    'semeval13': 'SemEval13',
+    'semeval14': 'SemEval14',
+    'semeval15': 'SemEval15',
+    'semeval16': 'SemEval16',
+}
+
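+# maps legacy method names (as used in previously stored result files) to the names used in the paper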
+nice_method={
+    'KDEy-MLE': 'KDEy-ML',
+    'KDEy-DMhd4': 'KDEy-HD',
+    'KDEy-closed++': 'KDEy-CS',
+    'EMQ-C': 'EMQ-BCTS'
+}
+
+def save_table(path, table):
+    print(f'saving results in {path}')
+    with open(path, 'wt') as foo:
+        foo.write(table)
+
+
+def nicerm(key):
+    # wraps the method name (renamed for the paper when applicable) in math-mode roman type
+    return '\mathrm{' + nice_method.get(key, key) + '}'
+
+
+def make_table(tabs, eval, benchmark_groups, benchmark_names, compact=False):
+
+    n_methods = len(METHODS)
+    assert n_methods == (NUM_ADJUSTMENT_METHODS+NUM_DISTRIBUTION_MATCHING_METHODS+NUM_MAXIMUM_LIKELIHOOD_METHODS), \
+        "Unexpected number of methods"
+
+    cline = "\cline{2-" + str(n_methods + 1) + "}"
+
+    # write the latex table
+    tabular = """
+            \\begin{tabular}{|c|""" + ('c|' * NUM_ADJUSTMENT_METHODS) + 'c|c' + ('|c|c' * (NUM_DISTRIBUTION_MATCHING_PAIRS)) +  ('|c' * NUM_MAXIMUM_LIKELIHOOD_METHODS) + """|} """ + cline + """           
+            \multicolumn{1}{c}{} & 
+            \multicolumn{"""+str(NUM_ADJUSTMENT_METHODS)+"""}{|c}{Adjustment} & 
+            \multicolumn{"""+str(NUM_DISTRIBUTION_MATCHING_METHODS)+"""}{|c|}{Distribution Matching} & 
+            \multicolumn{"""+str(NUM_MAXIMUM_LIKELIHOOD_METHODS)+"""}{c|}{Maximum Likelihood} \\\\
+            \hline               
+            """
+    for i, (tab, group, name) in enumerate(zip(tabs, benchmark_groups, benchmark_names)):
+        tablines = tab.latexTabular(benchmark_replace=nice_bench, method_replace=nice_method, endl='\\\\'+ cline, aslines=True)
+        tablines[0] = tablines[0].replace('\multicolumn{1}{c|}{}', '\\textbf{'+name+'}')
+        if not compact:
+            tabular += '\n'.join(tablines)
+        else:
+            # if compact, keep the method names and the average; discard the rest
+            tabular += tablines[0] + '\n' + tablines[-1] + '\n'
+
+        tabular += "\n" + "\\textit{Rank} & " + tab.getRankTable(prec_mean=0 if name.startswith('LeQua') else 1).latexAverage()
+        if i < (len(tabs) - 1):
+            tabular += "\\hline\n"
+        else:
+            tabular += "\n"
+    tabular += "\end{tabular}"
+    return tabular
+
+
+def gen_tables_uci_multiclass(eval):
+
+    print('Generating table for UCI Multiclass Datasets', eval)
+    dir_results = f'../results/ucimulti/{eval}'
+
+    datasets = qp.datasets.UCI_MULTICLASS_DATASETS
+
+    tab = Table(
+        benchmarks=datasets,
+        methods=METHODS,
+        ttest='wilcoxon',
+        prec_mean=4,
+        show_std=SHOW_STD,
+        prec_std=4,
+        clean_zero=(eval=='mae'),
+        average=True,
+        maxtone=MAXTONE
+    )
+
+    for dataset in datasets:
+        print(f'\t Dataset: {dataset}: ', end='')
+        for method in METHODS:
+            result_path = f'{dir_results}/{method}_{dataset}.dataframe'
+            if os.path.exists(result_path):
+                df = pd.read_csv(result_path)
+                print(f'{method}', end=' ')
+                tab.add(dataset, method, df[eval].values)
+            else:
+                print(f'MISSING-{method}', end=' ')
+        print()
+
+    return tab
+
+
+def gen_tables_uci_bin(eval):
+
+    print('Generating table for UCI Datasets', eval)
+    dir_results = f'../results/binary/{eval}'
+
+    exclude = ['acute.a', 'acute.b', 'iris.1', 'balance.2']
+    datasets = [x for x in qp.datasets.UCI_DATASETS if x not in exclude]
+
+    tab = Table(
+        benchmarks=datasets,
+        methods=BIN_METHODS,
+        ttest='wilcoxon',
+        prec_mean=4,
+        show_std=SHOW_STD,
+        prec_std=4,
+        clean_zero=(eval=='mae'),
+        average=True,
+        maxtone=MAXTONE
+    )
+
+    for dataset in datasets:
+        print(f'\t Dataset: {dataset}: ', end='')
+        for method in BIN_METHODS:
+            result_path = f'{dir_results}/{method}_{dataset}.dataframe'
+            if os.path.exists(result_path):
+                df = pd.read_csv(result_path)
+                print(f'{method}', end=' ')
+                tab.add(dataset, method, df[eval].values)
+            else:
+                print(f'MISSING-{method}', end=' ')
+        print()
+
+    return tab
+
+    
+
+def gen_tables_tweet(eval):
+
+    print('Generating table for Twitter', eval)
+    dir_results = f'../results/tweet/{eval}'
+
+    datasets = qp.datasets.TWITTER_SENTIMENT_DATASETS_TEST
+
+    tab = Table(
+        benchmarks=datasets,
+        methods=METHODS,
+        ttest='wilcoxon',
+        prec_mean=4,
+        show_std=SHOW_STD,
+        prec_std=4,
+        clean_zero=(eval=='mae'),
+        average=True,
+        maxtone=MAXTONE
+    )
+
+    for dataset in datasets:
+        print(f'\t Dataset: {dataset}: ', end='')
+        for method in METHODS:
+            result_path = f'{dir_results}/{method}_{dataset}.dataframe'
+            if os.path.exists(result_path):
+                df = pd.read_csv(result_path)
+                print(f'{method}', end=' ')
+                tab.add(dataset, method, df[eval].values)
+            else:
+                print(f'MISSING-{method}', end=' ')
+        print()
+
+    return tab
+
+
+def gen_tables_lequa(Methods, task, eval):
+    # generates the table for LeQua-T1A or LeQua-T1B; a single benchmark row ('Average') per evaluation measure
+    dataset_name = 'LeQua-'+task
+
+    tab = Table(
+        benchmarks=['Average'],
+        methods=Methods,
+        ttest='wilcoxon',
+        prec_mean=5,
+        show_std=SHOW_STD,
+        prec_std=4,
+        clean_zero=False,
+        average=False,
+        maxtone=MAXTONE
+    )
+
+    print(f'Generating table for {dataset_name}', eval, end='')
+    dir_results = f'../results/lequa/{task}/{eval}'
+
+    for method in Methods:
+        result_path = f'{dir_results}/{method}.dataframe'
+        if os.path.exists(result_path):
+            df = pd.read_csv(result_path)
+            print(f'{method}', end=' ')
+            tab.add('Average', method, df[eval].values)
+        else:
+            print(f'MISSING-{method}', end=' ')
+    print()
+
+    return tab
+
+
+
+if __name__ == '__main__':
+    os.makedirs('./latex', exist_ok=True)
+
+    for eval in ['mae', 'mrae']:
+        tabs = []
+        tabs.append(gen_tables_tweet(eval))
+        tabs.append(gen_tables_uci_multiclass(eval))
+        tabs.append(gen_tables_lequa(METHODS, 'T1B', eval))
+
+        names = ['Tweets', 'UCI-multi', 'LeQua-T1B']
+        table = make_table(tabs, eval, benchmark_groups=tabs, benchmark_names=names)
+        save_table(f'./latex/multiclass_{eval}.tex', table)
+
+    for eval in ['mae', 'mrae']:
+        tabs = []
+        tabs.append(gen_tables_uci_bin(eval))
+        
+        # print uci-binary with all datasets for the appendix
+        table = make_table(tabs, eval, benchmark_groups=tabs, benchmark_names=['UCI-binary'])
+        save_table(f'./latex/ucibinary_{eval}.tex', table)
+        
+        # print uci-bin compacted plus lequa-T1A for the main body
+        tabs.append(gen_tables_lequa(BIN_METHODS, 'T1A', eval))
+        table = make_table(tabs, eval, benchmark_groups=tabs, benchmark_names=['UCI-binary', 'LeQua-T1A'], compact=True)
+        save_table(f'./latex/binary_{eval}.tex', table)
+
+    print("[Tables Done] runing latex")
+    os.chdir('./latex/')
+    os.system('pdflatex tables_compact.tex')
+    os.system('rm tables_compact.aux tables_compact.bbl tables_compact.blg tables_compact.log tables_compact.out tables_compact.dvi')
+
diff --git a/distribution_matching/tables/latex/tables_compact.tex b/distribution_matching/tables/latex/tables_compact.tex
new file mode 100644
index 0000000..f15bc23
--- /dev/null
+++ b/distribution_matching/tables/latex/tables_compact.tex
@@ -0,0 +1,107 @@
+\documentclass{article}
+
+
+
+\usepackage[utf8]{inputenc} % allow utf-8 input
+\usepackage[T1]{fontenc}    % use 8-bit T1 fonts
+\usepackage{hyperref}       % hyperlinks
+\usepackage{url}            % simple URL typesetting
+\usepackage{booktabs}       % professional-quality tables
+\usepackage{amsfonts}       % blackboard math symbols
+\usepackage{nicefrac}       % compact symbols for 1/2, etc.
+\usepackage{microtype}      % microtypography
+\usepackage{lipsum}
+\usepackage{fancyhdr}       % header
+\usepackage{graphicx}       % graphics
+\graphicspath{{media/}}     % organize your images and other figures under media/ folder
+\usepackage{amsmath}
+\usepackage{bm}
+\usepackage{tabularx}
+\usepackage{color}
+\usepackage{colortbl}
+\usepackage{xcolor}
+\usepackage{lmodern}
+
+\DeclareMathOperator*{\argmax}{arg\,max}
+\DeclareMathOperator*{\argmin}{arg\,min}
+
+\newif\ifdraft
+\drafttrue
+
+\newcommand{\juanjo}[1]{\ifdraft{\leavevmode\color{purple}{[JJ]:
+{#1}}}\else{\vspace{0ex}}\fi}
+
+\newcommand{\alex}[1]{\ifdraft{\leavevmode\color{violet}{[AM]:
+{#1}}}\else{\vspace{0ex}}\fi}
+
+\newcommand{\pablo}[1]{\ifdraft{\leavevmode\color{red}{[PG]:
+{#1}}}\else{\vspace{0ex}}\fi}
+
+
+\title{Tables} 
+
+
+\author{
+  Alejandro Moreo 
+}
+
+
+\begin{document}
+
+\maketitle
+
+
+
+\begin{table}[h]
+ \centering
+ \caption{Multiclass MAE}
+\resizebox{\textwidth}{!}{%
+\input{multiclass_mae}
+}%
+\end{table}
+
+
+\begin{table}[h]
+ \centering
+ \caption{Multiclass MRAE}
+\resizebox{\textwidth}{!}{%
+\input{multiclass_mrae}
+}%
+\end{table}
+
+\begin{table}[h]
+ \centering
+ \caption{Binary MAE}
+\resizebox{\textwidth}{!}{%
+\input{binary_mae}
+}%
+\end{table}
+
+\begin{table}[h]
+ \centering
+ \caption{Binary MRAE}
+\resizebox{\textwidth}{!}{%
+\input{binary_mrae}
+}%
+\end{table}
+
+\begin{table}[h]
+ \centering
+ \caption{UCI binary full MAE}
+\resizebox{\textwidth}{!}{%
+\input{ucibinary_mae}
+}%
+\end{table}
+
+\begin{table}[h]
+ \centering
+ \caption{UCI binary full MRAE}
+\resizebox{\textwidth}{!}{%
+\input{ucibinary_mrae}
+}%
+\end{table}
+
+
+
+\end{document}
+
diff --git a/distribution_matching/tables/tabular.py b/distribution_matching/tables/tabular.py
new file mode 100644
index 0000000..eff7a44
--- /dev/null
+++ b/distribution_matching/tables/tabular.py
@@ -0,0 +1,348 @@
+import numpy as np
+import itertools
+from scipy.stats import ttest_ind_from_stats, wilcoxon
+
+
+class Table:
+    VALID_TESTS = [None, "wilcoxon", "ttest"]
+
+    def __init__(self, benchmarks, methods, lower_is_better=True, ttest='ttest', prec_mean=3,
+                 clean_zero=False, show_std=False, prec_std=3, average=True, missing=None, missing_str='--', color=True, maxtone=50):
+        assert ttest in self.VALID_TESTS, f'unknown test, valid are {self.VALID_TESTS}'
+
+        self.benchmarks = np.asarray(benchmarks)
+        self.benchmark_index = {row:i for i, row in enumerate(benchmarks)}
+
+        self.methods = np.asarray(methods)
+        self.method_index = {col:j for j, col in enumerate(methods)}
+
+        # dictionary of (#rows, #cols)-ndarrays holding statistics computed from the cell values
+        self.map = {}
+        self._addmap('values', dtype=object)
+        self.lower_is_better = lower_is_better
+        self.ttest = ttest
+        self.prec_mean = prec_mean
+        self.clean_zero = clean_zero
+        self.show_std = show_std
+        self.prec_std = prec_std
+        self.add_average = average
+        self.missing = missing
+        self.missing_str = missing_str
+        self.color = color
+        self.maxtone = maxtone
+        
+        self.touch()
+
+    @property
+    def nbenchmarks(self):
+        return len(self.benchmarks)
+
+    @property
+    def nmethods(self):
+        return len(self.methods)
+
+    def touch(self):
+        self._modif = True
+
+    def update(self):
+        if self._modif:
+            self.compute()
+
+    def _getfilled(self):
+        return np.argwhere(self.map['fill'])
+
+    @property
+    def values(self):
+        return self.map['values']
+
+    def _indexes(self):
+        return itertools.product(range(self.nbenchmarks), range(self.nmethods))
+
+    def _addmap(self, map, dtype, func=None):
+        self.map[map] = np.empty((self.nbenchmarks, self.nmethods), dtype=dtype)
+        if func is None:
+            return
+        m = self.map[map]
+        f = func
+        indexes = self._indexes() if map == 'fill' else self._getfilled()
+        for i, j in indexes:
+            m[i, j] = f(self.values[i, j])
+
+    def _addrank(self):
+        for i in range(self.nbenchmarks):
+            filled_cols_idx = np.argwhere(self.map['fill'][i]).flatten()
+            col_means = [self.map['mean'][i,j] for j in filled_cols_idx]
+            ranked_cols_idx = filled_cols_idx[np.argsort(col_means)]
+            if not self.lower_is_better:
+                ranked_cols_idx = ranked_cols_idx[::-1]
+            self.map['rank'][i, ranked_cols_idx] = np.arange(1, len(filled_cols_idx)+1)
+            
+    def _addcolor(self):
+        for i in range(self.nbenchmarks):
+            filled_cols_idx = np.argwhere(self.map['fill'][i]).flatten()
+            if filled_cols_idx.size==0:
+                continue
+            col_means = [self.map['mean'][i,j] for j in filled_cols_idx]
+            #col_means = [self.map['rank'][i, j] for j in filled_cols_idx]
+
+            minval = min(col_means)
+            maxval = max(col_means)
+
+            for col_idx in filled_cols_idx:
+                val = self.map['mean'][i,col_idx]
+                norm = (maxval - minval)
+                if norm > 0:
+                    normval = (val - minval) / norm
+                else:
+                    normval = 0.5
+
+                if self.lower_is_better:
+                    normval = 1 - normval
+
+                normval = np.clip(normval, 0,1)
+
+                self.map['color'][i, col_idx] = color_red2green_01(normval, self.maxtone)
+
+    def _run_ttest(self, row, col1, col2):
+        mean1 = self.map['mean'][row, col1]
+        std1 = self.map['std'][row, col1]
+        nobs1 = self.map['nobs'][row, col1]
+        mean2 = self.map['mean'][row, col2]
+        std2 = self.map['std'][row, col2]
+        nobs2 = self.map['nobs'][row, col2]
+        _, p_val = ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2)
+        return p_val
+
+    def _run_wilcoxon(self, row, col1, col2):
+        values1 = self.map['values'][row, col1]
+        values2 = self.map['values'][row, col2]
+        try:
+            _, p_val = wilcoxon(values1, values2)
+        except ValueError:
+            # wilcoxon raises a ValueError when all pairwise differences are zero
+            p_val = 0
+        return p_val
+
+    def _add_statistical_test(self):
+        if self.ttest is None:
+            return
+        self.some_similar = [False]*self.nmethods
+        for i in range(self.nbenchmarks):
+            filled_cols_idx = np.argwhere(self.map['fill'][i]).flatten()
+            if len(filled_cols_idx) <= 1:
+                continue
+            col_means = [self.map['mean'][i,j] for j in filled_cols_idx]
+            best_pos = filled_cols_idx[np.argmin(col_means)]
+
+            for j in filled_cols_idx:
+                if j==best_pos:
+                    continue
+                if self.ttest == 'ttest':
+                    p_val = self._run_ttest(i, best_pos, j)
+                else:
+                    p_val = self._run_wilcoxon(i, best_pos, j)
+
+                pval_outcome = pval_interpretation(p_val)
+                self.map['ttest'][i, j] = pval_outcome
+                if pval_outcome != 'Diff':
+                    self.some_similar[j] = True
+
+    def compute(self):
+        self._addmap('fill', dtype=bool, func=lambda x: x is not None)
+        self._addmap('mean', dtype=float, func=np.mean)
+        self._addmap('std', dtype=float, func=np.std)
+        self._addmap('nobs', dtype=float, func=len)
+        self._addmap('rank', dtype=int, func=None)
+        self._addmap('color', dtype=object, func=None)
+        self._addmap('ttest', dtype=object, func=None)
+        self._addmap('latex', dtype=object, func=None)
+        self._addrank()
+        self._addcolor()
+        self._add_statistical_test()
+        if self.add_average:
+            self._addave()
+        self._modif = False
+
+    def _is_column_full(self, col):
+        return all(self.map['fill'][:, self.method_index[col]])
+
+    def _addave(self):
+        ave = Table(['ave'], self.methods,
+                    lower_is_better=self.lower_is_better,
+                    ttest=self.ttest,
+                    average=False,
+                    missing=self.missing,
+                    missing_str=self.missing_str,
+                    prec_mean=self.prec_mean,
+                    prec_std=self.prec_std,
+                    clean_zero=self.clean_zero,
+                    show_std=self.show_std,
+                    color=self.color,
+                    maxtone=self.maxtone)
+        for col in self.methods:
+            values = None
+            if self._is_column_full(col):
+                # regardless of the test type, pool the per-benchmark scores into a single array
+                values = np.concatenate(self.values[:, self.method_index[col]])
+            ave.add('ave', col, values)
+        self.average = ave
+
+    def add(self, benchmark, method, values):
+        if values is not None:
+            values = np.asarray(values)
+            if values.ndim==0:
+                values = values.flatten()
+        rid, cid = self._coordinates(benchmark, method)
+        self.map['values'][rid, cid] = values
+        self.touch()
+
+    def get(self, benchmark, method, attr='mean'):
+        self.update()
+        assert attr in self.map, f'unknown attribute {attr}'
+        rid, cid = self._coordinates(benchmark, method)
+        if self.map['fill'][rid, cid]:
+            v = self.map[attr][rid, cid]
+            if v is None or (isinstance(v,float) and np.isnan(v)):
+                return self.missing
+            return v
+        else:
+            return self.missing
+
+    def _coordinates(self, benchmark, method):
+        assert benchmark in self.benchmark_index, f'benchmark {benchmark} out of range'
+        assert method in self.method_index, f'method {method} out of range'
+        rid = self.benchmark_index[benchmark]
+        cid = self.method_index[method]
+        return rid, cid
+
+    def get_average(self, method, attr='mean'):
+        self.update()
+        if self.add_average:
+            return self.average.get('ave', method, attr=attr)
+        return None
+
+    def get_color(self, benchmark, method):
+        color = self.get(benchmark, method, attr='color')
+        if color is None:
+            return ''
+        return color
+
+    def latex(self, benchmark, method):
+        self.update()
+        i,j = self._coordinates(benchmark, method)
+        if not self.map['fill'][i,j]:
+            return self.missing_str
+
+        mean = self.map['mean'][i,j]
+        l = f" {mean:.{self.prec_mean}f}"
+        if self.clean_zero:
+            l = l.replace(' 0.', '.')
+
+        isbest = self.map['rank'][i,j] == 1
+        if isbest:
+            l = "\\textbf{"+l.strip()+"}"
+
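+        # significance markers w.r.t. the best method in the row: \dag for 'Sim'
+        # (0.005 < p <= 0.05), \ddag for 'Same' (p > 0.05); a phantom \ddag keeps
+        # the column width stable when no marker applies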
+        stat = '' if self.ttest is None else '^{\phantom{\ddag}}'
+        if self.ttest is not None and self.some_similar[j]:
+            test_label = self.map['ttest'][i,j]
+            if test_label == 'Sim':
+                stat = '^{\dag}'
+            elif test_label == 'Same':
+                stat = '^{\ddag}'
+            elif isbest or test_label == 'Diff':
+                stat = '^{\phantom{\ddag}}'
+
+        std = ''
+        if self.show_std:
+            std = self.map['std'][i,j]
+            std = f" {std:.{self.prec_std}f}"
+            if self.clean_zero:
+                std = std.replace(' 0.', '.')
+            std = f"\pm{std}"  # std is already a formatted string; do not re-apply a format spec
+
+        if stat!='' or std!='':
+            l = f'{l}${stat}{std}$'
+
+        if self.color:
+            l += ' ' + self.map['color'][i,j]
+
+        return l
+
+    def latexTabular(self, benchmark_replace={}, method_replace={}, aslines=False, endl='\\\\\hline'):
+        lines = []
+        l = '\multicolumn{1}{c|}{} & '
+        l += ' & '.join([method_replace.get(col, col) for col in self.methods])
+        l += ' \\\\\hline'
+        lines.append(l)
+
+        for row in self.benchmarks:
+            rowname = benchmark_replace.get(row, row)
+            l = rowname + ' & '
+            l += self.latexRow(row, endl=endl)
+            lines.append(l)
+
+        if self.add_average:
+            # l += '\hline\n'
+            l = '\hline \n \\textit{Average} & '
+            l += self.latexAverage(endl=endl)
+            lines.append(l)
+        if not aslines:
+            lines='\n'.join(lines)
+        return lines
+
+    def latexRow(self, benchmark, endl='\\\\\hline\n'):
+        s = [self.latex(benchmark, col) for col in self.methods]
+        s = ' & '.join(s)
+        s += ' ' + endl
+        return s
+
+    def latexAverage(self, endl='\\\\\hline\n'):
+        if self.add_average:
+            return self.average.latexRow('ave', endl=endl)
+
+    def getRankTable(self, prec_mean=0):
+        t = Table(benchmarks=self.benchmarks, methods=self.methods, prec_mean=prec_mean, average=True, maxtone=self.maxtone, ttest=None)
+        for rid, cid in self._getfilled():
+            row = self.benchmarks[rid]
+            col = self.methods[cid]
+            t.add(row, col, self.get(row, col, 'rank'))
+        t.compute()
+        return t
+
+    def dropMethods(self, methods):
+        drop_index = [self.method_index[m] for m in methods]
+        new_methods = np.delete(self.methods, drop_index)
+        new_index = {col:j for j, col in enumerate(new_methods)}
+
+        self.map['values'] = self.values[:,np.asarray([self.method_index[m] for m in new_methods], dtype=int)]
+        self.methods = new_methods
+        self.method_index = new_index
+        self.touch()
+
+
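+# interprets the p-value of a comparison against the best method: 'Diff'
+# (p <= 0.005) means different with strong significance; 'Sim' (0.005 < p <= 0.05)
+# different with weak significance; 'Same' (p > 0.05) no significant difference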
+def pval_interpretation(p_val):
+    if 0.005 >= p_val:
+        return 'Diff'
+    elif 0.05 >= p_val > 0.005:
+        return 'Sim'
+    elif p_val > 0.05:
+        return 'Same'
+
+
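+# maps a score normalized to [0,1] onto a LaTeX \cellcolor: 0 renders red at
+# full maxtone (worst), 1 renders green at full maxtone (best), 0.5 is white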
+def color_red2green_01(val, maxtone=50):
+    if np.isnan(val): return None
+    assert 0 <= val <= 1, f'val {val} out of range [0,1]'
+
+    # rescale to [-1,1]
+    val = val * 2 - 1
+    if val < 0:
+        color = 'red'
+        tone = maxtone * (-val)
+    else:
+        color = 'green'
+        tone = maxtone * val
+    return '\cellcolor{' + color + f'!{int(tone)}' + '}'
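+
+
+# Minimal usage sketch (illustrative only: the benchmark/method names and scores
+# below are made-up placeholders, not actual experimental results)
+if __name__ == '__main__':
+    rng = np.random.default_rng(0)
+    demo = Table(benchmarks=['bench-a', 'bench-b'], methods=['m1', 'm2'], ttest='wilcoxon')
+    for b in demo.benchmarks:
+        for m in demo.methods:
+            demo.add(b, m, rng.random(100))  # 100 per-sample error scores
+    # prints the LaTeX rows: one per benchmark plus the average row
+    print(demo.latexTabular())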
diff --git a/distribution_matching/tweets_experiments.py b/distribution_matching/tweets_experiments.py
index db66b2e..3f8230c 100644
--- a/distribution_matching/tweets_experiments.py
+++ b/distribution_matching/tweets_experiments.py
@@ -24,7 +24,6 @@ if __name__ == '__main__':
         for method in METHODS:
 
             print('Init method', method)
-            if method == 'EMQ-C': continue
 
             global_result_path = f'{result_dir}/{method}'
 
diff --git a/quapy/model_selection.py b/quapy/model_selection.py
index 1d256f9..91f2c7c 100644
--- a/quapy/model_selection.py
+++ b/quapy/model_selection.py
@@ -224,7 +224,7 @@ def cross_val_predict(quantifier: BaseQuantifier, data: LabelledCollection, nfol
     for train, test in data.kFCV(nfolds=nfolds, random_state=random_state):
         quantifier.fit(train)
         fold_prev = quantifier.quantify(test.X)
-        rel_size = len(test.X)/len(data)
+        rel_size = len(test)/len(data)
         total_prev += fold_prev*rel_size
 
     return total_prev
diff --git a/quapy/plot.py b/quapy/plot.py
index cdc3bd5..1ea30ac 100644
--- a/quapy/plot.py
+++ b/quapy/plot.py
@@ -11,7 +11,7 @@ import quapy as qp
 
 plt.rcParams['figure.figsize'] = [10, 6]
 plt.rcParams['figure.dpi'] = 200
-plt.rcParams['font.size'] = 18
+plt.rcParams['font.size'] = 12
 
 
 def binary_diagonal(method_names, true_prevs, estim_prevs, pos_class=1, title=None, show_std=True, legend=True,
@@ -259,7 +259,7 @@ def error_by_drift(method_names, true_prevs, estim_prevs, tr_prevs,
     data = _join_data_by_drift(method_names, true_prevs, estim_prevs, tr_prevs, x_error, y_error, method_order)
 
     if method_order is None:
-        method_order = method_names
+        method_order = np.unique(method_names)
 
     _set_colors(ax, n_methods=len(method_order))
 
@@ -330,8 +330,8 @@ def error_by_drift(method_names, true_prevs, estim_prevs, tr_prevs,
     
     if show_legend:
         fig.legend(loc='lower center',
-                  bbox_to_anchor=(1, 0.5),
-                  ncol=(len(method_names)+1)//2)
+                  bbox_to_anchor=(0.9, 0.2),
+                  ncol=1)
       
     _save_or_show(savepath)