# QuaPy/NewMethods/uci_tables.py
import quapy as qp
import numpy as np
from os import makedirs
import sys, os
import pickle
import argparse
from common import *
from uci_experiments import result_path
from tabular import Table
from uci_experiments import *
import itertools
# Output directory for the generated LaTeX tables.
tables_path = './tables_uci'
# NOTE(review): MAXTONE is not referenced anywhere in this file — presumably
# consumed by the Table/tabular module when coloring cells; confirm.
MAXTONE = 50 # sets the intensity of the maximum color reached by the worst (red) and best (green) results
makedirs(tables_path, exist_ok=True)
# SAMPLE_SIZE comes from the star-import of uci_experiments.
qp.environ['SAMPLE_SIZE'] = SAMPLE_SIZE
# Methods to include as table columns (order defines column order).
# Commented-out entries are variants that were evaluated but excluded
# from the final tables; kept here for easy re-activation.
METHODS = [#'cc', 'acc',
# 'pcc',
# 'pacc',
# 'wpacc',
'pcc.opt',
'pacc.opt',
'wpacc.opt',
'ds.opt',
# 'pcc.opt.svm',
# 'pacc.opt.svm',
# 'wpacc.opt.svm',
# 'wpacc.opt2',
# 'MAX', 'MS', 'MS2',
'sldc',
# 'svmmae',
# 'hdy',
# 'ehdymaeds',
# 'EMdiag', 'EMfull', 'EMtied', 'EMspherical',
# 'VEMdiag', 'VEMfull', 'VEMtied', 'VEMspherical',
]
if __name__ == '__main__':
    # Generate one LaTeX results table per evaluation measure, aggregating
    # the per-fold/per-repeat scores stored under `results_uci`.
    results = 'results_uci'

    datasets = qp.datasets.UCI_DATASETS
    # Excluded datasets (not part of the reported benchmark).
    datasets.remove('acute.a')
    datasets.remove('acute.b')
    datasets.remove('iris.1')

    evaluation_measures = [qp.error.ae, qp.error.rae, qp.error.kld]

    for eval_func in evaluation_measures:
        eval_name = eval_func.__name__

        # Tables evaluation scores for the evaluation measure
        # ----------------------------------------------------
        # Fill the data table with one score per (dataset, method, run);
        # runs span every fold of every repetition.
        table = Table(benchmarks=datasets, methods=METHODS)
        for dataset, method, run in itertools.product(datasets, METHODS, range(N_FOLDS * N_REPEATS)):
            table.add(dataset, method, experiment_errors(results, dataset, method, run, eval_name, optim_loss='ae'))

        # Write the LaTeX table. Backslashes are doubled so the literals do
        # not contain invalid escape sequences (e.g. '\h'), which Python
        # deprecates; the resulting strings are byte-identical.
        nmethods = len(METHODS)
        tabular = """
        \\resizebox{\\textwidth}{!}{%
        \\begin{tabular}{|c||""" + ('c|' * nmethods) + '|' + """} \\hline
        & \\multicolumn{""" + str(nmethods) + """}{c||}{Quantification methods} \\\\ \\hline
        """
        rowreplace = {dataset: nicename(dataset) for dataset in datasets}
        colreplace = {method: nicename(method, eval_name, side=True) for method in METHODS}

        tabular += table.latexTabular(benchmark_replace=rowreplace, method_replace=colreplace)
        tabular += 'Rank Average & ' + table.getRankTable().latexAverage()
        tabular += """
        \\end{tabular}%
        }
        """

        save_table(f'{tables_path}/tab_results_{eval_name}.tex', tabular)

    print("[Done]")