forked from moreo/QuaPy

bugfix and tables generation with ResultSet

parent 8cc2e75534 · commit cbb0d0857a
[experiments script]
@@ -17,11 +17,11 @@ def quantification_models():
         return LogisticRegression(max_iter=1000, solver='lbfgs', n_jobs=-1)
     __C_range = np.logspace(-4, 5, 10)
     lr_params = {'C': __C_range, 'class_weight': [None, 'balanced']}
-    yield 'cc', qp.method.aggregative.CC(newLR()), lr_params
-    yield 'acc', qp.method.aggregative.ACC(newLR()), lr_params
-    yield 'pcc', qp.method.aggregative.PCC(newLR()), lr_params
-    yield 'pacc', qp.method.aggregative.PACC(newLR()), lr_params
-    yield 'sld', lambda learner: qp.method.aggregative.EMQ(newLR()), lr_params
+    #yield 'cc', qp.method.aggregative.CC(newLR()), lr_params
+    #yield 'acc', qp.method.aggregative.ACC(newLR()), lr_params
+    #yield 'pcc', qp.method.aggregative.PCC(newLR()), lr_params
+    #yield 'pacc', qp.method.aggregative.PACC(newLR()), lr_params
+    yield 'sld', qp.method.aggregative.EMQ(newLR()), lr_params
 
 
 def evaluate_experiment(true_prevalences, estim_prevalences):
@@ -79,7 +79,7 @@ def run(experiment):
         sample_size=sample_size,
         n_prevpoints=21,
         n_repetitions=5,
-        error='mae',
+        error=optim_loss,
         refit=False,
         verbose=True
     )
@@ -117,7 +117,7 @@ if __name__ == '__main__':
     np.random.seed(0)
 
     optim_losses = ['mae', 'mrae']
-    datasets = qp.datasets.TWITTER_SENTIMENT_DATASETS_TRAIN
+    datasets = ['hcr']#qp.datasets.TWITTER_SENTIMENT_DATASETS_TRAIN
     models = quantification_models()
 
     results = Parallel(n_jobs=n_jobs)(
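Note on the 'sld' line above, which is the bugfix referenced in the commit message: every other entry yields a ready-made quantifier object, but the old line yielded a factory (a lambda), so the runner received a function instead of an EMQ instance. A minimal, self-contained sketch of the difference (it assumes QuaPy and scikit-learn are installed; newLR is re-declared here only because it is local to the script above):

import quapy as qp
from sklearn.linear_model import LogisticRegression

def newLR():
    return LogisticRegression(max_iter=1000, solver='lbfgs', n_jobs=-1)

old_entry = lambda learner: qp.method.aggregative.EMQ(newLR())  # a factory function, not a quantifier
new_entry = qp.method.aggregative.EMQ(newLR())                  # an EMQ instance, like the other yielded methods

print(hasattr(old_entry, 'fit'))  # False: the evaluation loop cannot fit a lambda
print(hasattr(new_entry, 'fit'))  # True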
[result_manager.py — new file]
@@ -0,0 +1,208 @@
+from scipy.stats import wilcoxon, ttest_ind_from_stats
+import numpy as np
+
+
+"""
+class Table:
+    def __init__(self):
+        self.tab = {}
+
+    def add(self, col, key, x):
+        if col not in self.tab:
+            self.tab[col] = ResultSet(col)
+"""
+
+
+class ResultSet:
+
+    VALID_TESTS = [None, "wilcoxon", "ttest_ind_from_stats"]
+
+    TTEST_DIFF = 'different'
+    TTEST_SIM = 'similar'
+    TTEST_SAME = 'same'
+
+    def __init__(self, name, addfunc, compare='mean', lower_is_better=True, show_std=True, test="wilcoxon",
+                 remove_mean='0.', prec_mean=3, remove_std='0.', prec_std=3, maxtone=100, minval=None, maxval=None):
+        """
+        :param name: name of the result set (e.g., a dataset)
+        :param addfunc: a function which is called to process the result input in the "add" method. This function
+            should return a dictionary containing any key-value (e.g., 'mean': 0.89) of interest
+        :param compare: the key (as generated by addfunc) that is to be compared in order to rank results
+        :param lower_is_better: if True, lower values of the "compare" key will result in higher ranks
+        :param show_std: whether or not to show the 'std' value (if True, the addfunc is expected to generate it)
+        :param test: which test of statistical significance to use. If "wilcoxon", then scipy.stats.wilcoxon(x, y)
+            will be computed, where x, y are the values of the key "values" as computed by addfunc. If
+            "ttest_ind_from_stats", then scipy.stats.ttest_ind_from_stats will be called on the "mean", "std",
+            and "nobs" values (as computed by addfunc) for both samples being compared.
+        :param remove_mean: if specified, removes the string from the mean (e.g., useful to remove the '0.')
+        :param remove_std: if specified, removes the string from the std (e.g., useful to remove the '0.')
+        """
+        self.name = name
+        self.addfunc = addfunc
+        self.compare = compare
+        self.lower_is_better = lower_is_better
+        self.show_std = show_std
+        assert test in self.VALID_TESTS, f'unknown test, valid are {self.VALID_TESTS}'
+        self.test = test
+        self.remove_mean = remove_mean
+        self.prec_mean = prec_mean
+        self.remove_std = remove_std
+        self.prec_std = prec_std
+        self.maxtone = maxtone
+        self.minval = minval
+        self.maxval = maxval
+
+        self.r = dict()
+        self.computed = False
+
+    def add(self, key, *args):
+        result = self.addfunc(*args)
+        if result is None:
+            return
+        assert 'values' in result, f'the add function {self.addfunc.__name__} does not fill the "values" attribute'
+        self.r[key] = result
+        vals = self.r[key]['values']
+        if isinstance(vals, np.ndarray):
+            self.r[key]['mean'] = vals.mean()
+            self.r[key]['std'] = vals.std()
+            self.r[key]['nobs'] = len(vals)
+        self.computed = False
+
+    def compute(self):
+        keylist = np.asarray(list(self.r.keys()))
+        vallist = [self.r[key][self.compare] for key in keylist]
+        keylist = keylist[np.argsort(vallist)]
+
+        minval = min(vallist) if self.minval is None else self.minval
+        maxval = max(vallist) if self.maxval is None else self.maxval
+        if not self.lower_is_better:
+            keylist = keylist[::-1]
+
+        # keep track of statistical significance tests; if all are different, then the "phantom dags" will not be shown
+        self.some_similar = False
+
+        for i, key in enumerate(keylist):
+            rank = i + 1
+            isbest = rank == 1
+            if isbest:
+                best = self.r[key]
+            self.r[key]['best'] = isbest
+            self.r[key]['rank'] = rank
+
+            # color
+            val = self.r[key][self.compare]
+            val = (val - minval) / (maxval - minval)
+            if self.lower_is_better:
+                val = 1 - val
+            self.r[key]['color'] = color_red2green_01(val, self.maxtone)
+
+            if self.test is not None:
+                if isbest:
+                    p_val = 0
+                elif self.test == 'wilcoxon':
+                    _, p_val = wilcoxon(best['values'], self.r[key]['values'])
+                elif self.test == 'ttest_ind_from_stats':
+                    mean1, std1, nobs1 = best['mean'], best['std'], best['nobs']
+                    mean2, std2, nobs2 = self.r[key]['mean'], self.r[key]['std'], self.r[key]['nobs']
+                    _, p_val = ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2)
+
+                if 0.005 >= p_val:
+                    self.r[key]['test'] = ResultSet.TTEST_DIFF
+                elif 0.05 >= p_val > 0.005:
+                    self.r[key]['test'] = ResultSet.TTEST_SIM
+                    self.some_similar = True
+                elif p_val > 0.05:
+                    self.r[key]['test'] = ResultSet.TTEST_SAME
+                    self.some_similar = True
+
+        self.computed = True
+
+    def latex(self, key, missing='--', color=True):
+        if key not in self.r:
+            return missing
+
+        if not self.computed:
+            self.compute()
+
+        rd = self.r[key]
+        s = f"{rd['mean']:.{self.prec_mean}f}"
+        if self.remove_mean:
+            s = s.replace(self.remove_mean, '.')
+        if rd['best']:
+            s = "\\textbf{" + s + "}"
+        else:
+            if self.test is not None and self.some_similar:
+                if rd['test'] == ResultSet.TTEST_SIM:
+                    s += '^{\dag\phantom{\dag}}'
+                elif rd['test'] == ResultSet.TTEST_SAME:
+                    s += '^{\ddag}'
+                elif rd['test'] == ResultSet.TTEST_DIFF:
+                    s += '^{\phantom{\ddag}}'
+
+        if self.show_std:
+            std = f"{rd['std']:.{self.prec_std}f}"
+            if self.remove_std:
+                std = std.replace(self.remove_std, '.')
+            s += f" \pm {std}"
+
+        s = f'$ {s} $'
+        if color:
+            s += ' ' + self.r[key]['color']
+
+        return s
+
+    def mean(self, attr='mean', required: int = None):
+        """
+        returns the mean value for the "key" attribute
+        :param attr: the attribute to average across results
+        :param required: if specified, indicates the number of values that should be part of the mean; if this
+            number is different, then the mean is not computed
+        :return: the mean of the "key" attribute
+        """
+        keylist = list(self.r.keys())
+        vallist = [self.r[key].get(attr, None) for key in keylist]
+        if None in vallist:
+            return None
+        if required is not None:
+            if len(vallist) != required:
+                return None
+        return np.mean(vallist)
+
+    def get(self, key, attr, missing='--'):
+        if key in self.r:
+            if attr in self.r[key]:
+                return self.r[key][attr]
+        return missing
+
+
+def color_red2green_01(val, maxtone=100):
+    assert 0 <= val <= 1, f'val {val} out of range [0,1]'
+
+    # rescale to [-1,1]
+    val = val * 2 - 1
+    if val < 0:
+        color = 'red'
+        tone = maxtone * (-val)
+    else:
+        color = 'green'
+        tone = maxtone * val
+    return '\cellcolor{' + color + f'!{int(tone)}' + '}'
+
+
+def add(x):
+    r = np.random.rand(100)/2+x
+    return {
+        'values': r
+    }
+
+
+"""
+r = ResultSet('dataset1', addfunc=add, show_std=False, minval=0, maxval=1)
+for x in range(10):
+    r.add(f'a{x}', np.random.randint(0,5) / 10)
+
+print(r.name)
+for x in range(10):
+    key = f'a{x}'
+    print(r.latex(key), r.get(key, 'rank'))
+
+print('----')
+print(f'ave: {r.mean():.3f}')
+print(f'averank: {r.mean("rank"):.3f}')
+"""
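For orientation, here is a minimal sketch of how the new ResultSet is meant to be driven; it mirrors the commented-out demo at the bottom of the file, and the method names and the synthetic addfunc are invented for illustration:

import numpy as np
from result_manager import ResultSet

def addfunc(shift):
    # addfunc must return a dict with a 'values' array; ResultSet.add derives mean/std/nobs from it
    return {'values': np.random.rand(50) / 2 + shift}

rs = ResultSet('sample-dataset', addfunc, show_std=False, minval=0, maxval=1)
for i, method in enumerate(['cc', 'acc', 'pcc', 'pacc', 'sld']):
    rs.add(method, i / 10)  # extra arguments of add() are forwarded to addfunc

for method in ['cc', 'acc', 'pcc', 'pacc', 'sld']:
    print(method, rs.latex(method), rs.get(method, 'rank'))
print('average value:', rs.mean())
print('average rank:', rs.mean('rank'))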
[tables generation script]
@@ -4,6 +4,8 @@ from os import makedirs
 import sys, os
 import pickle
 from experiments import result_path
+from result_manager import ResultSet
+
 
 tables_path = './tables'
 MAXTONE = 50  # sets the intensity of the maximum color reached by the worst (red) and best (green) results
@@ -26,6 +28,8 @@ qp.environ['SAMPLE_SIZE'] = sample_size
 nice = {
     'mae':'AE',
     'mrae':'RAE',
+    'ae':'AE',
+    'rae':'RAE',
     'svmkld': 'SVM(KLD)',
     'svmnkld': 'SVM(NKLD)',
     'svmq': 'SVM(Q)',
@@ -43,8 +47,7 @@ nice = {
     'semeval15': 'SemEval15',
     'semeval16': 'SemEval16'
 }
-# }
-# }
+
 
 
 def nicerm(key):
@@ -74,18 +77,23 @@ def save_table(path, table):
 # Tables evaluation scores for AE and RAE (two tables)
 # ----------------------------------------------------
 
 
 
 datasets = qp.datasets.TWITTER_SENTIMENT_DATASETS_TEST
-evaluation_measures = [qp.error.mae, qp.error.mrae]
+evaluation_measures = [qp.error.ae, qp.error.rae]
 gao_seb_methods = ['cc', 'acc', 'pcc', 'pacc', 'emq', 'svmq', 'svmkld', 'svmnkld']
 
 results_dict = {}
 stats={}
-def getscore(dataset, method, loss):
-    path = result_path(dataset, method, loss)
+def addfunc(dataset, method, loss):
+    path = result_path(dataset, method, 'm'+loss if not loss.startswith('m') else loss)
     if os.path.exists(path):
         true_prevs, estim_prevs, _, _, _, _ = pickle.load(open(path, 'rb'))
-        err = getattr(qp.error, loss)
-        return err(true_prevs, estim_prevs)
+        err_fn = getattr(qp.error, loss)
+        errors = err_fn(true_prevs, estim_prevs)
+        return {
+            'values': errors,
+        }
     return None
+
+
@@ -96,6 +104,14 @@ for i, eval_func in enumerate(evaluation_measures):
     nold_methods = len(gao_seb_methods)
     nnew_methods = len(added_methods)
 
+    # fill table
+    TABLE = {}
+    for dataset in datasets:
+        TABLE[dataset] = ResultSet(dataset, addfunc, show_std=False, test="ttest_ind_from_stats", maxtone=50,
+                                   remove_mean='0.' if eval_func == qp.error.ae else '')
+        for method in methods:
+            TABLE[dataset].add(method, dataset, method, eval_name)
+
     tabular = """
     \\begin{tabularx}{\\textwidth}{|c||""" + ('Y|'*len(gao_seb_methods)) + '|' + ('Y|'*len(added_methods)) + """} \hline
       & \multicolumn{"""+str(nold_methods)+"""}{c||}{Methods tested in~\cite{Gao:2016uq}} & \multicolumn{"""+str(nnew_methods)+"""}{c||}{} \\\\ \hline
@@ -108,12 +124,7 @@ for i, eval_func in enumerate(evaluation_measures):
     for dataset in datasets:
         tabular += nice.get(dataset, dataset.upper()) + ' '
         for method in methods:
-            #simplify...
-            score = getscore(dataset, method, eval_name)
-            if score:
-                tabular += f' & {score:.3f} '
-            else:
-                tabular += ' & --- '
+            tabular += ' & ' + TABLE[dataset].latex(method)
         tabular += '\\\\\hline\n'
     tabular += "\end{tabularx}"
 
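Downstream, each tabular block built above is presumably written out through the save_table helper named in the hunk context; a sketch of that final step, with a made-up output file name (makedirs, tables_path, eval_name, and tabular all come from the same script):

makedirs(tables_path, exist_ok=True)
save_table(f'{tables_path}/twitter_{eval_name}.tex', tabular)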