# QuaPy/BayesianKDEy/map_experiments.py

import os.path
from pathlib import Path
import pandas as pd
from sklearn.linear_model import LogisticRegression
from copy import deepcopy as cp
import quapy as qp
from BayesianKDEy.commons import KDEyReduce
from _bayeisan_kdey import BayesianKDEy
from _bayesian_mapls import BayesianMAPLS
from commons import experiment_path, KDEyCLR, RESULT_DIR, MockClassifierFromPosteriors, KDEyScaledB, KDEyFresh
# import datasets
from datasets import LeQuaHandler, UCIMulticlassHandler, DatasetHandler, VisualDataHandler, CIFAR100Handler
from method.confidence import ConfidenceIntervals
from temperature_calibration import temp_calibration
from quapy.data import LabelledCollection
from quapy.method.aggregative import DistributionMatchingY as DMy, AggregativeQuantifier, EMQ, CC
from quapy.model_selection import GridSearchQ
from quapy.data import Dataset
from quapy.method.confidence import BayesianCC, AggregativeBootstrap
from quapy.method.aggregative import KDEyML, ACC
from quapy.protocol import UPP
import numpy as np
from tqdm import tqdm
from collections import defaultdict
from time import time


def methods(data_handler: DatasetHandler):
    """
    A generator that yields one tuple (name, hyperparams, quantifier) per method to evaluate, where:
    - name: a str representing the name of the method (e.g., 'KDEy-G-exp')
    - hyperparams: a dictionary of hyperparameter grids to explore during model selection
      (e.g., {'bandwidth': [0.001, 0.005, 0.01, 0.05, 0.1, 0.2]})
    - quantifier: the base quantifier to be fitted and evaluated (e.g., KDEyML())
    """
    if isinstance(data_handler, VisualDataHandler):
        Cls = LogisticRegression
        cls_hyper = {}
    else:
        Cls = LogisticRegression
        cls_hyper = {'classifier__C': np.logspace(-4, 4, 9), 'classifier__class_weight': ['balanced', None]}

    kdey_hyper = {'bandwidth': np.logspace(-3, -1, 10), **cls_hyper}
    kdey_hyper_larger = {'bandwidth': np.logspace(-1, 0, 10), **cls_hyper}
    kdey_hyper_clr = {'bandwidth': np.logspace(-2, 2, 10), **cls_hyper}
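    # note: np.logspace(-3, -1, 10) spans 10 log-spaced bandwidths between 0.001 and 0.1;
    # the grid used for the CLR variant is wider (0.01 to 100), presumably because the
    # CLR (Aitchison) transform maps the simplex onto an unbounded space where larger
    # bandwidths remain meaningful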
    # surrogate quantifiers
    kde_gau_scale = KDEyScaledB(Cls())

    yield 'KDEy-G-exp', kdey_hyper, KDEyML(Cls())
    # yield 'KDEy-G-exp2', kdey_hyper_larger, KDEyML(Cls())
    # yield 'KDEy-G-log', kdey_hyper, KDEyML(Cls(), logdensities=True)
    yield 'KDEy-Ait', kdey_hyper_clr, KDEyCLR(Cls())


def model_selection(dataset: DatasetHandler, point_quantifier: AggregativeQuantifier, grid: dict):
    with qp.util.temp_seed(0):
        if isinstance(point_quantifier, KDEyScaledB) and 'bandwidth' in grid:
            def scale_bandwidth(bandwidth, n_classes, beta=0.5):
                return bandwidth * np.power(n_classes, beta)

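            # worked example: with the default beta=0.5 and n_classes=4, a candidate
            # bandwidth of 0.1 is rescaled to 0.1 * 4**0.5 = 0.2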
            n = dataset.get_training().n_classes
            grid['bandwidth'] = [scale_bandwidth(b, n) for b in grid['bandwidth']]
            print('bandwidth scaled')

        print(f'performing model selection for {point_quantifier.__class__.__name__} with grid {grid}')

        # model selection
        if len(grid) > 0:
            train, val_prot = dataset.get_train_valprot_for_modsel()
            mod_sel = GridSearchQ(
                model=point_quantifier,
                param_grid=grid,
                protocol=val_prot,
                refit=False,
                n_jobs=-1,
                verbose=True
            ).fit(*train.Xy)
            best_params = mod_sel.best_params_
        else:
            best_params = {}

    return best_params


def experiment(dataset: DatasetHandler,
               point_quantifier: AggregativeQuantifier,
               method_name: str,
               grid: dict,
               hyper_choice_path: Path):
    with qp.util.temp_seed(0):
        # model selection
        best_hyperparams = qp.util.pickled_resource(
            hyper_choice_path, model_selection, dataset, cp(point_quantifier), grid
        )
        print(f'{best_hyperparams=}')

        # apply the hyperparameters selected during model selection before the final fit
        point_quantifier.set_params(**best_hyperparams)

        t_init = time()
        training, test_generator = dataset.get_train_testprot_for_eval()
        point_quantifier.fit(*training.Xy)
        tr_time = time() - t_init

        # test
        train_prevalence = training.prevalence()
        results = defaultdict(list)
        pbar = tqdm(enumerate(test_generator()), total=test_generator.total())
        for i, (sample_X, true_prevalence) in pbar:
            t_init = time()
            point_estimate = point_quantifier.predict(sample_X)
            ttime = time() - t_init

            results['true-prevs'].append(true_prevalence)
            results['point-estim'].append(point_estimate)
            results['shift'].append(qp.error.ae(true_prevalence, train_prevalence))
            results['ae'].append(qp.error.ae(prevs_true=true_prevalence, prevs_hat=point_estimate))
            results['rae'].append(qp.error.rae(prevs_true=true_prevalence, prevs_hat=point_estimate))
            results['sre'].append(qp.error.sre(prevs_true=true_prevalence, prevs_hat=point_estimate, prevs_train=train_prevalence))
            results['test-time'].append(ttime)

            pbar.set_description(
                f'{method_name} MAE={np.mean(results["ae"]):.5f} W={np.mean(results["sre"]):.5f}')

        report = {
            'optim_hyper': best_hyperparams,
            'train_time': tr_time,
            'train-prev': train_prevalence,
            'results': {k: np.asarray(v) for k, v in results.items()},
        }

    return report
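

# Note: in the main block below, `experiment` is wrapped in qp.util.pickled_resource, so its
# report dict is written to disk and reloaded on subsequent runs instead of being recomputed.
# Minimal reload sketch (assumes the default pickle serialization used by pickled_resource
# and a hypothetical path `some_result_path`):
#   import pickle
#   with open(some_result_path, 'rb') as f:
#       report = pickle.load(f)
#   print(report['results']['ae'].mean())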


if __name__ == '__main__':
    result_dir = Path('results_map')
    reports = defaultdict(list)

    for data_handler in [UCIMulticlassHandler]:  # , UCIMulticlassHandler, LeQuaHandler, VisualDataHandler, CIFAR100Handler]:
        for dataset in data_handler.iter():
            qp.environ['SAMPLE_SIZE'] = dataset.sample_size
            # print(f'dataset={dataset.name}')
            problem_type = 'binary' if dataset.is_binary() else 'multiclass'

            for method_name, hyper_params, quantifier in methods(dataset):
                result_path = experiment_path(result_dir / problem_type, dataset.name, method_name)
                hyper_path = experiment_path(result_dir / 'hyperparams' / problem_type, dataset.name, method_name)
                # if os.path.exists(result_path):
                report = qp.util.pickled_resource(
                    result_path, experiment, dataset, quantifier, method_name, hyper_params, hyper_path
                )

                reports['dataset'].append(dataset.name)
                reports['method'].append(method_name)
                reports['MAE'].append(report["results"]["ae"].mean())
                reports['SRE'].append(report["results"]["sre"].mean())
                reports['h'].append(report["optim_hyper"]["bandwidth"])
                print(f'dataset={dataset.name}, '
                      f'method={method_name}: '
                      f'mae={reports["MAE"][-1]:.5f}, '
                      f'W={reports["SRE"][-1]:.5f} '
                      f'h={reports["h"][-1]}')

    pv = pd.DataFrame(reports).pivot_table(values=['MAE', 'SRE', 'h'], index='dataset', columns='method', margins=True)
    print(pv)
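    # the pivot table reports the mean MAE, SRE and selected bandwidth (h) per dataset and
    # method; margins=True appends 'All' rows/columns with the overall averages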