"""Bandwidth-sensitivity study for KDEy quantification.

For every UCI multiclass dataset, fits a KDEy quantifier, draws test samples
with an artificial-prevalence protocol (UPP), and — for a log-spaced grid of
KDE bandwidths — records the quantification errors (AE, RAE, MSE, KLD) and the
negative log-likelihood of each sample. Results are cached as pickles and
rendered as per-sample and averaged plots under ./plots/likelihood/.
"""
import pickle
import os
from time import time
from collections import defaultdict
from tqdm import tqdm
import numpy as np
from sklearn.linear_model import LogisticRegression
import quapy as qp
from KDEy.kdey_devel import KDEyMLauto, optim_minimize
from method._kdey import KDEBase
from quapy.method.aggregative import PACC, EMQ, KDEyML
from quapy.model_selection import GridSearchQ
from quapy.protocol import UPP
from pathlib import Path
from quapy import functional as F
import matplotlib.pyplot as plt

SEED = 1


def newLR():
    """Return a fresh logistic-regression classifier (the quantifier's backbone)."""
    return LogisticRegression(max_iter=1000)


SAMPLE_SIZE = 150
qp.environ['SAMPLE_SIZE'] = SAMPLE_SIZE

# Toggles controlling which error metrics are drawn on the plots.
show_ae = True
show_rae = True
show_mse = False
show_kld = True

epsilon = 1e-10  # numerical floor so np.log never sees an exact zero density

DATASETS = qp.datasets.UCI_MULTICLASS_DATASETS

for i, dataset in enumerate(DATASETS):

    def generate_data():
        """Run the bandwidth sweep for the current `dataset`.

        Returns a tuple (xaxis, AE_error, RAE_error, MSE_error, KLD_error,
        LIKE_value) where `xaxis` is the bandwidth grid and each *_error /
        LIKE_value entry is a list (one per protocol sample) of per-bandwidth
        values.
        """
        data = qp.datasets.fetch_UCIMulticlassDataset(dataset)
        n_classes = data.n_classes
        print(f'{i=}')
        print(f'{dataset=}')
        print(f'{n_classes=}')
        print(len(data.training))
        print(len(data.test))

        train, test = data.train_test
        train_prev = train.prevalence()
        test_prev = test.prevalence()

        print(f'train-prev = {F.strprev(train_prev)}')
        print(f'test-prev = {F.strprev(test_prev)}')

        repeats = 10
        prot = UPP(test, sample_size=SAMPLE_SIZE, repeats=repeats)

        kde = KDEyMLauto(newLR())
        kde.fit(train)

        AE_error, RAE_error, MSE_error, KLD_error, LIKE_value = [], [], [], [], []
        tr_posteriors, tr_y = kde.classif_predictions.Xy

        # The bandwidth grid is identical for every sample, so build it once
        # instead of re-accumulating it inside each protocol iteration.
        bandwidths = np.logspace(-5, 0.5, 50)
        xaxis = list(bandwidths)

        for it, (sample, prev) in tqdm(enumerate(prot()), total=repeats):
            te_posteriors = kde.classifier.predict_proba(sample)
            classes = train.classes_

            ae_error, rae_error, mse_error, kld_error, likelihood_value = [], [], [], [], []

            for bandwidth in bandwidths:
                mix_densities = kde.get_mixture_components(tr_posteriors, tr_y, classes, bandwidth)
                test_densities = [kde.pdf(kde_i, te_posteriors) for kde_i in mix_densities]

                def neg_loglikelihood_prev(prev):
                    # Negative log-likelihood of the test sample under the
                    # prevalence-weighted mixture of class-conditional KDEs.
                    test_mixture_likelihood = sum(
                        prev_i * dens_i for prev_i, dens_i in zip(prev, test_densities))
                    test_loglikelihood = np.log(test_mixture_likelihood + epsilon)
                    return -np.sum(test_loglikelihood)

                init_prev = np.full(fill_value=1 / n_classes, shape=(n_classes,))
                pred_prev, likelihood = optim_minimize(
                    neg_loglikelihood_prev, init_prev, return_loss=True)

                ae_error.append(qp.error.ae(prev, pred_prev))
                rae_error.append(qp.error.rae(prev, pred_prev))
                mse_error.append(qp.error.mse(prev, pred_prev))
                kld_error.append(qp.error.kld(prev, pred_prev))
                likelihood_value.append(likelihood)

            AE_error.append(ae_error)
            RAE_error.append(rae_error)
            MSE_error.append(mse_error)
            KLD_error.append(kld_error)
            LIKE_value.append(likelihood_value)

        return xaxis, AE_error, RAE_error, MSE_error, KLD_error, LIKE_value

    # BUGFIX: the pickle cache directory was never created before
    # pickled_resource attempts to write into it; create both output dirs
    # up front (this also replaces the redundant makedirs inside the loops).
    os.makedirs('./plots/likelihood/pickles/', exist_ok=True)
    os.makedirs('./plots/likelihood/', exist_ok=True)

    xaxis, AE_error, RAE_error, MSE_error, KLD_error, LIKE_value = qp.util.pickled_resource(
        f'./plots/likelihood/pickles/{dataset}.pkl', generate_data)

    # ------------------------------------------------------------------
    # One figure per protocol sample: error metrics on the left Y axis,
    # (negative) likelihood on a twin right Y axis.
    # ------------------------------------------------------------------
    for row in range(len(AE_error)):
        fig, ax1 = plt.subplots(figsize=(8, 6))

        if show_ae:
            ax1.plot(xaxis, AE_error[row], label='AE', marker='o', color='b')
        if show_rae:
            ax1.plot(xaxis, RAE_error[row], label='RAE', marker='s', color='g')
        if show_kld:
            ax1.plot(xaxis, KLD_error[row], label='KLD', marker='^', color='r')
        if show_mse:
            ax1.plot(xaxis, MSE_error[row], label='MSE', marker='^', color='c')

        ax1.set_xscale('log')
        ax1.set_xlabel('Bandwidth')
        ax1.set_ylabel('Error Value')
        ax1.grid(True)
        ax1.legend(loc='upper left')

        # Second Y axis sharing the X axis, for the likelihood curve.
        ax2 = ax1.twinx()
        ax2.plot(xaxis, LIKE_value[row], label='(neg)Likelihood', marker='x', color='purple')
        ax2.set_ylabel('Likelihood Value')
        ax2.legend(loc='upper right')

        plt.title('Error Metrics vs Bandwidth')
        plt.savefig(f'./plots/likelihood/{dataset}-fig{row}.png')
        plt.close()

    # ------------------------------------------------------------------
    # Averaged figure: mean across samples with a ±1 std shaded band.
    # ------------------------------------------------------------------
    fig, ax1 = plt.subplots(figsize=(8, 6))

    def add_plot(ax, vals_error, name, color, marker, show):
        """Plot the mean of `vals_error` across samples with a ±1 std band.

        No-op when `show` is False.
        """
        if not show:
            return
        vals_error = np.asarray(vals_error)
        vals_ave = np.mean(vals_error, axis=0)
        vals_std = np.std(vals_error, axis=0)
        ax.plot(xaxis, vals_ave, label=name, marker=marker, color=color)
        ax.fill_between(xaxis, vals_ave - vals_std, vals_ave + vals_std, color=color, alpha=0.2)

    add_plot(ax1, AE_error, 'AE', color='b', marker='o', show=show_ae)
    add_plot(ax1, RAE_error, 'RAE', color='g', marker='s', show=show_rae)
    add_plot(ax1, KLD_error, 'KLD', color='r', marker='^', show=show_kld)
    add_plot(ax1, MSE_error, 'MSE', color='c', marker='^', show=show_mse)

    ax1.set_xscale('log')
    ax1.set_xlabel('Bandwidth')
    ax1.set_ylabel('Error Value')
    ax1.grid(True)
    ax1.legend(loc='upper left')

    # Second Y axis sharing the X axis, for the averaged likelihood curve.
    ax2 = ax1.twinx()
    add_plot(ax2, LIKE_value, '(neg)Likelihood', color='purple', marker='x', show=True)
    ax2.set_ylabel('Likelihood Value')
    ax2.legend(loc='upper right')

    plt.title('Error Metrics vs Bandwidth')
    plt.savefig(f'./plots/likelihood/{dataset}-figAve.png')
    plt.close()