diff --git a/KDEy/kdey_devel.py b/KDEy/kdey_devel.py
index 958c870..57cc487 100644
--- a/KDEy/kdey_devel.py
+++ b/KDEy/kdey_devel.py
@@ -17,6 +17,8 @@ import quapy.functional as F
 
 epsilon = 1e-10
 
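+# search interval for the KDE bandwidth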
+BANDWIDTH_RANGE = (0.001, 0.2)
+
 class KDEyMLauto(KDEyML):
     def __init__(self, classifier: BaseEstimator = None, val_split=5, random_state=None, optim='two_steps'):
         self.classifier = qp._get_classifier(classifier)
@@ -218,7 +220,7 @@ class KDEyMLauto(KDEyML):
 
     def choose_bandwidth_maxlikelihood_search(self, tr_posteriors, tr_y, te_posteriors, classes):
         n_classes = len(classes)
-        init_prev = np.full(fill_value=1 / n_classes, shape=(n_classes,))
+        init_prev = F.uniform_prevalence(n_classes)
 
         def neglikelihood_band(bandwidth):
             mix_densities = self.get_mixture_components(tr_posteriors, tr_y, classes, bandwidth[0])
@@ -258,7 +260,7 @@ def optim_minimize(loss: Callable, init_prev: np.ndarray, return_loss=False):
     constraints = ({'type': 'eq', 'fun': lambda x: 1 - sum(x)})  # values summing up to 1
     r = optimize.minimize(loss, x0=init_prev, method='SLSQP', bounds=bounds, constraints=constraints)
     # print(f'iterations-prevalence={r.nit}')
-    assert r.success, 'Process did not converge!'
+    # assert r.success, 'Process did not converge!'
     if return_loss:
         return r.x, r.fun
     else:
@@ -268,7 +270,7 @@ def optim_minimize(loss: Callable, init_prev: np.ndarray, return_loss=False):
 
 class KDEyMLauto2(KDEyML):
 
-    def __init__(self, classifier: BaseEstimator=None, val_split=5, bandwidth=0.1, random_state=None, reduction=100, max_reduced=500, target='likelihood'):
+    def __init__(self, classifier: BaseEstimator=None, val_split=5, bandwidth=0.1, random_state=None, reduction=100, max_reduced=500, target='likelihood', search='grid'):
         """
         reduction: number of examples per class for automatically setting the bandwidth
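+        target: the measure to minimize when tuning the bandwidth ('likelihood' or a quantification error name such as 'mae')
+        search: 'grid' evaluates a log-spaced grid of bandwidths; 'optim' runs a bounded scalar minimization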
         """
@@ -281,8 +283,10 @@ class KDEyMLauto2(KDEyML):
         self.reduction = reduction
         self.max_reduced = max_reduced
         self.random_state = random_state
-        assert target in ['likelihood', 'likelihood+'] or target in qp.error.QUANTIFICATION_ERROR_NAMES, 'unknown target for auto'
+        assert target in ['likelihood'] or target in qp.error.QUANTIFICATION_ERROR_NAMES, 'unknown target for auto'
+        assert search in ['grid', 'optim'], 'unknown value for search'
         self.target = target
+        self.search = search
 
     def aggregation_fit(self, classif_predictions: LabelledCollection, data: LabelledCollection):
         if self.bandwidth == 'auto':
@@ -303,65 +307,42 @@ class KDEyMLauto2(KDEyML):
             if len(train) > tr_length:
                 train = train.sampling(tr_length)
 
-        init_prev = np.full(fill_value=1 / n_classes, shape=(n_classes,))
+        init_prev = F.uniform_prevalence(n_classes=n_classes)
         repeats = 25
         prot = UPP(val, sample_size=self.reduction, repeats=repeats, random_state=self.random_state)
 
-        if self.target == 'likelihood+':
+        def eval_bandwidth(bandwidth):
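+            # total loss, accumulated over the validation bags, of the prevalence fit obtained under this bandwidth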
+            mix_densities = self.get_mixture_components(*train.Xy, train.classes_, bandwidth)
+            loss_accum = 0
+            for (sample, prevtrue) in prot():
+                test_densities = [self.pdf(kde_i, sample) for kde_i in mix_densities]
 
-            def neg_loglikelihood_bandwidth(bandwidth):
-                mix_densities = self.get_mixture_components(*train.Xy, train.classes_, bandwidth)
+                def neg_loglikelihood_prev(prev):
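+                    # negative log-likelihood of the bag under the mixture weighted by prev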
+                    test_mixture_likelihood = sum(prev_i * dens_i for prev_i, dens_i in zip(prev, test_densities))
+                    test_loglikelihood = np.log(test_mixture_likelihood + epsilon)
+                    nll = -np.sum(test_loglikelihood)
+                    return nll
 
-                loss_accum = 0
-                for (sample, prevtrue) in prot():
-                    test_densities = [self.pdf(kde_i, sample) for kde_i in mix_densities]
+                if self.target == 'likelihood':
+                    loss_fn = neg_loglikelihood_prev
+                else:
+                    loss_fn = lambda prev_hat: qp.error.from_name(self.target)(prevtrue, prev_hat)
 
-                    def neg_loglikelihood_prev(prev):
-                        test_mixture_likelihood = sum(prev_i * dens_i for prev_i, dens_i in zip(prev, test_densities))
-                        test_loglikelihood = np.log(test_mixture_likelihood + epsilon)
-                        nll = -np.sum(test_loglikelihood)
-                        return nll
+                pred_prev, loss_val = optim_minimize(loss_fn, init_prev, return_loss=True)
+                loss_accum += loss_val
+            return loss_accum
 
-                    pred_prev, neglikelihood = optim_minimize(neg_loglikelihood_prev, init_prev, return_loss=True)
-                    # print(f'\t\tprev={F.strprev(pred_prev)} (true={F.strprev(prev)}) got {neglikelihood=}')
-                    loss_accum += neglikelihood
-                return loss_accum
-
-            r = optimize.minimize_scalar(neg_loglikelihood_bandwidth, bounds=(0.00001, 0.2))
+        if self.search == 'optim':
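+            # note: with bounds given, recent SciPy versions select the bounded (Brent) method automatically; xatol is the tolerance on the bandwidth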
+            r = optimize.minimize_scalar(eval_bandwidth, bounds=BANDWIDTH_RANGE, options={'xatol': 0.005})
             best_band = r.x
             best_loss_value = r.fun
             nit = r.nit
             # assert r.success, 'Process did not converge!'
-            #found bandwidth=0.00994664 after nit=3 iterations loss_val=-212247.24305)
 
-        else:
-            best_band = None
-            best_loss_value = None
-            init_prev = np.full(fill_value=1 / n_classes, shape=(n_classes,))
-            for bandwidth in np.logspace(-4, np.log10(0.2), 20):
-                mix_densities = self.get_mixture_components(*train.Xy, train.classes_, bandwidth)
-
-                loss_accum = 0
-                for (sample, prev) in tqdm(prot(), total=repeats):
-                    test_densities = [self.pdf(kde_i, sample) for kde_i in mix_densities]
-
-                    def neg_loglikelihood_prev_(prev):
-                        test_mixture_likelihood = sum(prev_i * dens_i for prev_i, dens_i in zip(prev, test_densities))
-                        test_loglikelihood = np.log(test_mixture_likelihood + epsilon)
-                        return -np.sum(test_loglikelihood)
-
-                    if self.target == 'likelihood':
-                        loss_fn = neg_loglikelihood_prev_
-                    else:
-                        loss_fn = lambda prev_hat: qp.error.from_name(self.target)(prev, prev_hat)
-
-                    pred_prev, loss_val = optim_minimize(loss_fn, init_prev, return_loss=True)
-                    loss_accum += loss_val
-
-                if best_loss_value is None or loss_accum < best_loss_value:
-                    best_loss_value = loss_accum
-                    best_band = bandwidth
+        elif self.search == 'grid':
             nit=20
+            band_evals = [(band, eval_bandwidth(band)) for band in np.logspace(-4, np.log10(0.2), num=nit)]
+            best_band, best_loss_value = min(band_evals, key=lambda x: x[1])
 
         print(f'found bandwidth={best_band:.8f} after {nit=} iterations loss_val={best_loss_value:.5f}')
         self.bandwidth_ = best_band
diff --git a/KDEy/quantification_evaluation.py b/KDEy/quantification_evaluation.py
index 06d8b7f..b35dfe8 100644
--- a/KDEy/quantification_evaluation.py
+++ b/KDEy/quantification_evaluation.py
@@ -22,7 +22,7 @@ def newLR():
 
 # typical hyperparameters explored for Logistic Regression
 logreg_grid = {
-    'C': np.logspace(-3,3,7),
+    'C': np.logspace(-4,4,9),
     'class_weight': [None, 'balanced']
 }
 
@@ -34,14 +34,16 @@ def wrap_hyper(classifier_hyper_grid: dict):
 METHODS = [
     ('PACC', PACC(newLR()), wrap_hyper(logreg_grid)),
     ('EMQ', EMQ(newLR()), wrap_hyper(logreg_grid)),
-    ('KDEy-ML',  KDEyML(newLR()), {**wrap_hyper(logreg_grid), **{'bandwidth': np.logspace(-4, np.log10(0.2), 20)}}),
+    ('KDEy',  KDEyML(newLR()), {**wrap_hyper(logreg_grid), **{'bandwidth': np.logspace(-4, np.log10(0.2), 20)}}),
     # ('KDEy-MLred',  KDEyMLred(newLR()), {**wrap_hyper(logreg_grid), **{'bandwidth': np.logspace(-4, np.log10(0.2), 20)}}),
-    ('KDEy-ML-scott', KDEyML(newLR(), bandwidth='scott'), wrap_hyper(logreg_grid)),
-    ('KDEy-ML-silver', KDEyML(newLR(), bandwidth='silverman'), wrap_hyper(logreg_grid)),
-    ('KDEy-ML-autoLike',  KDEyMLauto2(newLR(), bandwidth='auto', target='likelihood'), wrap_hyper(logreg_grid)),
-    ('KDEy-ML-autoLike+',  KDEyMLauto2(newLR(), bandwidth='auto', target='likelihood+'), wrap_hyper(logreg_grid)), 
-    ('KDEy-ML-autoAE',  KDEyMLauto2(newLR(), bandwidth='auto', target='mae'), wrap_hyper(logreg_grid)),
-    ('KDEy-ML-autoRAE',  KDEyMLauto2(newLR(), bandwidth='auto', target='mrae'), wrap_hyper(logreg_grid)),
+    ('KDEy-scott', KDEyML(newLR(), bandwidth='scott'), wrap_hyper(logreg_grid)),
+    ('KDEy-silver', KDEyML(newLR(), bandwidth='silverman'), wrap_hyper(logreg_grid)),
+    ('KDEy-NLL',  KDEyMLauto2(newLR(), bandwidth='auto', target='likelihood', search='grid'), wrap_hyper(logreg_grid)),
+    ('KDEy-NLL+',  KDEyMLauto2(newLR(), bandwidth='auto', target='likelihood', search='optim'), wrap_hyper(logreg_grid)),
+    ('KDEy-AE',  KDEyMLauto2(newLR(), bandwidth='auto', target='mae', search='grid'), wrap_hyper(logreg_grid)),
+    ('KDEy-AE+',  KDEyMLauto2(newLR(), bandwidth='auto', target='mae', search='optim'), wrap_hyper(logreg_grid)),
+    ('KDEy-RAE',  KDEyMLauto2(newLR(), bandwidth='auto', target='mrae', search='grid'), wrap_hyper(logreg_grid)),
+    ('KDEy-RAE+',  KDEyMLauto2(newLR(), bandwidth='auto', target='mrae', search='optim'), wrap_hyper(logreg_grid)),
 ]
 
 
@@ -80,12 +82,80 @@ def show_results(result_path):
     print(pv)
 
 
+def run_experiment(method_name, quantifier, param_grid):
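+    """Evaluates one method on all UCI multiclass datasets, caching per-dataset reports and appending
+    summary rows to the global csv (relies on module-level config: global_result_path, n_bags_val, n_bags_test, SEED)."""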
+
+    print('Init method', method_name)
+
+    with open(global_result_path + '.csv', 'at') as csv:
+        for dataset in qp.datasets.UCI_MULTICLASS_DATASETS:
+            print('init', dataset)
+
+            local_result_path = os.path.join(Path(global_result_path).parent, method_name + '_' + dataset + '.dataframe')
+
+            if os.path.exists(local_result_path):
+                print(f'result file {local_result_path} already exists; skipping')
+                report = qp.util.load_report(local_result_path)
+
+            else:
+                with qp.util.temp_seed(SEED):
+
+                    data = qp.datasets.fetch_UCIMulticlassDataset(dataset, verbose=True)
+                    train, test = data.train_test
+
+                    transductive_names = [name for (name, *_) in TRANSDUCTIVE_METHODS]
+
+                    if method_name not in transductive_names:
+                        if len(param_grid) == 0:
+                            t_init = time()
+                            quantifier.fit(train)
+                            train_time = time() - t_init
+                        else:
+                            # model selection (train)
+                            train, val = train.split_stratified(random_state=SEED)
+                            protocol = UPP(val, repeats=n_bags_val)
+                            modsel = GridSearchQ(
+                                quantifier, param_grid, protocol, refit=True, n_jobs=-1, verbose=1, error='mae'
+                            )
+                            t_init = time()
+                            try:
+                                modsel.fit(train)
+                                print(f'best params {modsel.best_params_}')
+                                print(f'best score {modsel.best_score_}')
+                                quantifier = modsel.best_model()
+                            except Exception:
+                                print('something went wrong... trying to fit the default model')
+                                quantifier.fit(train)
+                            train_time = time() - t_init
+                    else:
+                        # transductive
+                        t_init = time()
+                        quantifier.fit(train)  # <-- does nothing in practice (only projects X into posteriors)
+                        train_time = time() - t_init
+
+                    # test
+                    t_init = time()
+                    protocol = UPP(test, repeats=n_bags_test)
+                    report = qp.evaluation.evaluation_report(
+                        quantifier, protocol, error_metrics=['mae', 'mrae', 'kld'], verbose=True
+                    )
+                    test_time = time() - t_init
+                    report['tr_time'] = train_time
+                    report['te_time'] = test_time
+                    report.to_csv(local_result_path)
+
+            means = report.mean(numeric_only=True)
+            csv.write(f'{method_name}\t{dataset}\t{means["mae"]:.5f}\t{means["mrae"]:.5f}\t{means["kld"]:.5f}\t{means["tr_time"]:.3f}\t{means["te_time"]:.3f}\n')
+            csv.flush()
+
+
+
 if __name__ == '__main__':
 
     qp.environ['SAMPLE_SIZE'] = 500
     qp.environ['N_JOBS'] = -1
-    n_bags_val = 25
-    n_bags_test = 100
+    n_bags_val = 100
+    n_bags_test = 500
     result_dir = f'results_quantification/ucimulti'
 
     os.makedirs(result_dir, exist_ok=True)
@@ -95,69 +165,6 @@ if __name__ == '__main__':
         csv.write(f'Method\tDataset\tMAE\tMRAE\tKLD\tTR-TIME\tTE-TIME\n')
 
     for method_name, quantifier, param_grid in METHODS + TRANSDUCTIVE_METHODS:
-
-        print('Init method', method_name)
-
-        with open(global_result_path + '.csv', 'at') as csv:
-            for dataset in qp.datasets.UCI_MULTICLASS_DATASETS:
-                print('init', dataset)
-
-                # run_experiment(global_result_path, method_name, quantifier, param_grid, dataset)
-                local_result_path = os.path.join(Path(global_result_path).parent, method_name + '_' + dataset + '.dataframe')
-
-                if os.path.exists(local_result_path):
-                    print(f'result file {local_result_path} already exist; skipping')
-                    report = qp.util.load_report(local_result_path)
-
-                else:
-                    with qp.util.temp_seed(SEED):
-
-                        data = qp.datasets.fetch_UCIMulticlassDataset(dataset, verbose=True)
-                        train, test = data.train_test
-
-                        transductive_names = [name for (name, *_) in TRANSDUCTIVE_METHODS]
-
-                        if method_name not in transductive_names:
-                            if len(param_grid) == 0:
-                                t_init = time()
-                                quantifier.fit(train)
-                                train_time = time() - t_init
-                            else:
-                                # model selection (train)
-                                train, val = train.split_stratified(random_state=SEED)
-                                protocol = UPP(val, repeats=n_bags_val)
-                                modsel = GridSearchQ(
-                                    quantifier, param_grid, protocol, refit=True, n_jobs=-1, verbose=1, error='mae'
-                                )
-                                t_init = time()
-                                try:
-                                    modsel.fit(train)
-                                    print(f'best params {modsel.best_params_}')
-                                    print(f'best score {modsel.best_score_}')
-                                    quantifier = modsel.best_model()
-                                except:
-                                    print('something went wrong... trying to fit the default model')
-                                    quantifier.fit(train)
-                                train_time = time() - t_init
-                        else:
-                            # transductive
-                            t_init = time()
-                            quantifier.fit(train)  # <-- nothing actually (proyects the X into posteriors only)
-                            train_time = time() - t_init
-
-                        # test
-                        t_init = time()
-                        protocol = UPP(test, repeats=n_bags_test)
-                        report = qp.evaluation.evaluation_report(
-                            quantifier, protocol, error_metrics=['mae', 'mrae', 'kld'], verbose=True
-                        )
-                        test_time = time() - t_init
-                        report['tr_time'] = train_time
-                        report['te_time'] = test_time
-                        report.to_csv(local_result_path)
-
-                means = report.mean(numeric_only=True)
-                csv.write(f'{method_name}\t{dataset}\t{means["mae"]:.5f}\t{means["mrae"]:.5f}\t{means["kld"]:.5f}\t{means["tr_time"]:.3f}\t{means["te_time"]:.3f}\n')
-                csv.flush()
+        run_experiment(method_name, quantifier, param_grid)
 
     show_results(global_result_path)
\ No newline at end of file
diff --git a/KDEy/quantification_evaluation_debug.py b/KDEy/quantification_evaluation_debug.py
index a2f5e69..073fe75 100644
--- a/KDEy/quantification_evaluation_debug.py
+++ b/KDEy/quantification_evaluation_debug.py
@@ -21,7 +21,127 @@ SEED = 1
 
 
 def newLR():
-    return LogisticRegression(max_iter=1000)#, C=1, class_weight='balanced')
+    return LogisticRegression(max_iter=1000)
+
+
+def plot(xaxis, metrics_measurements, metrics_names, suffix):
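+    # plots each metric's mean curve against the bandwidth, with a ±std band when per-sample curves are given, and saves the figure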
+    fig, ax1 = plt.subplots(figsize=(8, 6))
+
+    def add_plot(ax, mean_error, std_error, name, color, marker):
+        ax.plot(xaxis, mean_error, label=name, marker=marker, color=color)
+        if std_error is not None:
+            ax.fill_between(xaxis, mean_error - std_error, mean_error + std_error, color=color, alpha=0.2)
+
+    colors = ['b', 'g', 'r', 'c', 'purple']
+
+    def get_mean_std(measurement):
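+        # mean across samples; std is only defined when a 2D (per-sample) matrix is given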
+        measurement = np.asarray(measurement)
+        measurement_mean = np.mean(measurement, axis=0)
+        if measurement.ndim == 2:
+            measurement_std = np.std(measurement, axis=0)
+        else:
+            measurement_std = None
+        return measurement_mean, measurement_std
+
+    for i, (measurement, name) in enumerate(zip(metrics_measurements, metrics_names)):
+        color = colors[i%len(colors)]
+        add_plot(ax1, *get_mean_std(measurement), name, color=color, marker='o')
+
+    ax1.set_xscale('log')
+
+    # configure labels for the first Y axis
+    ax1.set_xlabel('Bandwidth')
+    ax1.set_ylabel('Normalized value')
+    ax1.grid(True)
+    ax1.legend(loc='upper left')
+
+    # create a second Y axis sharing the X axis
+    # ax2 = ax1.twinx()
+
+    # plot the likelihood values on the second Y axis
+    # add_plot(ax2, *get_mean_std(likelihood_measurements), name='NLL', color='purple', marker='x')
+
+    # configure labels for the second Y axis
+    # ax1.set_ylabel('neg log likelihood')
+    # ax1.legend(loc='upper right')
+
+    # save the plot
+    plt.title(dataset)
+    # plt.show()
+    os.makedirs('./plots/likelihood/', exist_ok=True)
+
+    plt.savefig(f'./plots/likelihood/{dataset}-fig{suffix}.png')
+    plt.close()
+
+
+def generate_data(from_train=False):
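+    # dataset and i are read from the module-level loop below;
+    # from_train=True draws the evaluation samples from a held-out half of the training set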
+    data = qp.datasets.fetch_UCIMulticlassDataset(dataset)
+    n_classes = data.n_classes
+    print(f'{i=}')
+    print(f'{dataset=}')
+    print(f'{n_classes=}')
+    print(len(data.training))
+    print(len(data.test))
+
+    train, test = data.train_test
+    if from_train:
+        train, test = train.split_stratified(0.5)
+    train_prev = train.prevalence()
+    test_prev = test.prevalence()
+
+    print(f'train-prev = {F.strprev(train_prev)}')
+    print(f'test-prev = {F.strprev(test_prev)}')
+
+    repeats = 10
+    prot = UPP(test, sample_size=SAMPLE_SIZE, repeats=repeats)
+    kde = KDEyMLauto(newLR())
+    kde.fit(train)
+    AE_error, RAE_error, MSE_error, KLD_error, LIKE_value = [], [], [], [], []
+    tr_posteriors, tr_y = kde.classif_predictions.Xy
+    for sample_no, (sample, prev) in tqdm(enumerate(prot()), total=repeats):
+        te_posteriors = kde.classifier.predict_proba(sample)
+        classes = train.classes_
+
+        xaxis = []
+        ae_error = []
+        rae_error = []
+        mse_error = []
+        kld_error = []
+        likelihood_value = []
+
+        # for bandwidth in np.linspace(0.01, 0.2, 50):
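+        # sweep 50 log-spaced bandwidths in [1e-5, 0.2]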
+        for bandwidth in np.logspace(-5, np.log10(0.2), 50):
+            mix_densities = kde.get_mixture_components(tr_posteriors, tr_y, classes, bandwidth)
+            test_densities = [kde.pdf(kde_i, te_posteriors) for kde_i in mix_densities]
+
+            def neg_loglikelihood_prev(prev):
+                test_mixture_likelihood = sum(prev_i * dens_i for prev_i, dens_i in zip(prev, test_densities))
+                test_loglikelihood = np.log(test_mixture_likelihood + epsilon)
+                return -np.sum(test_loglikelihood)
+
+            init_prev = F.uniform_prevalence(n_classes)
+            pred_prev, likelihood = optim_minimize(neg_loglikelihood_prev, init_prev, return_loss=True)
+
+            xaxis.append(bandwidth)
+            ae_error.append(qp.error.ae(prev, pred_prev))
+            rae_error.append(qp.error.rae(prev, pred_prev))
+            mse_error.append(qp.error.mse(prev, pred_prev))
+            kld_error.append(qp.error.kld(prev, pred_prev))
+            likelihood_value.append(likelihood)
+
+        AE_error.append(ae_error)
+        RAE_error.append(rae_error)
+        MSE_error.append(mse_error)
+        KLD_error.append(kld_error)
+        LIKE_value.append(likelihood_value)
+
+    return xaxis, AE_error, RAE_error, MSE_error, KLD_error, LIKE_value
+
+
+def normalize_metric(Error_matrix):
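+    # min-max scales the whole matrix into [0, 1]; the scaling is global, so the curves' relative shapes are preserved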
+    max_val, min_val = np.max(Error_matrix), np.min(Error_matrix)
+    return (np.asarray(Error_matrix) - min_val) / (max_val - min_val)
+
 
 SAMPLE_SIZE=150
 qp.environ['SAMPLE_SIZE'] = SAMPLE_SIZE
@@ -30,158 +150,57 @@ show_ae = True
 show_rae = True
 show_mse = False
 show_kld = True
+normalize = True
 
 epsilon = 1e-10
-# n_bags_test = 2
-# DATASETS = [qp.datasets.UCI_MULTICLASS_DATASETS[21]]
 DATASETS = qp.datasets.UCI_MULTICLASS_DATASETS
-for i, dataset in enumerate(DATASETS):
+for i, dataset in enumerate(tqdm(DATASETS, desc='processing datasets', total=len(DATASETS))):
 
-    def generate_data():
-        data = qp.datasets.fetch_UCIMulticlassDataset(dataset)
-        n_classes = data.n_classes
-        print(f'{i=}')
-        print(f'{dataset=}')
-        print(f'{n_classes=}')
-        print(len(data.training))
-        print(len(data.test))
 
-        train, test = data.train_test
-        train_prev = train.prevalence()
-        test_prev = test.prevalence()
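+    # cached curves: *_te measured on true test samples, *_tr on a held-out split of the training set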
+    xaxis, AE_error_te, RAE_error_te, MSE_error_te, KLD_error_te, LIKE_value_te = qp.util.pickled_resource(
+        f'./plots/likelihood/pickles/{dataset}.pkl', generate_data, False
+    )
 
-        print(f'train-prev = {F.strprev(train_prev)}')
-        print(f'test-prev = {F.strprev(test_prev)}')
+    xaxis, AE_error_tr, RAE_error_tr, MSE_error_tr, KLD_error_tr, LIKE_value_tr = qp.util.pickled_resource(
+        f'./plots/likelihood/pickles/{dataset}_tr.pkl', generate_data, True
+    )
 
-        repeats = 10
-        prot = UPP(test, sample_size=SAMPLE_SIZE, repeats=repeats)
-        kde = KDEyMLauto(newLR())
-        kde.fit(train)
-        AE_error, RAE_error, MSE_error, KLD_error, LIKE_value = [], [], [], [], []
-        tr_posteriors, tr_y = kde.classif_predictions.Xy
-        for it, (sample, prev) in tqdm(enumerate(prot()), total=repeats):
-            te_posteriors = kde.classifier.predict_proba(sample)
-            classes = train.classes_
 
-            xaxis = []
-            ae_error = []
-            rae_error = []
-            mse_error = []
-            kld_error = []
-            likelihood_value = []
-
-            # for bandwidth in np.linspace(0.01, 0.2, 50):
-            for bandwidth in np.logspace(-5, 0.5, 50):
-                mix_densities = kde.get_mixture_components(tr_posteriors, tr_y, classes, bandwidth)
-                test_densities = [kde.pdf(kde_i, te_posteriors) for kde_i in mix_densities]
-
-                def neg_loglikelihood_prev(prev):
-                    test_mixture_likelihood = sum(prev_i * dens_i for prev_i, dens_i in zip(prev, test_densities))
-                    test_loglikelihood = np.log(test_mixture_likelihood + epsilon)
-                    return -np.sum(test_loglikelihood)
-
-                init_prev = np.full(fill_value=1 / n_classes, shape=(n_classes,))
-                pred_prev, likelihood = optim_minimize(neg_loglikelihood_prev, init_prev, return_loss=True)
-
-                xaxis.append(bandwidth)
-                ae_error.append(qp.error.ae(prev, pred_prev))
-                rae_error.append(qp.error.rae(prev, pred_prev))
-                mse_error.append(qp.error.mse(prev, pred_prev))
-                kld_error.append(qp.error.kld(prev, pred_prev))
-                likelihood_value.append(likelihood)
-
-            AE_error.append(ae_error)
-            RAE_error.append(rae_error)
-            MSE_error.append(mse_error)
-            KLD_error.append(kld_error)
-            LIKE_value.append(likelihood_value)
-
-        return xaxis, AE_error, RAE_error, MSE_error, KLD_error, LIKE_value
-
-    xaxis, AE_error, RAE_error, MSE_error, KLD_error, LIKE_value = qp.util.pickled_resource(
-        f'./plots/likelihood/pickles/{dataset}.pkl', generate_data)
-
-    for row in range(len(AE_error)):
-
-        # Crear la figura
-        # ----------------------------------------------------------------------------------------------------
-        fig, ax1 = plt.subplots(figsize=(8, 6))
-
-        # Pintar las series ae_error, rae_error, y kld_error en el primer eje Y
-        if show_ae:
-            ax1.plot(xaxis, AE_error[row], label='AE', marker='o', color='b')
-        if show_rae:
-            ax1.plot(xaxis, RAE_error[row], label='RAE', marker='s', color='g')
-        if show_kld:
-            ax1.plot(xaxis, KLD_error[row], label='KLD', marker='^', color='r')
-        if show_mse:
-            ax1.plot(xaxis, MSE_error[row], label='MSE', marker='^', color='c')
-        ax1.set_xscale('log')
-
-        # Configurar etiquetas para el primer eje Y
-        ax1.set_xlabel('Bandwidth')
-        ax1.set_ylabel('Error Value')
-        ax1.grid(True)
-        ax1.legend(loc='upper left')
-
-        # Crear un segundo eje Y que comparte el eje X
-        ax2 = ax1.twinx()
-
-        # Pintar likelihood_val en el segundo eje Y
-        ax2.plot(xaxis, LIKE_value[row], label='(neg)Likelihood', marker='x', color='purple')
-
-        # Configurar etiquetas para el segundo eje Y
-        ax2.set_ylabel('Likelihood Value')
-        ax2.legend(loc='upper right')
-
-        # Mostrar el gráfico
-        plt.title('Error Metrics vs Bandwidth')
-        # plt.show()
-        os.makedirs('./plots/likelihood/', exist_ok=True)
-        plt.savefig(f'./plots/likelihood/{dataset}-fig{row}.png')
-        plt.close()
-
-    # Crear la figura con las medias
+    # Test measurements
     # ----------------------------------------------------------------------------------------------------
-    fig, ax1 = plt.subplots(figsize=(8, 6))
+    measurements = []
+    measurement_names = []
+    if show_ae:
+        measurements.append(AE_error_te)
+        measurement_names.append('AE')
+    if show_rae:
+        measurements.append(RAE_error_te)
+        measurement_names.append('RAE')
+    if show_kld:
+        measurements.append(KLD_error_te)
+        measurement_names.append('KLD')
+    if show_mse:
+        measurements.append(MSE_error_te)
+        measurement_names.append('MSE')
+    measurements.append(normalize_metric(LIKE_value_te))
+    measurements.append(normalize_metric(LIKE_value_tr))
+    measurement_names.append('NLL(te)')
+    measurement_names.append('NLL(tr)')
 
-    def add_plot(ax, vals_error, name, color, marker, show):
-        if not show:
-            return
-        vals_error = np.asarray(vals_error)
-        vals_ave = np.mean(vals_error, axis=0)
-        vals_std = np.std(vals_error, axis=0)
-        ax.plot(xaxis, vals_ave, label=name, marker=marker, color=color)
-        ax.fill_between(xaxis, vals_ave - vals_std, vals_ave + vals_std, color=color, alpha=0.2)
+    if normalize:
+        measurements = [normalize_metric(m) for m in measurements]
 
-    add_plot(ax1, AE_error, 'AE', color='b', marker='o', show=show_ae)
-    add_plot(ax1, RAE_error, 'RAE', color='g', marker='s', show=show_rae)
-    add_plot(ax1, KLD_error, 'KLD', color='r', marker='^', show=show_kld)
-    add_plot(ax1, MSE_error, 'MSE', color='c', marker='^', show=show_mse)
-    ax1.set_xscale('log')
+    # plot(xaxis, measurements, measurement_names, suffix='AVE')
 
-    # Configurar etiquetas para el primer eje Y
-    ax1.set_xlabel('Bandwidth')
-    ax1.set_ylabel('Error Value')
-    ax1.grid(True)
-    ax1.legend(loc='upper left')
-
-    # Crear un segundo eje Y que comparte el eje X
-    ax2 = ax1.twinx()
-
-    # Pintar likelihood_val en el segundo eje Y
-    add_plot(ax2, LIKE_value, '(neg)Likelihood', color='purple', marker='x', show=True)
-
-    # Configurar etiquetas para el segundo eje Y
-    ax2.set_ylabel('Likelihood Value')
-    ax2.legend(loc='upper right')
-
-    # Mostrar el gráfico
-    plt.title('Error Metrics vs Bandwidth')
-    # plt.show()
-    os.makedirs('./plots/likelihood/', exist_ok=True)
-    plt.savefig(f'./plots/likelihood/{dataset}-figAve.png')
-    plt.close()
+    # Train-Test measurements
+    # ----------------------------------------------------------------------------------------------------
+    # measurements = []
+    # measurement_names = []
+    # measurements.append(normalize_metric(LIKE_value_te))
+    # measurements.append(normalize_metric(LIKE_value_tr))
+    # measurement_names.append('NLL(te)')
+    # measurement_names.append('NLL(tr)')
+    plot(xaxis, measurements, measurement_names, suffix='AVEtr')