# Generates, for each LeQua2024 task (T1, T2, T4), the diagonal plot (binary
# tasks only) and the error-by-drift plot comparing participant/baseline
# methods against the true test prevalences.
# --- Imports (stdlib / third-party / project-local) --------------------------
import glob
import os
import sys
from os.path import join
from pathlib import Path

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# import seaborn as sns

# NOTE(review): these sys.path hacks were disabled; kept for reference in case
# the project-local imports below fail when run from another working directory.
# sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')))
# sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../')))
# sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), './')))

import quapy as qp
from quapy.data.base import LabelledCollection

from LeQua2024.scripts.data import load_vector_documents
from LeQua2024.scripts.constants import SAMPLE_SIZE
# from LeQua2024.scripts import constants
# from LeQua2024._lequa2024 import fetch_lequa2024

# provides load_result_file, desired_order_dict, method_names_nice (star import
# kept last so its names are not shadowed by the imports above)
from commons import *
# Main driver: one iteration per LeQua2024 task. Tasks 1 and 4 are binary
# (sentiment / domain prevalence); task 2 is multiclass.
for TASK in [1, 2, 4]:

    qp.environ['SAMPLE_SIZE'] = SAMPLE_SIZE[f'T{TASK}']

    true_prevs_path = f'../TruePrevalences/T{TASK}.test_prevalences/T{TASK}/public/test_prevalences.txt'
    folder = f'../Results_CODALAB_2024/extracted/TASK_{TASK}'

    # one CSV of estimated prevalences per participant/baseline method
    method_files = glob.glob(f"{folder}/*.csv")

    desired_order = desired_order_dict[TASK]

    # load the true values (sentiment prevalence, domain prevalence)
    true_id, true_prevs = load_result_file(true_prevs_path)

    # define the loss for evaluation
    error_name = 'RAE'
    error_log = False

    if error_name == 'RAE':
        err_function_ = qp.error.rae
    elif error_name == 'AE':
        err_function_ = qp.error.ae
    else:
        raise ValueError(f'unknown error name {error_name}')

    if error_log:
        # wrap the error in a log for plotting; rename the label accordingly
        error_name = f'log({error_name})'
        err_function = lambda x, y: np.log(err_function_(x, y))
    else:
        err_function = err_function_

    # load the training set to obtain the training prevalence (reference
    # lines / drift origin in the plots)
    # train_prevalence = fetch_lequa2024(task=f'T{TASK}', data_home='./data')
    train = LabelledCollection.load(
        f'../data/lequa2024/T{TASK}/public/training_data.txt',
        loader_func=load_vector_documents)
    train_prev = train.prevalence()
    # train_prev = np.tile(train_prev, (len(true_id), 1))

    from quapy.plot import error_by_drift, binary_diagonal

    # load the participant and baseline results
    method_names, estim_prevs = [], []
    for method_file in method_files:
        method_name = Path(method_file).name.replace('.csv', '')
        # if method_name in exclude_methods:
        #     continue
        sample_ids, method_prevs = load_result_file(join(folder, method_name + '.csv'))
        # sample ids must align with the true-prevalence file, row by row
        assert (true_id == sample_ids).all(), f'unmatched files for {method_file}'
        method_name = method_names_nice.get(method_name, method_name)
        if method_name not in desired_order:
            print(f'method {method_name} unknown')
            raise ValueError(f'method {method_name} unknown')
        method_names.append(method_name)
        estim_prevs.append(method_prevs)

    plt.rcParams['figure.figsize'] = [14, 6]
    plt.rcParams['figure.dpi'] = 200
    plt.rcParams['font.size'] = 15

    # the plotting functions expect one true-prevalence array per method
    true_prevs = [true_prevs] * len(method_names)

    # diagonal plot only makes sense for the binary tasks (1 and 4)
    savepath = f'./t{TASK}_diagonal.png'
    if TASK in [1, 4]:
        binary_diagonal(method_names, true_prevs, estim_prevs, pos_class=1,
                        title=None, show_std=True, legend=True,
                        train_prev=train.prevalence(), savepath=savepath,
                        method_order=desired_order)

    # hand-tuned legend anchor per task
    box_to_ancor = {
        1: (0.88, 0.1),
        2: (0.9, 0.15),
        4: (0.9, 0.15),
    }

    tr_prevs = [train.prevalence()] * len(method_names)
    savepath = f'./t{TASK}_{error_name}_pps.png'
    binary = TASK in [1, 4]
    if binary:
        print(f'{TASK=} has positive prevalence = {train.prevalence()[1]}')
    error_by_drift(method_names,
                   true_prevs,
                   estim_prevs,
                   tr_prevs,
                   title=None,
                   y_error_name='rae',
                   x_error_name='bias_binary' if binary else 'ae',
                   x_axis_title='PPS between training set and test sample (in terms of bias)' if binary else None,
                   show_std=False,
                   n_bins=25,
                   logscale=binary,
                   show_density=True,
                   method_order=desired_order,
                   vlines=list(train.prevalence()) if binary else None,
                   bbox_to_anchor=box_to_ancor[TASK],
                   savepath=savepath)