
qp.environ was not reachable within Parallel processes; changing backend to threading

Alejandro Moreo Fernandez 2021-01-25 09:58:39 +01:00
parent e7527bd7ed
commit 2fda46fc13
4 changed files with 8 additions and 10 deletions
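
The root cause: joblib's default backend runs workers in separate processes, and each worker re-imports quapy, so it sees a fresh copy of the module-level qp.environ dict rather than the values the driver script assigned; the threading backend shares the parent's memory. A minimal sketch of the failure mode, assuming joblib's process-based default (pkg and read_sample_size are hypothetical names, not part of this commit):

# pkg/__init__.py -- stands in for quapy/__init__.py
environ = {'SAMPLE_SIZE': None}

# main.py
from joblib import Parallel, delayed
import pkg

def read_sample_size(_):
    return pkg.environ['SAMPLE_SIZE']

pkg.environ['SAMPLE_SIZE'] = 100  # mutates the parent process's copy only

# process-based default: workers re-import pkg, typically printing [None, None]
print(Parallel(n_jobs=2)(delayed(read_sample_size)(i) for i in range(2)))

# threading backend: workers share the parent's memory, printing [100, 100]
print(Parallel(n_jobs=2, backend='threading')(delayed(read_sample_size)(i) for i in range(2)))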

View File

@@ -16,7 +16,6 @@ import argparse
 import torch
 import shutil
 
-qp.environ['SAMPLE_SIZE'] = settings.SAMPLE_SIZE
 
 def newLR():
     return LogisticRegression(max_iter=1000, solver='lbfgs', n_jobs=-1)
@@ -116,6 +115,8 @@ def save_results(dataset_name, model_name, optim_loss, *results):
 def run(experiment):
+    qp.environ['SAMPLE_SIZE'] = settings.SAMPLE_SIZE
+
     optim_loss, dataset_name, (model_name, model, hyperparams) = experiment
     if is_already_computed(dataset_name, model_name, optim_loss=optim_loss):
@@ -163,7 +164,8 @@ def run(experiment):
         test=benchmark_eval.test,
         sample_size=settings.SAMPLE_SIZE,
         n_prevpoints=21,
-        n_repetitions=25
+        n_repetitions=25,
+        n_jobs=-1 if isinstance(model, qp.method.meta.Ensemble) else 1
     )
     test_estim_prevalence = model.quantify(benchmark_eval.test.instances)
     test_true_prevalence = benchmark_eval.test.prevalence()
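
Both changes to the experiment script follow from that root cause: the module-level assignment to qp.environ['SAMPLE_SIZE'] only configured the parent, so it moves inside run(), which executes in each worker; and the evaluation protocol's n_jobs is now passed explicitly, enabling all cores only for Ensemble models (presumably to avoid oversubscribing runs that are already parallelized at the experiment level). A sketch of the per-worker initialization pattern (settings and the experiment list are made-up stand-ins):

from joblib import Parallel, delayed
import quapy as qp
import settings  # hypothetical stand-in for the script's settings module

def run(experiment):
    # first statement executed inside the worker: configure its own qp.environ
    qp.environ['SAMPLE_SIZE'] = settings.SAMPLE_SIZE
    print(f'{experiment}: SAMPLE_SIZE={qp.environ["SAMPLE_SIZE"]}')

Parallel(n_jobs=-1)(delayed(run)(e) for e in ['exp-a', 'exp-b'])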

View File

@@ -10,7 +10,6 @@ from . import model_selection
 from . import classification
 from quapy.method.base import isprobabilistic, isaggregative
 
-
 environ = {
     'SAMPLE_SIZE': None,
     'UNK_TOKEN': '[UNK]',
@@ -22,5 +21,3 @@ environ = {
 
 def isbinary(x):
     return x.binary
-
-

View File

@@ -1,8 +1,6 @@
 import numpy as np
 from sklearn.metrics import f1_score
-import quapy as qp
-
 
 def f1e(y_true, y_pred):
     return 1. - f1_score(y_true, y_pred, average='macro')
@@ -66,6 +64,7 @@ def smooth(p, eps):
 def __check_eps(eps):
+    import quapy as qp
     sample_size = qp.environ['SAMPLE_SIZE']
     if eps is None:
         if sample_size is None:
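
The same reachability issue explains moving `import quapy as qp` from the top of this module into __check_eps: the module is imported while the quapy package is still initializing, so a top-level self-import can bind a partially built package; importing at call time resolves qp.environ once the package is complete. A minimal sketch of the deferred-import pattern (pkg, errors, and check are hypothetical names):

# pkg/__init__.py
from . import errors        # imported while pkg itself is still initializing
environ = {'SAMPLE_SIZE': None}

# pkg/errors.py
def check():
    import pkg              # deferred: resolved at call time, when pkg is complete
    return pkg.environ['SAMPLE_SIZE']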

View File

@@ -82,7 +82,7 @@ class Ensemble(BaseQuantifier):
         is_static_policy = (self.policy in qp.error.QUANTIFICATION_ERROR_NAMES)
 
-        self.ensemble = Parallel(n_jobs=self.n_jobs)(
+        self.ensemble = Parallel(n_jobs=self.n_jobs, backend="threading")(
             delayed(_delayed_new_instance)(
                 self.base_quantifier, data, val_split, prev, posteriors, keep_samples=is_static_policy,
                 verbose=self.verbose, sample_size=sample_size
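
With backend="threading" the ensemble members train in threads of one process, so every worker reads the same qp.environ the caller configured; the trade-off is that pure-Python work is serialized by the GIL, while NumPy/scikit-learn routines that release it in native code can still run concurrently. A quick check of the shared state (read_env is a made-up helper):

from joblib import Parallel, delayed
import quapy as qp

qp.environ['SAMPLE_SIZE'] = 100

def read_env(_):
    return qp.environ['SAMPLE_SIZE']  # threads see the caller's mutation

# prints [100, 100, 100, 100], unlike the process-based default
print(Parallel(n_jobs=4, backend='threading')(delayed(read_env)(i) for i in range(4)))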