import cv2
import numpy as np

import LFUtilities
import BEBLIDParameters
import ImageRecognitionSettings as settings
from line_profiler_pycharm import profile

import faiss
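
# Names this rescorer expects from the project's own modules (inferred from how
# they are used below; the modules themselves are not part of this file):
#   BEBLIDParameters.MIN_GOOD_MATCHES  - minimum number of keypoints / good matches required
#   BEBLIDParameters.NN_MATCH_RATIO    - Lowe-style ratio-test threshold
#   BEBLIDParameters.MIN_INLIERS       - minimum RANSAC inliers to accept a candidate
#   settings.DATASET_LF_FOLDER / DATASET_LF / DATASET_IDS_LF - local-feature storage locations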


class BEBLIDRescorerFAISS:
    """Re-ranks a candidate result set by matching BEBLID local-feature
    descriptors through a FAISS binary index and verifying the matches
    geometrically with a RANSAC homography."""

    def __init__(self):
        # self.lf = LFUtilities.load(settings.DATASET_BEBLID)
        # self.ids = np.loadtxt(settings.DATASET_IDS, dtype=str).tolist()
        # self.bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        self.bf = cv2.DescriptorMatcher_create(cv2.DescriptorMatcher_BRUTEFORCE_HAMMING)

    def rescore_by_id(self, query_id, resultset):
        # query_idx = self.ids.index(query_id)
        # Load the stored local features (keypoints, descriptors) of the query image.
        query = LFUtilities.load_img_lf(settings.DATASET_LF_FOLDER, query_id)
        return self.rescore_by_img(query, resultset)

    @profile
    def rescore_by_img(self, query, resultset):
        """Re-ranks `resultset` (an iterable of (data_id, score) pairs): each
        candidate's BEBLID descriptors are matched against the query with a
        FAISS binary index, and the candidate is kept only if a RANSAC
        homography finds enough inliers. Returns a list of
        (data_id, inlier_ratio) pairs sorted by decreasing ratio."""
        max_inliers = -1
        res = []
        counter = 0
        # query[0]: keypoints of the query image, query[1]: its binary BEBLID descriptors.
        if len(query[0]) > BEBLIDParameters.MIN_GOOD_MATCHES:
            for data_id, _ in resultset:
                try:
                    # data_el = LFUtilities.loadz_img_lf(settings.DATASET_LF_FOLDER, data_id)
                    data_el = LFUtilities.unpickle_img_lf(settings.DATASET_LF_FOLDER, data_id)

                    if len(data_el[1]) > BEBLIDParameters.MIN_GOOD_MATCHES:
                        # Brute-force kNN matching kept for reference:
                        # nn_matches = self.bf.knnMatch(query[1], data_el[1], 2)
                        # good = [m for m, n in nn_matches if m.distance < BEBLIDParameters.NN_MATCH_RATIO * n.distance]

                        # Dimension of the binary vectors, in bits (the descriptors here are 256-bit codes).
                        d = 256

                        # Vectors to be indexed: a uint8 array with d / 8 bytes per row,
                        # i.e. the i-th vector is db[i].
                        db = data_el[1]

                        # Vectors to be queried against the index.
                        queries = query[1]

                        # Initializing the binary index.
                        # index = faiss.IndexBinaryFlat(d)
                        nbits = 64
                        index = faiss.IndexBinaryHash(d, nbits)
                        # index = faiss.IndexBinaryHNSW(d, 256)

                        # Adding the database vectors.
                        index.add(db)

                        # Number of nearest neighbors to retrieve per query vector.
                        k = 2

                        # Querying the index.
                        index.nflip = 1
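
                        # faiss.IndexBinaryHash buckets the database codes by their first
                        # `nbits` bits; at search time every bucket within `nflip` bit flips
                        # of the query's hash is visited, so results are approximate
                        # (a note on FAISS behaviour, added here as a reading aid).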
                        D, I = index.search(queries, k)

                        # D[i, j]: distance from the i-th query vector to its j-th nearest neighbor.
                        # I[i, j]: id of the j-th nearest neighbor of the i-th query vector.

                        # Lowe-style ratio test on the two nearest neighbors of each query descriptor.
                        f_good = (D[:, 0] < BEBLIDParameters.NN_MATCH_RATIO * D[:, 1])
                        Qgood = np.asarray(np.nonzero(f_good))[0]  # indices of the good query descriptors
                        Igood = I[f_good, 0]  # ids of their best matches among the candidate's descriptors

                        if Qgood.size > BEBLIDParameters.MIN_GOOD_MATCHES:
                            # src_pts = np.float32([query[0][m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
                            # dst_pts = np.float32([data_el[0][m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
                            # dst_pts = data_el[0][[m.trainIdx for m in good]].reshape(-1, 1, 2)

                            # Keypoint coordinates of the surviving matches, query side and candidate side.
                            src_pts = np.float32([query[0][m].pt for m in Qgood]).reshape(-1, 1, 2)
                            dst_pts = data_el[0][Igood].reshape(-1, 1, 2)

                            # Geometric verification: estimate a homography with RANSAC and count its inliers.
                            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
                            matches_mask = mask.ravel().tolist()
                            inliers = np.count_nonzero(matches_mask)
                            if inliers >= BEBLIDParameters.MIN_INLIERS and inliers > max_inliers:
                                max_inliers = inliers
                                res.append((data_id, round(inliers / Qgood.size, 3)))
                                print(data_id)
                                print(f'candidate n. {counter}')
                                # to get just the first candidate
                                break
                except Exception as e:
                    print('rescore error evaluating ' + data_id)
                    print(e)
                counter += 1

        if res:
            res.sort(key=lambda result: result[1], reverse=True)
        return res

    def add(self, lf):
        # Note: self.lf (like self.ids and self.descs below) is not initialized in
        # this FAISS variant; see the commented-out lines in __init__.
        self.lf.append(lf)

    def remove(self, idx):
        self.descs = np.delete(self.descs, idx, axis=0)

    def save(self, is_backup=False):
        lf_save_file = settings.DATASET_LF
        ids_file = settings.DATASET_IDS_LF
        if lf_save_file != "None":
            if is_backup:
                lf_save_file += '.bak'
                ids_file += '.bak'

            LFUtilities.save(lf_save_file, self.lf)
            np.savetxt(ids_file, self.ids, fmt='%s')
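

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module). The id
# values and the (data_id, score) pair format are assumptions inferred from
# rescore_by_img; LFUtilities and ImageRecognitionSettings must point to an
# existing local-feature store for this to actually run.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    rescorer = BEBLIDRescorerFAISS()
    # Hypothetical candidate list, e.g. as produced by a global-descriptor search.
    candidates = [('img_000123', 0.91), ('img_000456', 0.87)]
    reranked = rescorer.rescore_by_id('query_000001', candidates)
    # `reranked` is a list of (data_id, inlier_ratio) pairs, best candidate first.
    print(reranked)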