Compare commits


No commits in common. "1.1" and "master" have entirely different histories.
1.1 ... master

4 changed files with 177 additions and 2 deletions


@@ -1,4 +1,3 @@
 # image-recognition
-SWOADS Project
-Release 1.1
+SWOADS Project
 

src/BEBLIDRescorerDB.py Normal file

@@ -0,0 +1,81 @@
import cv2
import numpy as np
import LFUtilities
import BEBLIDParameters
import ImageRecognitionSettings as settings
from LFDB import LFDB


class BEBLIDRescorerDB:

    def __init__(self):
        # self.lf = LFUtilities.load(settings.DATASET_BEBLID)
        # self.ids = np.loadtxt(settings.DATASET_IDS, dtype=str).tolist()
        # self.bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        self.bf = cv2.DescriptorMatcher_create(cv2.DescriptorMatcher_BRUTEFORCE_HAMMING)
        self.lf_db = LFDB(settings.DB_LF)

    def rescore_by_id(self, query_id, resultset):
        # query_idx = self.ids.index(query_id)
        query = LFUtilities.load_img_lf(settings.DATASET_LF_FOLDER, query_id)
        return self.rescore_by_img(query, resultset)

    def rescore_by_img(self, query, resultset):
        max_inliers = -1
        res = []
        counter = 0
        if len(query[0]) > 0:
            for data_id, _ in resultset:
                try:
                    blob = self.lf_db.get(data_id)
                    serialized_obj = LFUtilities.deserialize_object(blob)
                    data_el = LFUtilities.unpickle_keypoints(serialized_obj)
                    if len(data_el[1]) > 0:
                        # Lowe's ratio test on 2-NN Hamming matches
                        nn_matches = self.bf.knnMatch(query[1], data_el[1], 2)
                        good = [m for m, n in nn_matches if m.distance < BEBLIDParameters.NN_MATCH_RATIO * n.distance]
                        if len(good) > BEBLIDParameters.MIN_GOOD_MATCHES:
                            # geometric verification: RANSAC homography between matched keypoints
                            src_pts = np.float32([query[0][m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
                            dst_pts = np.float32([data_el[0][m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
                            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 3.0)
                            matches_mask = mask.ravel().tolist()
                            inliers = np.count_nonzero(matches_mask)
                            if inliers >= BEBLIDParameters.MIN_INLIERS and inliers > max_inliers:
                                max_inliers = inliers
                                res.append((data_id, round(inliers / len(good), 3)))
                                print(data_id)
                                print(f'candidate n. {counter}')
                                # to get just the first candidate
                                break
                except Exception as e:
                    print('rescore error evaluating ' + data_id)
                    print(e)
                counter += 1
        if res:
            res.sort(key=lambda result: result[1], reverse=True)
        return res

    def add(self, lf):
        self.lf.append(lf)

    def remove(self, idx):
        self.descs = np.delete(self.descs, idx, axis=0)

    def save(self, is_backup=False):
        # NOTE: add/remove/save still operate on the legacy in-memory state
        # (self.lf, self.ids, self.descs) that is commented out in __init__.
        lf_save_file = settings.DATASET_LF
        ids_file = settings.DATASET_IDS_LF
        if lf_save_file != "None":
            if is_backup:
                lf_save_file += '.bak'
                ids_file += '.bak'
            LFUtilities.save(lf_save_file, self.lf)
            np.savetxt(ids_file, self.ids, fmt='%s')
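
For context, a minimal usage sketch (hypothetical, not part of this diff): it assumes settings.DB_LF points at an already populated LF database and that BEBLIDExtractorQ provides the same extract() used at indexing time; query is the (keypoints, descriptors) pair that rescore_by_img expects.

# Hypothetical usage sketch -- not part of this diff.
import BEBLIDExtractorQ as lf
from BEBLIDRescorerDB import BEBLIDRescorerDB

rescorer = BEBLIDRescorerDB()

# (keypoints, descriptors) tuple, as expected by rescore_by_img
query = lf.extract('query.jpg')

# result set of (data_id, score) pairs, e.g. from a first-stage index
resultset = [('img_001', 0.91), ('img_002', 0.87)]

print(rescorer.rescore_by_img(query, resultset))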


@@ -0,0 +1,40 @@
from pathlib import Path

import tqdm
import LFUtilities
import BEBLIDExtractorQ as lf
import argparse
import os

from LFDB import LFDB


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='LF bulk extraction')
    parser.add_argument('src', type=str, help='img src folder path')
    parser.add_argument('dest', type=str, help='LF DB file')
    args = parser.parse_args()

    src = args.src
    dest = args.dest

    lf_db = LFDB(dest)

    paths = Path(src).rglob('*.*')
    paths_list = list(paths)

    print('Extracting lf...')
    for path in tqdm.tqdm(paths_list):
        try:
            kp, des = lf.extract(os.path.join(path.parent, path.name))
            features = LFUtilities.pickle_keypoints(kp, des)
            blob = LFUtilities.serialize_object(features)
            filename = os.path.splitext(path.name)[0]
            lf_db.put(filename, blob)
        except Exception as e:
            print("cannot process '%s'" % path)
            print(e)

    lf_db.commit()
    lf_db.close()
    print('lf extracted.')
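
The script's file name is not shown in this view; assuming it were saved as bulk_extract.py (a placeholder name), the argparse interface above would make the invocation:

python bulk_extract.py /path/to/images lf.db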

src/LFDB.py Normal file

@@ -0,0 +1,55 @@
import sqlite3
from sqlite3 import Error


class LFDB:

    def __init__(self, db_path):
        self.conn = sqlite3.connect(db_path, check_same_thread=False)

    def close(self):
        if self.conn:
            self.conn.close()

    def put(self, docId, features):
        try:
            self.conn.text_factory = str
            cur = self.conn.cursor()
            insert_file = '''INSERT INTO lf(docId, features) VALUES(?, ?)'''
            cur.execute(insert_file, (docId, features,))
        except Error as e:
            print(e)

    def commit(self):
        try:
            if self.conn:
                self.conn.commit()
                print("committing...")
        except Error as e:
            print(e)

    def get(self, docId):
        # blob stays None if the query fails, so the return below is always defined
        blob = None
        try:
            self.conn.text_factory = str
            cur = self.conn.cursor()
            sql_fetch_blob_query = """SELECT * FROM lf WHERE docId = ?"""
            cur.execute(sql_fetch_blob_query, (docId,))
            record = cur.fetchall()
            for row in record:
                converted_file_name = row[1]
                blob = row[2]
                # parse out the file name from converted_file_name
            cur.close()
        except sqlite3.Error as error:
            print("[INFO] : Failed to read blob data from sqlite table", error)
        return blob
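
Note that LFDB assumes the lf table already exists; its creation is not part of this diff. A plausible setup sketch, with the column layout inferred from put() (which inserts docId and features) and get() (which reads row[1] and row[2], implying a leading primary-key column):

import sqlite3

# Hypothetical schema sketch -- the actual table definition is not shown
# in this diff; column order is inferred from LFDB.put()/get().
conn = sqlite3.connect('lf.db')
conn.execute('''CREATE TABLE IF NOT EXISTS lf (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    docId TEXT,
                    features BLOB)''')
conn.commit()
conn.close()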