Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 23 additions & 0 deletions opensfm/features.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@

import cv2
import numpy as np
import torch
from opensfm import context, pyfeatures


Expand Down Expand Up @@ -548,6 +549,21 @@ def extract_features_orb(
return points, desc


def extract_features_xfeat(
    image: np.ndarray, config: Dict[str, Any], features_count: int, xfeat: Any
) -> Tuple[np.ndarray, np.ndarray]:
    """Detect XFeat keypoints and descriptors on an image.

    Args:
        image: image to extract features from
        config: config parameters (unused; kept for signature parity with
            the other extract_features_* helpers)
        features_count: maximum number of features to detect
        xfeat: pre-loaded XFeat model (torch.hub 'verlab/accelerated_features')

    Returns:
        Tuple of (N, 3) points [x, y, score] and the (N, D) descriptor array.
    """
    logger.debug("Computing XFeats")
    t = time.time()

    # detectAndCompute returns a one-element list of dicts for a single image.
    # Use the configured feature count instead of a hard-coded 4096.
    output = xfeat.detectAndCompute(image, top_k=features_count)[0]
    # Move tensors to CPU first in case the model ran on GPU.
    points = output["keypoints"].cpu().numpy()
    # The model may return fewer than top_k keypoints, so infer the row
    # count instead of assuming exactly top_k scores.
    scores = output["scores"].cpu().numpy().reshape((-1, 1))
    points = np.hstack((points, scores))
    desc = output["descriptors"].cpu().numpy()

    logger.debug("Found {0} points in {1}s".format(len(points), time.time() - t))
    return points, desc

def extract_features(
image: np.ndarray, config: Dict[str, Any], is_panorama: bool
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
Expand Down Expand Up @@ -606,6 +622,13 @@ def extract_features(
points, desc = extract_features_hahog(image_gray, config, features_count)
elif feature_type == "ORB":
points, desc = extract_features_orb(image_gray, config, features_count)
elif feature_type == 'XFEAT':
xfeat = torch.hub.load(
'verlab/accelerated_features',
'XFeat',
pretrained = True,
top_k = 4096)
points, desc = extract_features_xfeat(image, config, features_count, xfeat)
else:
raise ValueError("Unknown feature type "
+ "(must be SURF, SIFT, AKAZE, HAHOG or ORB)")
Expand Down
102 changes: 102 additions & 0 deletions opensfm/matching.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,11 @@

import cv2
import numpy as np
import torch
from opensfm import (
context,
feature_loader,
io,
log,
multiview,
pairs_selection,
Expand Down Expand Up @@ -434,6 +436,23 @@ def _match_descriptors_impl(
matches = match_brute_force_symmetric(d1, d2, overriden_config)
else:
matches = match_brute_force(d1, d2, overriden_config)
elif matcher_type == "LIGHTGLUE":
assert not symmetric_matching
# assume im1.shape == im2.shape
im = data.load_image(im1)
xfeat = torch.hub.load(
'verlab/accelerated_features',
'XFeat',
pretrained = True,
top_k = 4096)
match_lightglue(
features_data1.points,
d1,
features_data2.points,
d2,
im.shape,
overriden_config,
xfeat = xfeat)
else:
raise ValueError("Invalid matcher_type: {}".format(matcher_type))

Expand Down Expand Up @@ -748,6 +767,89 @@ def match_brute_force(
return _convert_matches_to_vector(good_matches)


def match_lightglue(
    p1: np.ndarray,
    d1: np.ndarray,
    p2: np.ndarray,
    d2: np.ndarray,
    shape: Tuple[int, ...],
    config: Dict[str, Any],
    maskij: Optional[np.ndarray] = None,
    xfeat: Any = None,
    is_panorama: bool = False,
) -> List[Tuple[int, int]]:
    """LighterGlue feature matching from https://github.com/verlab/accelerated_features

    Args:
        p1: feature keypoints of the first image
        d1: feature descriptors of the first image
        p2: feature keypoints of the second image
        d2: feature descriptors of the second image
        shape: shape of original image (assumed equal for both images)
        config: config parameters
        maskij: optional boolean mask of len(i descriptors) x len(j descriptors)
            (currently unused)
        xfeat: XFeat model (required)
        is_panorama: select the panorama feature-process size from config
            (fixes a NameError: this was read but never defined before)

    Returns:
        List of (index in p1, index in p2) matched pairs.
    """
    # Explicit validation instead of `assert`, which is stripped under -O.
    if xfeat is None:
        raise ValueError("match_lightglue requires a loaded XFeat model")

    def _kpt_idxs(output, mkpts):
        """Map matched keypoint coordinates back to indices in output['keypoints'].

        NOTE(review): keypoints are bucketed by floored (x, y) pixel; two
        keypoints falling in the same pixel cell would collide — assumed
        not to happen in practice.
        """
        cells = {}
        for p in mkpts:
            x = np.floor(float(p[0]))
            y = np.floor(float(p[1]))
            cells.setdefault(x, {})[y] = -1

        found = 0
        for i, p in enumerate(output["keypoints"]):
            x = np.floor(float(p[0]))
            y = np.floor(float(p[1]))
            if x in cells and y in cells[x]:
                cells[x][y] = i
                found += 1

        # Every matched keypoint must originate from the keypoint set.
        assert found == len(mkpts)

        return [
            cells[np.floor(float(p[0]))][np.floor(float(p[1]))] for p in mkpts
        ]

    extraction_size = (
        config["feature_process_size_panorama"]
        if is_panorama
        else config["feature_process_size"]
    )

    h, w = shape[:2]
    size = max(w, h)
    final_size = (h, w)
    if 0 < extraction_size < size:
        # Scale the reported image size down to the resolution at which
        # features were extracted (was a NameError: `max_size` undefined;
        # the intended value is `extraction_size`).
        final_size = (h * extraction_size // size, w * extraction_size // size)

    output0 = {"keypoints": p1, "descriptors": d1, "image_size": final_size}
    output1 = {"keypoints": p2, "descriptors": d2, "image_size": final_size}
    mkpts_0, mkpts_1 = xfeat.match_lighterglue(output0, output1)
    idxs_0 = _kpt_idxs(output0, mkpts_0)
    idxs_1 = _kpt_idxs(output1, mkpts_1)

    return list(zip(idxs_0, idxs_1))


def _convert_matches_to_vector(matches: List[Any]) -> List[Tuple[int, int]]:
"""Convert Dmatch object to matrix form."""
return [(mm.queryIdx, mm.trainIdx) for mm in matches]
Expand Down