From d1ebb809d686b67dc2ee637c18ae4edc7853ca78 Mon Sep 17 00:00:00 2001
From: Tobias Fischer
Date: Wed, 2 Jun 2021 15:42:37 +1000
Subject: [PATCH 1/3] Fixes for pypi/conda release

---
 .gitignore                           |  1 +
 feature_extract.py                   |  2 +-
 feature_match.py                     | 18 +++++++++++++-----
 match_two.py                         |  2 +-
 patchnetvlad/models/local_matcher.py |  8 ++++----
 patchnetvlad/tools/datasets.py       |  6 ++++++
 setup.py                             | 18 +++++++++++++-----
 7 files changed, 39 insertions(+), 16 deletions(-)

diff --git a/.gitignore b/.gitignore
index dbf8bfd..ed7e9c3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,6 +4,7 @@
 patchnetvlad/pretrained_models/*
 patchnetvlad/results/*
 patchnetvlad/output_features/*
+results/*
 
 ## Python stuff
 # Byte-compiled / optimized / DLL files
diff --git a/feature_extract.py b/feature_extract.py
index 8a80830..a70f27d 100755
--- a/feature_extract.py
+++ b/feature_extract.py
@@ -164,7 +164,7 @@ def main():
     torch.cuda.empty_cache()  # garbage clean GPU memory, a bug can occur when Pytorch doesn't automatically clear the
     # memory after runs
 
-    print('Done. Finished extracting and saving features')
+    print('\n\nDone. Finished extracting and saving features')
 
 
 if __name__ == "__main__":
diff --git a/feature_match.py b/feature_match.py
index 3ce860c..ae8f408 100755
--- a/feature_match.py
+++ b/feature_match.py
@@ -73,7 +73,9 @@ def compute_recall(gt, predictions, numQ, n_values, recall_str=''):
 def write_kapture_output(opt, eval_set, predictions, outfile_name):
     if not exists(opt.result_save_folder):
         os.mkdir(opt.result_save_folder)
-    with open(join(opt.result_save_folder, outfile_name), 'w') as kap_out:
+    outfile = join(opt.result_save_folder, outfile_name)
+    print('Writing results to', outfile)
+    with open(outfile, 'w') as kap_out:
         kap_out.write('# kapture format: 1.0\n')
         kap_out.write('# query_image, map_image\n')
         image_list_array = np.array(eval_set.images)
@@ -87,7 +89,9 @@
 def write_recalls_output(opt, recalls_netvlad, recalls_patchnetvlad, n_values):
     if not exists(opt.result_save_folder):
         os.mkdir(opt.result_save_folder)
-    with open(join(opt.result_save_folder, 'recalls.txt'), 'w') as rec_out:
+    outfile = join(opt.result_save_folder, 'recalls.txt')
+    print('Writing recalls to', outfile)
+    with open(outfile, 'w') as rec_out:
         for n in n_values:
             rec_out.write("Recall {}@{}: {:.4f}\n".format('NetVLAD', n, recalls_netvlad[n]))
         for n in n_values:
@@ -120,7 +124,8 @@ def feature_match(eval_set, device, opt, config):
     if config['feature_match']['pred_input_path'] != 'None':
         predictions = np.load(config['feature_match']['pred_input_path'])  # optionally load predictions from a np file
     else:
-        if opt.ground_truth_path.split('/')[1][:-4] == 'tokyo247':
+        if opt.ground_truth_path and 'tokyo247' in opt.ground_truth_path:
+            print('Tokyo24/7: Selecting only one of the 12 cutouts per panorama')
             # followed nnSearchPostprocess in https://github.com/Relja/netvlad/blob/master/datasets/dbTokyo247.m
             # noinspection PyArgumentList
             _, predictions = faiss_index.search(qFeat, max(n_values) * 12)  # 12 cutouts per panorama
@@ -133,7 +138,7 @@
             predictions = np.array(predictions_new)
         else:
             # noinspection PyArgumentList
-            _, predictions = faiss_index.search(qFeat, max(n_values))
+            _, predictions = faiss_index.search(qFeat, min(faiss_index.ntotal, max(n_values)))
 
     reranked_predictions = local_matcher(predictions, eval_set, input_query_local_features_prefix,
                                          input_index_local_features_prefix, config, device)
@@ -142,16 +147,19 @@ def feature_match(eval_set, device, opt, config):
 
     write_kapture_output(opt, eval_set, predictions, 'NetVLAD_predictions.txt')
     write_kapture_output(opt, eval_set, reranked_predictions, 'PatchNetVLAD_predictions.txt')
-    print('Finished matching features. About to eval GT if GT was provided')
+    print('Finished matching features.')
 
     # for each query get those within threshold distance
     if opt.ground_truth_path is not None:
+        print('Calculating recalls using ground truth.')
         gt = eval_set.get_positives()
 
         global_recalls = compute_recall(gt, predictions, eval_set.numQ, n_values, 'NetVLAD')
         local_recalls = compute_recall(gt, reranked_predictions, eval_set.numQ, n_values, 'PatchNetVLAD')
 
         write_recalls_output(opt, global_recalls, local_recalls, n_values)
+    else:
+        print('No ground truth was provided; not calculating recalls.')
 
 
 def main():
diff --git a/match_two.py b/match_two.py
index bf7360e..7e62da8 100755
--- a/match_two.py
+++ b/match_two.py
@@ -158,7 +158,7 @@ def match_two(model, device, opt, config):
         scores, inlier_keypoints_one, inlier_keypoints_two = matcher.match(local_feats_one, local_feats_two)
         score = -normalise_func(scores, len(patch_sizes), patch_weights)
 
-        print("Similarity score between the two images is: '{:.5f}'. In this example, a larger score indicates a better match.".format(score))
+        print(f"Similarity score between the two images is: {score:.5f}. Larger scores indicate better matches.")
 
     if config['feature_match']['matcher'] == 'RANSAC':
         tqdm.write('====> Plotting Local Features')
diff --git a/patchnetvlad/models/local_matcher.py b/patchnetvlad/models/local_matcher.py
index 1bfaa51..3e0d7ec 100644
--- a/patchnetvlad/models/local_matcher.py
+++ b/patchnetvlad/models/local_matcher.py
@@ -149,17 +149,17 @@ def local_matcher(predictions, eval_set, input_query_local_features_prefix,
     for q_idx, pred in enumerate(tqdm(predictions, leave=False, desc='Patch compare pred')):
         diffs = np.zeros((predictions.shape[1], len(patch_sizes)))
 
-        image_name = os.path.splitext(os.path.basename(eval_set.images[eval_set.numDb + q_idx]))[0]
+        image_name_query = os.path.splitext(os.path.basename(eval_set.images[eval_set.numDb + q_idx]))[0]
         qfeat = []
         for patch_size in patch_sizes:
-            qfilename = input_query_local_features_prefix + '_' + 'psize{}_'.format(patch_size) + image_name + '.npy'
+            qfilename = input_query_local_features_prefix + '_' + 'psize{}_'.format(patch_size) + image_name_query + '.npy'
             qfeat.append(torch.transpose(torch.tensor(np.load(qfilename), device=device), 0, 1))
             # we pre-transpose here to save compute speed
         for k, candidate in enumerate(pred):
-            image_name = os.path.splitext(os.path.basename(eval_set.images[candidate]))[0]
+            image_name_index = os.path.splitext(os.path.basename(eval_set.images[candidate]))[0]
             dbfeat = []
             for patch_size in patch_sizes:
-                dbfilename = input_index_local_features_prefix + '_' + 'psize{}_'.format(patch_size) + image_name + '.npy'
+                dbfilename = input_index_local_features_prefix + '_' + 'psize{}_'.format(patch_size) + image_name_index + '.npy'
                 dbfeat.append(torch.tensor(np.load(dbfilename), device=device))
 
             diffs[k, :], _, _ = matcher.match(qfeat, dbfeat)
diff --git a/patchnetvlad/tools/datasets.py b/patchnetvlad/tools/datasets.py
index ae17bbc..31de322 100644
--- a/patchnetvlad/tools/datasets.py
+++ b/patchnetvlad/tools/datasets.py
@@ -33,6 +33,8 @@
 from PIL import Image
 from sklearn.neighbors import NearestNeighbors
 
+from patchnetvlad.tools import PATCHNETVLAD_ROOT_DIR
+
 
 class PlaceDataset(data.Dataset):
     def __init__(self, query_file_path, index_file_path, dataset_root_dir, ground_truth_path, config):
@@ -52,6 +54,10 @@ def __init__(self, query_file_path, index_file_path, dataset_root_dir, ground_tr
             self.images = self.database
 
         self.images = [os.path.join(dataset_root_dir, image) for image in self.images]
+        # check if images are relative to root dir
+        if not os.path.isfile(self.images[0]):
+            if os.path.isfile(os.path.join(PATCHNETVLAD_ROOT_DIR, self.images[0])):
+                self.images = [os.path.join(PATCHNETVLAD_ROOT_DIR, image) for image in self.images]
 
         self.positives = None
         self.distances = None
diff --git a/setup.py b/setup.py
index 9902fbc..098cde4 100644
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,4 @@
-import os
+import os, sys
 
 from setuptools import setup, find_packages
 
@@ -9,6 +9,17 @@
     long_description = f.read()
 
 
+install_require_list = [
+    'numpy', 'torch', 'torchvision',
+    'tqdm', 'scipy', 'Pillow', 'scikit-learn',
+    'faiss', 'natsort']
+
+# workaround as opencv-python does not show up in "pip list" within a conda environment
+# this is fine, as the conda recipe requires py-opencv anyway
+is_conda = os.path.exists(os.path.join(sys.prefix, 'conda-meta'))
+if not is_conda:
+    install_require_list.append('opencv-python')
+
 setup(name='patchnetvlad',
       version='0.1.0',
       description='Patch-NetVLAD: An open-source Python implementation of the CVPR2021 paper',
@@ -37,10 +48,7 @@
          'Programming Language :: Python :: 3.9',
      ],
      python_requires='>=3.6',
-     install_requires=[
-         'numpy', 'torch', 'torchvision',
-         'tqdm', 'scipy', 'Pillow', 'scikit-learn',
-         'faiss', 'natsort', 'opencv-python'],
+     install_requires=install_require_list,
      packages=find_packages(),
      keywords=[
          'python', 'place recognition', 'image retrieval', 'computer vision', 'robotics'

From a3a1a8e1c65d23c801ffcd27552b75a883699d1d Mon Sep 17 00:00:00 2001
From: Tobias Fischer
Date: Thu, 3 Jun 2021 08:49:44 +1000
Subject: [PATCH 2/3] Create python-publish.yml

---
 .github/workflows/python-publish.yml | 29 ++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)
 create mode 100644 .github/workflows/python-publish.yml

diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml
new file mode 100644
index 0000000..0a80fdb
--- /dev/null
+++ b/.github/workflows/python-publish.yml
@@ -0,0 +1,29 @@
+# This workflow will upload a Python Package using Twine when a release is created
+# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
+
+name: Upload Python Package
+
+on:
+  release:
+    types: [created]
+
+jobs:
+  deploy:
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/checkout@v2
+    - name: Set up Python
+      uses: actions/setup-python@v2
+      with:
+        python-version: '3.x'
+    - name: Install dependencies
+      run: |
+        python -m pip install --upgrade pip
+        pip install setuptools wheel twine
+    - name: Build and publish
+      env:
+        TWINE_USERNAME: __token__
+        TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }}
+      run: |
+        python setup.py sdist bdist_wheel
+        twine upload dist/*

From 2177ecbe1087749969b184bc1c2cff5ab9ccaee5 Mon Sep 17 00:00:00 2001
From: Tobias Fischer
Date: Thu, 3 Jun 2021 08:50:03 +1000
Subject: [PATCH 3/3] Bump version number

---
 setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/setup.py b/setup.py
index 098cde4..e8f6c30 100644
--- a/setup.py
+++ b/setup.py
@@ -21,7 +21,7 @@
     install_require_list.append('opencv-python')
 
 setup(name='patchnetvlad',
-      version='0.1.0',
+      version='0.1.1',
       description='Patch-NetVLAD: An open-source Python implementation of the CVPR2021 paper',
       long_description = long_description,
       long_description_content_type='text/markdown',
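
Why the neighbour count passed to faiss is capped in feature_match.py: faiss accepts a k larger than the number of indexed vectors and pads the missing result slots with -1 indices, which would later be used to index the image list and silently pick the wrong images. Below is a minimal standalone sketch of the padding behaviour and the cap; the tiny random database and the variable names are illustrative, not taken from the repository:

    import numpy as np
    import faiss

    dim = 8
    db_feat = np.random.rand(3, dim).astype('float32')      # tiny database: only 3 vectors
    query_feat = np.random.rand(2, dim).astype('float32')   # 2 query vectors

    faiss_index = faiss.IndexFlatL2(dim)
    faiss_index.add(db_feat)

    n_values = [1, 5, 10, 20]                    # the usual recall@N cut-offs
    k = min(faiss_index.ntotal, max(n_values))   # never ask for more neighbours than exist
    _, predictions = faiss_index.search(query_feat, k)
    print(predictions)                           # shape (2, 3), all indices valid, no -1 padding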
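The Tokyo 24/7 branch retrieves 12 times the usual number of neighbours because that dataset stores 12 cutouts per panorama, and the code then keeps only the best-ranked cutout per panorama (following nnSearchPostprocess in the original NetVLAD code). A hedged sketch of such a de-duplication step; mapping a cutout to its panorama by integer division is an assumption for illustration, not the repository's actual bookkeeping:

    # Keep the best-ranked cutout per panorama, assuming the database lists
    # 12 consecutive cutouts per panorama (illustrative assumption).
    def keep_one_cutout_per_panorama(pred_row, n_keep):
        seen_panoramas = set()
        kept = []
        for idx in pred_row:
            panorama_id = idx // 12  # hypothetical cutout-to-panorama mapping
            if panorama_id not in seen_panoramas:
                seen_panoramas.add(panorama_id)
                kept.append(idx)
            if len(kept) == n_keep:
                break
        return kept

    print(keep_one_cutout_per_panorama([3, 7, 25, 14], 3))  # -> [3, 25, 14]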
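The conda detection added to setup.py rests on a heuristic: a conda environment contains a 'conda-meta' directory directly under sys.prefix, while a plain virtualenv or system Python does not. A self-contained sketch of the idea, where the helper name running_in_conda is hypothetical rather than part of the package:

    import os
    import sys

    def running_in_conda() -> bool:
        # conda drops per-package metadata into <prefix>/conda-meta;
        # its presence marks a conda environment (a heuristic, not an official API)
        return os.path.exists(os.path.join(sys.prefix, 'conda-meta'))

    requirements = ['numpy', 'torch', 'torchvision']
    if not running_in_conda():
        # only pip-managed environments need opencv-python from PyPI;
        # under conda, the recipe's py-opencv dependency already provides cv2
        requirements.append('opencv-python')
    print(requirements)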