import argparse
from pathlib import Path
from typing import List, Tuple

import matplotlib.pyplot as plt
import numpy as np
import torch
from matplotlib.colors import ListedColormap
from PIL import Image
from sklearn.cluster import KMeans
from tqdm import tqdm

from src.extractor import ViTExtractor


def find_correspondences_bop(image_path1, image_path2, extractor, num_pairs: int = 10, load_size: int = 224,
                             layer: int = 9, facet: str = 'key', bin: bool = True, thresh: float = 0.05,
                             model_type: str = 'dino_vits8', stride: int = 4) -> Tuple[List[Tuple[float, float]],
                                                                                       List[Tuple[float, float]],
                                                                                       Image.Image, Image.Image]:
    """
    Finds point correspondences between two images, reusing an already constructed extractor.
    :param image_path1: path to the first image.
    :param image_path2: path to the second image.
    :param extractor: a ViTExtractor instance used to compute descriptors and saliency maps.
    :param num_pairs: number of outputted corresponding pairs.
    :param load_size: size of the smaller edge of loaded images. If None, does not resize.
    :param layer: layer to extract descriptors from.
    :param facet: facet to extract descriptors from.
    :param bin: if True use a log-binning descriptor.
    :param thresh: threshold of saliency maps to distinguish fg and bg.
    :param model_type: type of model to extract descriptors from (unused here; determined by the passed extractor).
    :param stride: stride of the model (unused here; determined by the passed extractor).
    :return: list of points from image_path1, list of corresponding points from image_path2,
             the processed pil image of image_path1, and the processed pil image of image_path2.
    """
    # extract descriptors for each image
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    image1_batch, image1_pil = extractor.preprocess(image_path1, load_size)
    descriptors1 = extractor.extract_descriptors(image1_batch.to(device), layer, facet, bin)
    num_patches1, load_size1 = extractor.num_patches, extractor.load_size
    image2_batch, image2_pil = extractor.preprocess(image_path2, load_size)
    descriptors2 = extractor.extract_descriptors(image2_batch.to(device), layer, facet, bin)
    num_patches2, load_size2 = extractor.num_patches, extractor.load_size

    # extract saliency maps for each image
    saliency_map1 = extractor.extract_saliency_maps(image1_batch.to(device))[0]
    saliency_map2 = extractor.extract_saliency_maps(image2_batch.to(device))[0]
    # threshold saliency maps to get fg / bg masks
    fg_mask1 = saliency_map1 > thresh
    fg_mask2 = saliency_map2 > thresh

    # calculate similarity between image1 and image2 descriptors
    similarities = chunk_cosine_sim(descriptors1, descriptors2)

    # calculate best buddies
    image_idxs = torch.arange(num_patches1[0] * num_patches1[1], device=device)
    sim_1, nn_1 = torch.max(similarities, dim=-1)  # nn_1 - indices of block2 closest to block1
    sim_2, nn_2 = torch.max(similarities, dim=-2)  # nn_2 - indices of block1 closest to block2
    sim_1, nn_1 = sim_1[0, 0], nn_1[0, 0]
    sim_2, nn_2 = sim_2[0, 0], nn_2[0, 0]
    bbs_mask = nn_2[nn_1] == image_idxs

    # remove best buddies where at least one descriptor is marked bg by the saliency mask
    fg_mask2_new_coors = nn_2[fg_mask2]
    fg_mask2_mask_new_coors = torch.zeros(num_patches1[0] * num_patches1[1], dtype=torch.bool, device=device)
    fg_mask2_mask_new_coors[fg_mask2_new_coors] = True
    bbs_mask = torch.bitwise_and(bbs_mask, fg_mask1)
    bbs_mask = torch.bitwise_and(bbs_mask, fg_mask2_mask_new_coors)

    # apply k-means to extract k high-quality, well-distributed correspondence pairs
    bb_descs1 = descriptors1[0, 0, bbs_mask, :].cpu().numpy()
    bb_descs2 = descriptors2[0, 0, nn_1[bbs_mask], :].cpu().numpy()
    # apply k-means on a concatenation of each pair's descriptors
    all_keys_together = np.concatenate((bb_descs1, bb_descs2), axis=1)
    n_clusters = min(num_pairs, len(all_keys_together))  # if not enough pairs, show all found pairs
    length = np.sqrt((all_keys_together ** 2).sum(axis=1))[:, None]
    normalized = all_keys_together / length
    kmeans = KMeans(n_clusters=n_clusters, random_state=0).fit(normalized)
    bb_topk_sims = np.full(n_clusters, -np.inf)
    bb_indices_to_show = np.full(n_clusters, 0, dtype=np.int64)  # integer dtype: these are used as indices below

    # rank pairs by their mean saliency value and keep the most salient pair per cluster
    bb_cls_attn1 = saliency_map1[bbs_mask]
    bb_cls_attn2 = saliency_map2[nn_1[bbs_mask]]
    ranks = (bb_cls_attn1 + bb_cls_attn2) / 2
    for i, (label, rank) in enumerate(zip(kmeans.labels_, ranks)):
        if rank > bb_topk_sims[label]:
            bb_topk_sims[label] = rank
            bb_indices_to_show[label] = i

    # get coordinates to show
    indices_to_show = torch.nonzero(bbs_mask, as_tuple=False).squeeze(dim=1)[bb_indices_to_show]
    img1_indices_to_show = torch.arange(num_patches1[0] * num_patches1[1], device=device)[indices_to_show]
    img2_indices_to_show = nn_1[indices_to_show]
    # coordinates in the descriptor map's dimensions
    img1_y_to_show = (img1_indices_to_show // num_patches1[1]).cpu().numpy()
    img1_x_to_show = (img1_indices_to_show % num_patches1[1]).cpu().numpy()
    img2_y_to_show = (img2_indices_to_show // num_patches2[1]).cpu().numpy()
    img2_x_to_show = (img2_indices_to_show % num_patches2[1]).cpu().numpy()
    points1, points2 = [], []
    for y1, x1, y2, x2 in zip(img1_y_to_show, img1_x_to_show, img2_y_to_show, img2_x_to_show):
        x1_show = (int(x1) - 1) * extractor.stride[1] + extractor.stride[1] + extractor.p // 2
        y1_show = (int(y1) - 1) * extractor.stride[0] + extractor.stride[0] + extractor.p // 2
        x2_show = (int(x2) - 1) * extractor.stride[1] + extractor.stride[1] + extractor.p // 2
        y2_show = (int(y2) - 1) * extractor.stride[0] + extractor.stride[0] + extractor.p // 2
        points1.append((y1_show, x1_show))
        points2.append((y2_show, x2_show))
    return points1, points2, image1_pil, image2_pil
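
# The best-buddies test above keeps a pair (i, j) only when patch i of image 1 and
# patch j of image 2 are each other's nearest neighbours, i.e. nn_2[nn_1[i]] == i.
# A minimal, self-contained illustration on a toy similarity matrix (not called by
# the pipeline; purely for reference):
def _best_buddies_demo() -> torch.Tensor:
    # rows: image-1 patches, columns: image-2 patches
    sims = torch.tensor([[0.9, 0.1, 0.3],
                         [0.2, 0.8, 0.1],
                         [0.4, 0.2, 0.1]])
    nn_1 = sims.argmax(dim=1)  # closest image-2 patch for every image-1 patch
    nn_2 = sims.argmax(dim=0)  # closest image-1 patch for every image-2 patch
    image_idxs = torch.arange(sims.shape[0])
    # patch 2 of image 1 maps to patch 0 of image 2, but patch 0 of image 2 prefers
    # patch 0 of image 1, so only the first two pairs are mutual:
    return nn_2[nn_1] == image_idxs  # tensor([True, True, False])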

def find_correspondences(image_path1: str, image_path2: str, num_pairs: int = 10, load_size: int = 224,
                         layer: int = 9, facet: str = 'key', bin: bool = True, thresh: float = 0.05,
                         model_type: str = 'dino_vits8', stride: int = 4) -> Tuple[List[Tuple[float, float]],
                                                                                   List[Tuple[float, float]],
                                                                                   Image.Image, Image.Image]:
    """
    Finds point correspondences between two images.
    :param image_path1: path to the first image.
    :param image_path2: path to the second image.
    :param num_pairs: number of outputted corresponding pairs.
    :param load_size: size of the smaller edge of loaded images. If None, does not resize.
    :param layer: layer to extract descriptors from.
    :param facet: facet to extract descriptors from.
    :param bin: if True use a log-binning descriptor.
    :param thresh: threshold of saliency maps to distinguish fg and bg.
    :param model_type: type of model to extract descriptors from.
    :param stride: stride of the model.
    :return: list of points from image_path1, list of corresponding points from image_path2,
             the processed pil image of image_path1, and the processed pil image of image_path2.
    """
    # extract descriptors for each image
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    extractor = ViTExtractor(model_type, stride, device=device)
    image1_batch, image1_pil = extractor.preprocess(image_path1, load_size)
    descriptors1 = extractor.extract_descriptors(image1_batch.to(device), layer, facet, bin)
    num_patches1, load_size1 = extractor.num_patches, extractor.load_size
    image2_batch, image2_pil = extractor.preprocess(image_path2, load_size)
    descriptors2 = extractor.extract_descriptors(image2_batch.to(device), layer, facet, bin)
    num_patches2, load_size2 = extractor.num_patches, extractor.load_size

    # extract saliency maps for each image
    saliency_map1 = extractor.extract_saliency_maps(image1_batch.to(device))[0]
    saliency_map2 = extractor.extract_saliency_maps(image2_batch.to(device))[0]
    # threshold saliency maps to get fg / bg masks
    fg_mask1 = saliency_map1 > thresh
    fg_mask2 = saliency_map2 > thresh

    # calculate similarity between image1 and image2 descriptors
    similarities = chunk_cosine_sim(descriptors1, descriptors2)

    # calculate best buddies
    image_idxs = torch.arange(num_patches1[0] * num_patches1[1], device=device)
    sim_1, nn_1 = torch.max(similarities, dim=-1)  # nn_1 - indices of block2 closest to block1
    sim_2, nn_2 = torch.max(similarities, dim=-2)  # nn_2 - indices of block1 closest to block2
    sim_1, nn_1 = sim_1[0, 0], nn_1[0, 0]
    sim_2, nn_2 = sim_2[0, 0], nn_2[0, 0]
    bbs_mask = nn_2[nn_1] == image_idxs

    # remove best buddies where at least one descriptor is marked bg by the saliency mask
    fg_mask2_new_coors = nn_2[fg_mask2]
    fg_mask2_mask_new_coors = torch.zeros(num_patches1[0] * num_patches1[1], dtype=torch.bool, device=device)
    fg_mask2_mask_new_coors[fg_mask2_new_coors] = True
    bbs_mask = torch.bitwise_and(bbs_mask, fg_mask1)
    bbs_mask = torch.bitwise_and(bbs_mask, fg_mask2_mask_new_coors)

    # apply k-means to extract k high-quality, well-distributed correspondence pairs
    bb_descs1 = descriptors1[0, 0, bbs_mask, :].cpu().numpy()
    bb_descs2 = descriptors2[0, 0, nn_1[bbs_mask], :].cpu().numpy()
    # apply k-means on a concatenation of each pair's descriptors
    all_keys_together = np.concatenate((bb_descs1, bb_descs2), axis=1)
    n_clusters = min(num_pairs, len(all_keys_together))  # if not enough pairs, show all found pairs
    length = np.sqrt((all_keys_together ** 2).sum(axis=1))[:, None]
    normalized = all_keys_together / length
    kmeans = KMeans(n_clusters=n_clusters, random_state=0).fit(normalized)
    bb_topk_sims = np.full(n_clusters, -np.inf)
    bb_indices_to_show = np.full(n_clusters, 0, dtype=np.int64)  # integer dtype: these are used as indices below

    # rank pairs by their mean saliency value and keep the most salient pair per cluster
    bb_cls_attn1 = saliency_map1[bbs_mask]
    bb_cls_attn2 = saliency_map2[nn_1[bbs_mask]]
    ranks = (bb_cls_attn1 + bb_cls_attn2) / 2
    for i, (label, rank) in enumerate(zip(kmeans.labels_, ranks)):
        if rank > bb_topk_sims[label]:
            bb_topk_sims[label] = rank
            bb_indices_to_show[label] = i

    # get coordinates to show
    indices_to_show = torch.nonzero(bbs_mask, as_tuple=False).squeeze(dim=1)[bb_indices_to_show]
    img1_indices_to_show = torch.arange(num_patches1[0] * num_patches1[1], device=device)[indices_to_show]
    img2_indices_to_show = nn_1[indices_to_show]
    # coordinates in the descriptor map's dimensions
    img1_y_to_show = (img1_indices_to_show // num_patches1[1]).cpu().numpy()
    img1_x_to_show = (img1_indices_to_show % num_patches1[1]).cpu().numpy()
    img2_y_to_show = (img2_indices_to_show // num_patches2[1]).cpu().numpy()
    img2_x_to_show = (img2_indices_to_show % num_patches2[1]).cpu().numpy()
    points1, points2 = [], []
    for y1, x1, y2, x2 in zip(img1_y_to_show, img1_x_to_show, img2_y_to_show, img2_x_to_show):
        x1_show = (int(x1) - 1) * extractor.stride[1] + extractor.stride[1] + extractor.p // 2
        y1_show = (int(y1) - 1) * extractor.stride[0] + extractor.stride[0] + extractor.p // 2
        x2_show = (int(x2) - 1) * extractor.stride[1] + extractor.stride[1] + extractor.p // 2
        y2_show = (int(y2) - 1) * extractor.stride[0] + extractor.stride[0] + extractor.p // 2
        points1.append((y1_show, x1_show))
        points2.append((y2_show, x2_show))
    return points1, points2, image1_pil, image2_pil

def draw_correspondences(points1: List[Tuple[float, float]], points2: List[Tuple[float, float]],
                         image1: Image.Image, image2: Image.Image) -> Tuple[plt.Figure, plt.Figure]:
    """
    Draws point correspondences on images.
    :param points1: a list of (y, x) coordinates of image1, corresponding to points2.
    :param points2: a list of (y, x) coordinates of image2, corresponding to points1.
    :param image1: a PIL image.
    :param image2: a PIL image.
    :return: two figures of images with marked points.
    """
    assert len(points1) == len(points2), f"points lengths are incompatible: {len(points1)} != {len(points2)}."
    num_points = len(points1)
    fig1, ax1 = plt.subplots()
    ax1.axis('off')
    fig2, ax2 = plt.subplots()
    ax2.axis('off')
    ax1.imshow(image1)
    ax2.imshow(image2)
    if num_points > 15:
        cmap = plt.get_cmap('tab10')
        # tab10 only has 10 entries; cycle so colors repeat instead of clamping to one
        colors = np.array([cmap(x % cmap.N) for x in range(num_points)])
    else:
        cmap = ListedColormap(["red", "yellow", "blue", "lime", "magenta", "indigo", "orange", "cyan", "darkgreen",
                               "maroon", "black", "white", "chocolate", "gray", "blueviolet"])
        colors = np.array([cmap(x) for x in range(num_points)])
    radius1, radius2 = 8, 1
    for point1, point2, color in zip(points1, points2, colors):
        y1, x1 = point1
        circ1_1 = plt.Circle((x1, y1), radius1, facecolor=color, edgecolor='white', alpha=0.5)
        circ1_2 = plt.Circle((x1, y1), radius2, facecolor=color, edgecolor='white')
        ax1.add_patch(circ1_1)
        ax1.add_patch(circ1_2)
        y2, x2 = point2
        circ2_1 = plt.Circle((x2, y2), radius1, facecolor=color, edgecolor='white', alpha=0.5)
        circ2_2 = plt.Circle((x2, y2), radius2, facecolor=color, edgecolor='white')
        ax2.add_patch(circ2_1)
        ax2.add_patch(circ2_2)
    return fig1, fig2
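
# A minimal end-to-end sketch tying find_correspondences to draw_correspondences.
# The image paths and output names are placeholders supplied by the caller; nothing
# here runs on import:
def _demo_pair(image_path_a: str, image_path_b: str, out_dir: str = '.') -> None:
    with torch.no_grad():
        points1, points2, image1_pil, image2_pil = find_correspondences(image_path_a, image_path_b)
    fig1, fig2 = draw_correspondences(points1, points2, image1_pil, image2_pil)
    fig1.savefig(f'{out_dir}/corresp_a.png', bbox_inches='tight', pad_inches=0)
    fig2.savefig(f'{out_dir}/corresp_b.png', bbox_inches='tight', pad_inches=0)
    plt.close('all')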

def chunk_cosine_sim(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    """
    Computes cosine similarity between all possible pairs in two sets of vectors.
    Operates in chunks so that no large amount of GPU RAM is required.
    :param x: a tensor of descriptors of shape Bx1x(t_x)xd' where d' is the dimensionality of the descriptors
              and t_x is the number of tokens in x.
    :param y: a tensor of descriptors of shape Bx1x(t_y)xd' where d' is the dimensionality of the descriptors
              and t_y is the number of tokens in y.
    :return: cosine similarity between all descriptors in x and all descriptors in y. Has shape of Bx1x(t_x)x(t_y).
    """
    result_list = []
    num_token_x = x.shape[2]
    for token_idx in range(num_token_x):
        token = x[:, :, token_idx, :].unsqueeze(dim=2)  # Bx1x1xd'
        result_list.append(torch.nn.CosineSimilarity(dim=3)(token, y))  # Bx1x(t_y)
    return torch.stack(result_list, dim=2)  # Bx1x(t_x)x(t_y)
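
# chunk_cosine_sim trades speed for memory by looping over the tokens of x one at a
# time. When both descriptor sets fit in memory at once, an equivalent single-shot
# computation (a sketch under that assumption; the pipeline above keeps using the
# chunked version) is to L2-normalize both sets and take a matrix product:
def cosine_sim_dense(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    """Same Bx1x(t_x)x(t_y) result as chunk_cosine_sim, computed in one matmul."""
    x_norm = torch.nn.functional.normalize(x, dim=-1)  # unit-length descriptors
    y_norm = torch.nn.functional.normalize(y, dim=-1)
    return x_norm @ y_norm.transpose(-1, -2)  # Bx1x(t_x)x(t_y)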

""" taken from https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse """
def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value expected.')


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Facilitate ViT Descriptor point correspondences.')
    parser.add_argument('--root_dir', type=str, required=True, help='The root dir of image pairs.')
    parser.add_argument('--save_dir', type=str, required=True, help='The root save dir for image pair results.')
    parser.add_argument('--load_size', default=224, type=int, help='load size of the input images.')
    parser.add_argument('--stride', default=4, type=int, help="""stride of the first convolution layer.
                                                                 A small stride -> higher resolution.""")
    parser.add_argument('--model_type', default='dino_vits8', type=str,
                        help="""type of model to extract descriptors from.
                                Choose from [dino_vits8 | dino_vits16 | dino_vitb8 | dino_vitb16 |
                                vit_small_patch8_224 | vit_small_patch16_224 | vit_base_patch8_224 |
                                vit_base_patch16_224]""")
    parser.add_argument('--facet', default='key', type=str,
                        help="""facet to create descriptors from.
                                options: ['key' | 'query' | 'value' | 'token']""")
    parser.add_argument('--layer', default=9, type=int, help="layer to create descriptors from.")
    parser.add_argument('--bin', default='True', type=str2bool, help="create a binned descriptor if True.")
    parser.add_argument('--thresh', default=0.05, type=float, help='saliency map threshold to distinguish fg / bg.')
    parser.add_argument('--num_pairs', default=10, type=int, help='Final number of correspondences.')

    args = parser.parse_args()

    with torch.no_grad():
        # prepare directories
        root_dir = Path(args.root_dir)
        pair_dirs = [x for x in root_dir.iterdir() if x.is_dir()]
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True, parents=True)

        for pair_dir in tqdm(pair_dirs):
            curr_images = [x for x in pair_dir.iterdir() if x.suffix.lower() in ['.jpg', '.png', '.jpeg']]
            assert len(curr_images) == 2, f"{pair_dir} contains {len(curr_images)} images instead of 2."
            curr_save_dir = save_dir / pair_dir.name
            curr_save_dir.mkdir(parents=True, exist_ok=True)

            # compute point correspondences (pass model_type and stride so the CLI flags take effect)
            points1, points2, image1_pil, image2_pil = find_correspondences(curr_images[0], curr_images[1],
                                                                            args.num_pairs, args.load_size,
                                                                            args.layer, args.facet, args.bin,
                                                                            args.thresh, args.model_type,
                                                                            args.stride)
            # save point correspondences
            with open(curr_save_dir / "correspondence_A.txt", "w") as file1, \
                    open(curr_save_dir / "correspondence_Bt.txt", "w") as file2:
                for point1, point2 in zip(points1, points2):
                    file1.write(f'{point1}\n')
                    file2.write(f'{point2}\n')

            fig1, fig2 = draw_correspondences(points1, points2, image1_pil, image2_pil)
            fig1.savefig(curr_save_dir / f'{curr_images[0].stem}_corresp.png', bbox_inches='tight', pad_inches=0)
            fig2.savefig(curr_save_dir / f'{curr_images[1].stem}_corresp.png', bbox_inches='tight', pad_inches=0)
            plt.close('all')
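
# Example invocation (the script and directory names are placeholders; --root_dir
# must contain one sub-directory per image pair, each holding exactly two images):
#   python correspondences.py --root_dir images/pairs --save_dir results/pairs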