Path: blob/master/Face-Recognition-with-ArcFace/embeddings.py
# Original code
# https://github.com/ZhaoJ9014/face.evoLVe.PyTorch/blob/master/util/extract_feature_v1.py

import os

import cv2
import numpy as np
import torch
import torch.utils.data as data
import torchvision.datasets as datasets
import torch.nn.functional as F
import torchvision.transforms as transforms
from backbone import Backbone
from tqdm import tqdm


def get_embeddings(data_root, model_root, input_size=[112, 112], embedding_size=512):

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # check data and model paths
    assert os.path.exists(data_root)
    assert os.path.exists(model_root)
    print(f"Data root: {data_root}")

    # define image preprocessing
    transform = transforms.Compose(
        [
            transforms.Resize(
                [int(128 * input_size[0] / 112), int(128 * input_size[0] / 112)],
            ),  # smaller side resized
            transforms.CenterCrop([input_size[0], input_size[1]]),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
        ],
    )

    # define data loader
    dataset = datasets.ImageFolder(data_root, transform)
    loader = data.DataLoader(
        dataset, batch_size=1, shuffle=False, pin_memory=True, num_workers=0,
    )
    print(f"Number of classes: {len(loader.dataset.classes)}")

    # load backbone weights from a checkpoint
    backbone = Backbone(input_size)
    backbone.load_state_dict(torch.load(model_root, map_location=torch.device("cpu")))
    backbone.to(device)
    backbone.eval()

    # get embedding for each face
    embeddings = np.zeros([len(loader.dataset), embedding_size])
    with torch.no_grad():
        for idx, (image, _) in enumerate(
            tqdm(loader, desc="Create embeddings matrix", total=len(loader)),
        ):
            embeddings[idx, :] = F.normalize(backbone(image.to(device))).cpu()

    # get all original images
    images = []
    for img_path, _ in dataset.samples:
        img = cv2.imread(img_path)
        images.append(img)

    return images, embeddings
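

# ---------------------------------------------------------------------------
# Example usage: a minimal sketch, not part of the original file. The data
# layout (one subfolder of aligned 112x112 face crops per identity) and the
# paths below are assumptions for illustration. Because each row of the
# matrix is L2-normalized with F.normalize, the dot product of two rows
# equals their cosine similarity, which is the standard way to compare
# ArcFace embeddings.
if __name__ == "__main__":
    images, embeddings = get_embeddings(
        data_root="data/aligned_faces",  # hypothetical dataset folder
        model_root="checkpoint/backbone_ir50_ms1m_epoch120.pth",  # hypothetical checkpoint path
    )
    print(f"Embeddings matrix shape: {embeddings.shape}")  # (num_images, 512)

    if len(embeddings) >= 2:
        # cosine similarity between the first two faces (rows are unit vectors)
        similarity = float(np.dot(embeddings[0], embeddings[1]))
        print(f"Cosine similarity of the first two faces: {similarity:.4f}")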