Path: blob/master/FaceMaskOverlay/lib/datasets/wflw.py
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Created by Tianheng Cheng([email protected]), Yang Zhao
# ------------------------------------------------------------------------------

import os
import random

import torch
import torch.utils.data as data
import pandas as pd
from PIL import Image
import numpy as np

from ..utils.transforms import fliplr_joints, crop, generate_target, transform_pixel


class WFLW(data.Dataset):

    def __init__(self, cfg, is_train=True, transform=None):
        # specify annotation file for dataset
        if is_train:
            self.csv_file = cfg.DATASET.TRAINSET
        else:
            self.csv_file = cfg.DATASET.TESTSET

        self.is_train = is_train
        self.transform = transform
        self.data_root = cfg.DATASET.ROOT
        self.input_size = cfg.MODEL.IMAGE_SIZE
        self.output_size = cfg.MODEL.HEATMAP_SIZE
        self.sigma = cfg.MODEL.SIGMA
        self.scale_factor = cfg.DATASET.SCALE_FACTOR
        self.rot_factor = cfg.DATASET.ROT_FACTOR
        self.label_type = cfg.MODEL.TARGET_TYPE
        self.flip = cfg.DATASET.FLIP

        # load annotations
        self.landmarks_frame = pd.read_csv(self.csv_file)

        # ImageNet channel statistics used for input normalization
        self.mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
        self.std = np.array([0.229, 0.224, 0.225], dtype=np.float32)

    def __len__(self):
        return len(self.landmarks_frame)

    def __getitem__(self, idx):
        # each CSV row: image path, scale, center (w, h), then the (x, y) landmarks
        image_path = os.path.join(self.data_root,
                                  self.landmarks_frame.iloc[idx, 0])
        scale = self.landmarks_frame.iloc[idx, 1]

        center_w = self.landmarks_frame.iloc[idx, 2]
        center_h = self.landmarks_frame.iloc[idx, 3]
        center = torch.Tensor([center_w, center_h])

        pts = self.landmarks_frame.iloc[idx, 4:].values
        pts = pts.astype('float').reshape(-1, 2)

        scale *= 1.25
        nparts = pts.shape[0]
        img = np.array(Image.open(image_path).convert('RGB'), dtype=np.float32)

        # data augmentation (training only): random scaling, rotation, flipping
        r = 0
        if self.is_train:
            scale = scale * (random.uniform(1 - self.scale_factor,
                                            1 + self.scale_factor))
            r = random.uniform(-self.rot_factor, self.rot_factor) \
                if random.random() <= 0.6 else 0
            if random.random() <= 0.5 and self.flip:
                img = np.fliplr(img)
                pts = fliplr_joints(pts, width=img.shape[1], dataset='WFLW')
                center[0] = img.shape[1] - center[0]

        # crop the face region and resize it to the network input size
        img = crop(img, center, scale, self.input_size, rot=r)

        # build one Gaussian heatmap per landmark at the output resolution
        target = np.zeros((nparts, self.output_size[0], self.output_size[1]))
        tpts = pts.copy()

        for i in range(nparts):
            if tpts[i, 1] > 0:
                tpts[i, 0:2] = transform_pixel(tpts[i, 0:2]+1, center,
                                               scale, self.output_size, rot=r)
                target[i] = generate_target(target[i], tpts[i]-1, self.sigma,
                                            label_type=self.label_type)

        # normalize and convert HWC -> CHW
        img = img.astype(np.float32)
        img = (img/255.0 - self.mean) / self.std
        img = img.transpose([2, 0, 1])
        target = torch.Tensor(target)
        tpts = torch.Tensor(tpts)
        center = torch.Tensor(center)

        meta = {'index': idx, 'center': center, 'scale': scale,
                'pts': torch.Tensor(pts), 'tpts': tpts}

        return img, target, meta


if __name__ == '__main__':
    pass
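
# ------------------------------------------------------------------------------
# Usage sketch (not part of the original module): a minimal example of feeding
# this Dataset into a torch DataLoader. The cfg object below is a hypothetical
# stand-in built with SimpleNamespace; in the repo the config normally comes
# from an experiment config file, and every path and hyper-parameter value here
# is an illustrative assumption, not the project's actual configuration. Run it
# as a module (e.g. `python -m lib.datasets.wflw`) so the relative import above
# resolves.
# ------------------------------------------------------------------------------
if __name__ == '__main__':
    from types import SimpleNamespace

    cfg = SimpleNamespace(
        DATASET=SimpleNamespace(
            TRAINSET='data/wflw/face_landmarks_wflw_train.csv',  # assumed path
            TESTSET='data/wflw/face_landmarks_wflw_test.csv',    # assumed path
            ROOT='data/wflw/images',                             # assumed path
            SCALE_FACTOR=0.25, ROT_FACTOR=30, FLIP=True),
        MODEL=SimpleNamespace(
            IMAGE_SIZE=[256, 256], HEATMAP_SIZE=[64, 64],
            SIGMA=1.5, TARGET_TYPE='Gaussian'))

    train_dataset = WFLW(cfg, is_train=True)
    train_loader = data.DataLoader(train_dataset, batch_size=16,
                                   shuffle=True, num_workers=4)

    # one sample: normalized (3, 256, 256) image, (98, 64, 64) heatmaps, metadata
    img, target, meta = train_dataset[0]
    print(img.shape, target.shape, meta['center'], meta['scale'])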