fork download
  1. import os
  2. import re
  3. import cv2
  4. import time
  5. import copy
  6. import math
  7. import glob
  8. import datetime
  9. import numpy as np
  10. import pandas as pd
  11.  
  12. from multiprocessing import cpu_count
  13.  
  14. import torch
  15. import torch.nn as nn
  16. import torch.optim as optim
  17.  
  18. from torchvision import transforms
  19. from torch.utils.data import Dataset
  20. from torch.autograd import Variable
  21.  
  22. from config import config, parser
  23. from align_faces import FaceAligner
  24. from dataload import FaceDataset, loss_weight
  25. from agegenpredmodel import AgeGenPredModel, image_transformer
  26.  
  27. from collections import OrderedDict
  28.  
  29.  
class AgePredModel:
    """ train/test class for age/gender prediction """

    def __init__(self,
                 load_best=True,
                 model_name='res18_cls70',
                 eval_use_only=False,
                 new_last_layer=False,
                 new_training_process=True):
        """
        :param load_best: if set, load the best weight, else load the latest weights,
                          usually load the best when doing evaluation, and latest when
                          doing training.
        :param model_name: name used for saving model weight and training info.
        :param eval_use_only: if set, model will not load training/testing data, and
                              change the behavior of some layers (dropout, batch norm).
        :param new_last_layer: if the model only changed last fully connected layers,
                               if set, only train last fully connected layers at first
                               2 epochs.
        :param new_training_process: if set, create a new model and start training.
        """
        # init params
        self.model = AgeGenPredModel()
        self.model_name = model_name
        self.use_gpu = torch.cuda.is_available()
        self.transformer = image_transformer()  # dict of train/val torchvision transforms
        self.load_best = load_best
        self.new_train = new_training_process
        self.new_last_layer = new_last_layer
        # checkpoint paths: "best" holds lowest-val-MAE weights, "last" the most recent epoch
        self.checkpoint_best = config.model + "{}_best.nn".format(model_name.lower())
        self.checkpoint_last = config.model + "{}_last.nn".format(model_name.lower())
        self.csv_path = config.model + self.model_name + ".csv"

        # training details, all read from the parsed config file
        self.batch_size = int(parser['TRAIN']['batch_size'])
        self.num_epochs = int(parser['TRAIN']['num_epochs'])
        self.loading_jobs = int(parser['TRAIN']['jobs_to_load_data'])
        self.max_no_reduce = int(parser['TRAIN']['max_no_reduce'])
        self.age_cls_unit = int(parser['RacNet']['age_cls_unit'])
        self.weight_decay = float(parser['TRAIN']['weight_decay'])
        self.age_divide = float(parser['DATA']['age_divide'])
        self.min_lr_rate = float(parser['TRAIN']['min_lr_rate'])
        self.lr_reduce_by = float(parser['TRAIN']['lr_reduce_by'])
        self.lr_rate = float(parser['TRAIN']['init_lr_rate'])

        # reduce loss on gender so the model focus on age pred
        self.reduce_gen_loss = float(parser['TRAIN']['reduce_gen_loss'])
        self.reduce_age_mae = float(parser['TRAIN']['reduce_age_mae'])

        # lazily-loaded weights flag; criterions for the two heads
        # (age as per-bin BCE classification, gender as 2-way cross entropy)
        self.weight_loaded = False
        self.age_cls_criterion = nn.BCELoss(weight=loss_weight)
        self.age_rgs_criterion = nn.L1Loss()
        self.gender_criterion = nn.CrossEntropyLoss()
        self.aligner = FaceAligner()

        if self.use_gpu:
            self.model = self.model.cuda()
            self.age_cls_criterion = self.age_cls_criterion.cuda()
            self.age_rgs_criterion = self.age_rgs_criterion.cuda()
            self.gender_criterion = self.gender_criterion.cuda()

        # csv checkpoint details: one row appended per train/val phase (see train_model)
        columns = ['Timstamp', 'Epoch', 'Phase', 'AGE ACC', 'AGE MAE', 'GEN ACC',
                   'BEST AGE ACC', 'BEST AGE MAE', 'BEST GEN ACC', 'Lr_rate']
        self.csv_checkpoint = pd.DataFrame(data=[], columns=columns)
        # resume the csv log when continuing a previous training run
        if not self.new_train and os.path.exists(self.csv_path):
            self.csv_checkpoint = pd.read_csv(self.csv_path)

        # load no training data when evaluation,
        if not eval_use_only:
            self.load_data()
  101.  
  102. def load_data(self):
  103. """
  104. initiate dataloader processes
  105. :return:
  106. """
  107. print("[AgePredModel] load_data: start loading...")
  108. image_datasets = {x: FaceDataset(config.pics + x + '/', self.transformer[x])
  109. for x in ['train', 'val']}
  110. self.dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],
  111. batch_size=self.batch_size,
  112. shuffle=True,
  113. num_workers=self.loading_jobs)
  114. for x in ['train', 'val']}
  115. self.dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
  116. print("[AgePredModel] load_data: Done! Get {} for train and {} for test!"
  117. .format(self.dataset_sizes['train'],
  118. self.dataset_sizes['val']))
  119. print("[AgePredModel] load_data: loading finished !")
  120.  
  121. @staticmethod
  122. def rand_init_layer(m):
  123. """
  124. initialization method
  125. :param m: torch.module
  126. :return:
  127. """
  128. if isinstance(m, nn.Conv2d):
  129. n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
  130. m.weight.data.normal_(0, math.sqrt(2. / n))
  131. elif isinstance(m, nn.BatchNorm2d):
  132. m.weight.data.fill_(1)
  133. m.bias.data.zero_()
  134. elif isinstance(m, nn.Linear):
  135. size = m.weight.size()
  136. fan_out = size[0] # number of rows
  137. fan_in = size[1] # number of columns
  138. variance = np.sqrt(2.0 / (fan_in + fan_out))
  139. m.weight.data.normal_(0.0, variance)
  140.  
  141. def soft_load_statedic(self, state_dict):
  142. """
  143. WARNING: Always run model = nn.DataParallel after this!
  144. load network parameters in a soft way, the original load_statedic
  145. func from torch is prone to raise exceptions when mismatch. this
  146. function skip all incapatible weights and print the info intead of
  147. raising a exception.
  148. :param state_dict: saved dict
  149. :return:
  150. """
  151. # remove `module.` prefix when using nn.DataParallel
  152. new_state_dict = OrderedDict()
  153. for name, weight in state_dict.items():
  154. if len(name) >= 7 and name[:7].lower() == 'module.':
  155. name = name[7:]
  156. new_state_dict[name] = weight
  157. state_dict = new_state_dict
  158.  
  159. # start loading
  160. own_state = self.model.state_dict()
  161. error_layers = []
  162. for name, param in state_dict.items():
  163. if name in own_state:
  164. if isinstance(param, nn.Parameter):
  165. param = param.data
  166. try:
  167. own_state[name].copy_(param)
  168. except Exception:
  169. print('[soft_load_statedic] WARNING: incapatible dim found for {} = {} != {}.'
  170. .format(name, own_state[name].size(), param.size()))
  171. error_layers.append(name)
  172. else:
  173. print('[soft_load_statedic] Unexpected key "{}" in saved state_dict'.format(name))
  174. missing = set.union(set(own_state.keys()) - set(state_dict.keys()), set(error_layers))
  175. if len(missing) > 0:
  176. print('[soft_load_statedic] keys in state_dict: "{}" not loaded!'.format(missing))
  177. return
  178.  
    def train_model(self):
        """
        Run the full training loop: train/val phases for self.num_epochs epochs.
        Saves the best weights (lowest val age MAE) to self.checkpoint_best, the
        latest weights to self.checkpoint_last, appends per-phase metrics to the
        csv log, and reduces the learning rate after self.max_no_reduce val
        epochs without MAE improvement.
        :return: the trained model
        """
        print("[AgePredModel] train_model: Start training...")

        # 1.0.0.0 define Vars (best-so-far metrics on the val set)
        best_gen_acc = 0.
        best_age_acc = 0.
        best_age_mae = 99.
        not_reduce_rounds = 0  # val epochs since the last MAE improvement

        # 2.0.0.0 init optimizer over trainable params only
        self.optimizer = optim.Adam(filter(lambda p: p.requires_grad, self.model.parameters()),
                                    lr=self.lr_rate,
                                    weight_decay=self.weight_decay)

        # 3.0.0.0 load weights if possible
        checkpoint_path = self.checkpoint_best if self.load_best else self.checkpoint_last
        if self.new_train:
            print("[new_training_process] NO WEIGHT LOADED!")
        elif os.path.exists(checkpoint_path):
            checkpoint = torch.load(checkpoint_path, map_location=None if self.use_gpu else 'cpu')
            self.soft_load_statedic(checkpoint['state_dic'])
            print("[train_model] Params Loading Finished!")
            self.weight_loaded = True
            try:
                best_gen_acc = checkpoint['best_gen_acc']
                best_age_acc = checkpoint['best_age_acc']
                best_age_mae = checkpoint['best_age_mae']
                # self.lr_rate = checkpoint['lr_rate']
                self.optimizer.load_state_dict(checkpoint['optimizer'])
                # override the checkpoint's lr with the configured one
                for param_group in self.optimizer.param_groups:
                    param_group['lr'] = self.lr_rate
                print("[train_model] Load Optimizer Successful!")
            except:
                # NOTE(review): bare except hides the actual loading error
                print("[train_model] ERROR: Loading Params/Optimizer Error!")
        else:
            print("[train_model] Checkpoint Not Found, Train From Scratch!")

        # report model params
        all_params = sum([np.prod(p.size()) for p in self.model.parameters()])
        trainable_params = sum([np.prod(p.size()) for p in
                                filter(lambda p: p.requires_grad, self.model.parameters())])
        print("[AgePredModel] Model has {}k out of {}k trainable params "
              .format(trainable_params // 1000, all_params // 1000))

        # use when having multiple GPUs available
        if torch.cuda.device_count() > 1:
            self.model = nn.DataParallel(self.model)

        # 4.0.0.0 start each epoch
        layer_to_freeze = 0
        for epoch in range(self.num_epochs):
            print('\nStart Epoch {}/{} ...'.format(epoch + 1, self.num_epochs))
            print('-' * 16)

            # automatically freeze some layers on first 2 epochs
            if epoch == 0:
                new_layer_to_freeze = 8  # resnet-18 has 8 modules in pytorch
            elif epoch == 1:
                new_layer_to_freeze = 6
            else:
                new_layer_to_freeze = 0
            if (self.new_last_layer or self.new_train) \
                    and layer_to_freeze != new_layer_to_freeze:
                layer_to_freeze = new_layer_to_freeze
                # free some layers: only children past the freeze index get gradients
                model = self.model
                if torch.cuda.device_count() > 1:
                    model = self.model.module  # unwrap nn.DataParallel
                for i, child in enumerate(model.resNet.children()):
                    requires_grad = i >= int(layer_to_freeze)
                    for param in child.parameters():
                        param.requires_grad = requires_grad
                # re-define the optimizer so it only tracks the newly trainable params
                self.optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                                            lr=self.lr_rate,
                                            weight_decay=self.weight_decay)

            # 4.1.0.0 loop over training and validation phase
            for phase in ['train', 'val']:
                # 4.1.1.0 shift train/eval model
                self.model.train(phase == 'train')
                torch.cuda.empty_cache()

                epoch_age_tp = 0.
                epoch_age_mae = 0.
                epoch_gender_tp = 0.
                processed_data = 0

                # 4.1.2.0 iterate over each batch.
                epoch_start_time = time.time()
                for data in self.dataloaders[phase]:
                    # 4.1.2.1 get the inputs and labels
                    inputs, gender_true, age_rgs_true, age_cls_true = data
                    processed_data += inputs.size(0)

                    # 4.1.2.2 wrap inputs&oputpus into Variable
                    #         NOTE: set voloatile = True when
                    #         doing evaluation helps reduce
                    #         gpu mem usage.
                    volatile = phase == 'val'  # NOTE(review): computed but never used below
                    if self.use_gpu:
                        inputs = Variable(inputs.cuda(), requires_grad=False)
                        gender_true = Variable(gender_true.cuda(), requires_grad=False)
                        # age_rgs_true = Variable(age_rgs_true.cuda(), volatile=volatile)
                        age_cls_true = Variable(age_cls_true.cuda(), requires_grad=False)
                    else:
                        inputs = Variable(inputs, requires_grad=False)
                        gender_true = Variable(gender_true, requires_grad=False)
                        # age_rgs_true = Variable(age_rgs_true, volatile=volatile)
                        age_cls_true = Variable(age_cls_true, requires_grad=False)

                    # 4.1.2.3 zero gradients
                    self.optimizer.zero_grad()

                    # 4.1.2.4 forward and get outputs
                    gender_out, age_out = self.model(inputs)
                    _, gender_pred = torch.max(gender_out, 1)
                    _, max_cls_pred_age = torch.max(age_out, 1)
                    gender_true = gender_true.view(-1)
                    age_cls_true = age_cls_true.view(-1, self.age_cls_unit)

                    # 4.1.2.5 get the loss
                    gender_loss = self.gender_criterion(gender_out, gender_true)
                    age_cls_loss = self.age_cls_criterion(age_out, age_cls_true)
                    # age_rgs_loss = self.age_rgs_criterion(age_out, age_rgs_true)

                    # *Note: reduce some age loss and gender loss
                    #        enforce the model to focuse on reducing
                    #        age classification loss
                    gender_loss *= self.reduce_gen_loss
                    # age_rgs_loss *= self.reduce_age_mae

                    # loss = gender_loss + age_rgs_loss + age_cls_loss
                    # loss = age_rgs_loss
                    loss = age_cls_loss  # NOTE(review): immediately overwritten by the next line
                    loss = gender_loss + age_cls_loss

                    # share of each head in the total loss, for the progress line
                    gender_loss_perc = 100 * (gender_loss / loss).cpu().data.numpy().item()
                    age_cls_loss_perc = 100 * (age_cls_loss / loss).cpu().data.numpy().item()
                    # age_rgs_loss_perc = 100 * (age_rgs_loss / loss).cpu().data.numpy()[0]

                    age_rgs_loss_perc = 0
                    # age_cls_loss_perc = 0
                    # gender_loss_perc = 0

                    # convert cls result to rgs result by weigted sum:
                    # expected age = sum over bins of (bin index * bin probability)
                    weigh = np.linspace(1, self.age_cls_unit, self.age_cls_unit)
                    age_cls_raw = age_out.cpu().data.numpy()
                    age_cls_raw = np.sum(age_cls_raw * weigh, axis=1)
                    age_rgs_true = age_rgs_true.view(-1)
                    age_rgs_true = age_rgs_true.cpu().numpy() * self.age_divide
                    age_rgs_loss = np.mean(np.abs(age_cls_raw - age_rgs_true))

                    # 4.1.2.6 backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        self.optimizer.step()

                    # 4.1.2.7 statistics
                    gender_pred = gender_pred.cpu().data.numpy()
                    gender_true = gender_true.cpu().data.numpy()
                    batch_gender_tp = np.sum(gender_pred == gender_true)

                    max_cls_pred_age = max_cls_pred_age.cpu().data.numpy()
                    # NOTE(review): age_cls_true is rebound to the *regression* truth here
                    age_cls_true = age_rgs_true
                    batch_age_tp = np.sum(np.abs(age_cls_true - max_cls_pred_age) <= 2)  # if true, MAE < 5

                    epoch_age_mae += age_rgs_loss * inputs.size(0)
                    epoch_age_tp += batch_age_tp
                    epoch_gender_tp += batch_gender_tp

                    # 4.1.2.8 print info for each bach done (single line, rewritten via \r)
                    print("|| {:.2f}% {}/{} || LOSS = {:.2f} || DISTR% {:.0f} : {:.0f} : {:.0f} "
                          "|| AMAE/AACC±2/GACC = {:.2f} / {:.2f}% / {:.2f}% "
                          "|| LR {} || ETA {:.0f}s || BEST {:.2f} / {:.2f}% / {:.2f}% ||"
                          .format(100 * processed_data / self.dataset_sizes[phase],
                                  processed_data,
                                  self.dataset_sizes[phase],
                                  loss.cpu().data.numpy().item(),
                                  age_rgs_loss_perc,
                                  age_cls_loss_perc,
                                  gender_loss_perc,
                                  age_rgs_loss,
                                  100 * batch_age_tp / inputs.size(0),
                                  100 * batch_gender_tp / inputs.size(0),
                                  self.lr_rate,
                                  (self.dataset_sizes[phase] - processed_data) * (
                                          time.time() - epoch_start_time) / processed_data,
                                  best_age_mae,
                                  100 * best_age_acc,
                                  100 * best_gen_acc),
                          end='\r')

                    # 4.1.2.9 unlink cuda variables and free up mem
                    del inputs, gender_true, age_rgs_true, age_cls_true
                    del age_rgs_loss, loss  # , gen_loss, age_cls_loss
                    del gender_loss_perc, age_cls_loss_perc, age_rgs_loss_perc

                # 4.1.3.0 epoch done
                epoch_gender_acc = epoch_gender_tp / self.dataset_sizes[phase]
                epoch_age_acc = epoch_age_tp / self.dataset_sizes[phase]
                epoch_age_mae /= self.dataset_sizes[phase]

                # 4.1.4.0 print info after each epoch done
                print('\n--{} {}/{} Done! '
                      '|| AMAE/AACC±2/GACC = {:.2f} / {:.2f}% / {:.2f}% '
                      '|| COST {:.0f}s'
                      .format(phase.upper(),
                              epoch,
                              self.num_epochs,
                              epoch_age_mae,
                              100 * epoch_age_acc,
                              100 * epoch_gender_acc,
                              time.time() - epoch_start_time))

                # 4.1.5.0, save model weights: "best" checkpoint when val MAE improves,
                # otherwise "last" checkpoint and bump the no-improvement counter
                if phase == 'val' and epoch_age_mae < best_age_mae:
                    best_gen_acc = epoch_gender_acc
                    best_age_acc = epoch_age_acc
                    best_age_mae = epoch_age_mae
                    best_model_wts = copy.deepcopy(self.model.state_dict())
                    torch.save({'epoch': epoch,
                                'state_dic': best_model_wts,
                                "best_gen_acc": best_gen_acc,
                                "best_age_acc": best_age_acc,
                                "best_age_mae": best_age_mae,
                                "lr_rate": self.lr_rate,
                                "optimizer": self.optimizer.state_dict()
                                }, self.checkpoint_best)
                    not_reduce_rounds = 0
                    print("--New BEST FOUND!! || "
                          " AMAE/AACC/AACC±2/GACC = {:.2f} / {:.2f}% / {:.2f}%"
                          .format(best_age_mae,
                                  100 * best_age_acc,
                                  100 * best_gen_acc))
                elif phase == 'val':
                    not_reduce_rounds += 1
                    torch.save({'epoch': epoch,
                                'state_dic': self.model.state_dict(),
                                "best_gen_acc": best_gen_acc,
                                "best_age_acc": best_age_acc,
                                "best_age_mae": best_age_mae,
                                "lr_rate": self.lr_rate,
                                "optimizer": self.optimizer.state_dict()
                                }, self.checkpoint_last)

                # 4.1.6.0 save csv logging file (one row per phase)
                try:
                    self.csv_checkpoint.loc[len(self.csv_checkpoint)] = [str(datetime.datetime.now()),
                                                                         epoch,
                                                                         phase,
                                                                         epoch_age_acc,
                                                                         epoch_age_mae,
                                                                         epoch_gender_acc,
                                                                         best_age_acc,
                                                                         best_age_mae,
                                                                         best_gen_acc,
                                                                         self.lr_rate]
                    self.csv_checkpoint.to_csv(self.csv_path, index=False)
                except:
                    # NOTE(review): bare except; csv failures are only logged
                    print("Error when saving csv files! [tip]: Please check csv column names.")
                    print(self.csv_checkpoint.columns)

                # 4.1.7.0 reduce learning rate if nessessary
                if phase == "val" \
                        and not_reduce_rounds >= self.max_no_reduce \
                        and self.lr_rate > self.min_lr_rate:
                    self.lr_rate = max(self.min_lr_rate, self.lr_rate / self.lr_reduce_by)
                    # NOTE(review): the "From" value printed is wrong when clamped at min_lr_rate
                    print("[reduce_lr_rate] Reduce Learning Rate From {} --> {}"
                          .format(self.lr_rate * self.lr_reduce_by, self.lr_rate))
                    for param_group in self.optimizer.param_groups:
                        param_group['lr'] = self.lr_rate
                    not_reduce_rounds = 0

        # 4.2.0.0 train/val loop ends
        # 5.0.0.0 Trainning Completes!
        return self.model
  458.  
  459. # """
  460. #
  461. # # evaluate function is just a pruned version of train function
  462. #
  463. # def evaluate(self):
  464. # checkpoint_path = self.checkpoint_best if self.load_best else self.checkpoint_last
  465. # checkpoint = torch.load(checkpoint_path, map_location=None if self.use_gpu else 'cpu')
  466. # self.soft_load_statedic(checkpoint['state_dic'])
  467. # self.model.train(mode=False)
  468. #
  469. # epoch_age_tp = 0.
  470. # epoch_age_mae = 0.
  471. # epoch_gender_tp = 0.
  472. # processed_data = 0
  473. #
  474. # # 4.1.2.0 Iterate over data.
  475. # epoch_start_time = time.time()
  476. # phase = 'val'
  477. # for data in self.dataloaders[phase]:
  478. # # 4.1.2.1 get the inputs and labels
  479. # inputs, gender_true, age_rgs_true, age_cls_true = data
  480. # processed_data += self.batch_size
  481. #
  482. # # 4.1.2.2 wrap inputs&oputpus into Variable
  483. # # NOTE: set voloatile = True when
  484. # # doing evaluation helps reduce
  485. # # gpu mem usage.
  486. # volatile = phase == 'val'
  487. # if self.use_gpu:
  488. # inputs = Variable(inputs.cuda(), volatile=volatile)
  489. # gender_true = Variable(gender_true.cuda(), volatile=volatile)
  490. # age_cls_true = Variable(age_cls_true.cuda(), volatile=volatile)
  491. # else:
  492. # inputs = Variable(inputs, volatile=volatile)
  493. # gender_true = Variable(gender_true, volatile=volatile)
  494. # age_cls_true = Variable(age_cls_true, volatile=volatile)
  495. #
  496. # # 4.1.2.4 forward and get outputs
  497. # gender_out, age_cls_out = self.model(inputs)
  498. # _, gender_pred = torch.max(gender_out, 1)
  499. # _, age_cls_pred = torch.max(age_cls_out, 1)
  500. # gender_true = gender_true.view(-1)
  501. # age_cls_true = age_cls_true.view(-1, 99)
  502. #
  503. # # 4.1.2.5 get loss
  504. # # print(age_cls_out.size(), age_cls_true.size(), loss_weight.size())
  505. # gender_loss = self.gender_criterion(gender_out, gender_true)
  506. # age_cls_loss = self.age_cls_criterion(age_cls_out, age_cls_true)
  507. # # age_rgs_loss = self.age_rgs_criterion(age_rgs_pred, age_rgs_true)
  508. #
  509. # # *Note: reduce some age loss and gender loss
  510. # # enforce the model to focuse on reducing
  511. # # age classification loss
  512. # gender_loss *= self.reduce_gen_loss
  513. # # age_rgs_loss *= self.reduce_age_mae
  514. #
  515. # # loss = gender_loss + age_rgs_loss + age_cls_loss
  516. # # loss = age_cls_loss
  517. # loss = gender_loss + age_cls_loss
  518. #
  519. # gender_loss_perc = 100 * (gender_loss / loss).cpu().data.numpy()[0]
  520. # age_cls_loss_perc = 100 * (age_cls_loss / loss).cpu().data.numpy()[0]
  521. # # age_rgs_loss_perc = 100 * (age_rgs_loss / loss).cpu().data.numpy()[0]
  522. #
  523. # age_rgs_loss_perc = 0
  524. # weigh = np.linspace(1, 99, 99)
  525. # age_cls_raw = age_cls_out.cpu().data.numpy()
  526. # age_cls_raw = np.sum(age_cls_raw * weigh, axis=1)
  527. # age_rgs_true = age_rgs_true.view(-1)
  528. # age_rgs_true = age_rgs_true.cpu().numpy() * self.age_divide
  529. # age_rgs_loss = np.mean(np.abs(age_cls_raw - age_rgs_true))
  530. #
  531. # # 4.1.2.7 statistics
  532. # gender_pred = gender_pred.cpu().data.numpy()
  533. # gender_true = gender_true.cpu().data.numpy()
  534. # batch_gender_tp = np.sum(gender_pred == gender_true)
  535. #
  536. # age_cls_pred = age_cls_pred.cpu().data.numpy()
  537. # age_cls_true = age_rgs_true
  538. # batch_age_tp = np.sum(np.abs(age_cls_true - age_cls_pred) <= 2) # if true, MAE < 5
  539. #
  540. # epoch_age_mae += age_rgs_loss * inputs.size(0)
  541. # epoch_age_tp += batch_age_tp
  542. # epoch_gender_tp += batch_gender_tp
  543. #
  544. # # 4.1.2.8 print info for each bach done
  545. # print("|| {:.2f}% {}/{} || LOSS = {:.2f} || DISTR% {:.0f} : {:.0f} : {:.0f} "
  546. # "|| AMAE/AACC±2/GACC = {:.2f} / {:.2f}% / {:.2f}% "
  547. # "|| LR {} || ETA {:.0f}s "
  548. # .format(100 * processed_data / self.dataset_sizes[phase],
  549. # processed_data,
  550. # self.dataset_sizes[phase],
  551. # loss.cpu().data.numpy()[0],
  552. # age_rgs_loss_perc,
  553. # age_cls_loss_perc,
  554. # gender_loss_perc,
  555. # age_rgs_loss,
  556. # # self.age_divide * age_rgs_loss.cpu().data.numpy()[0],
  557. # 100 * batch_age_tp / inputs.size(0),
  558. # 100 * batch_gender_tp / inputs.size(0),
  559. # self.lr_rate,
  560. # (self.dataset_sizes[phase] - processed_data) * (time.time() - epoch_start_time) / processed_data,
  561. # end='\r'))
  562. #
  563. # # 4.1.2.9 unlink cuda variables and free up mem
  564. # del inputs, gender_true, age_rgs_true, age_cls_true
  565. # del age_cls_loss, age_rgs_loss, loss # , gen_loss
  566. # del gender_loss_perc, age_cls_loss_perc, age_rgs_loss_perc
  567. #
  568. # # 4.1.3.0 epoch done
  569. # epoch_gender_acc = epoch_gender_tp / self.dataset_sizes[phase]
  570. # epoch_age_acc = epoch_age_tp / self.dataset_sizes[phase]
  571. # epoch_age_mae /= self.dataset_sizes[phase]
  572. #
  573. # # 4.1.4.0 print info after each epoch done
  574. # print('\n--{} Done! '
  575. # '|| AMAE/AACC±2/GACC = {:.2f} / {:.2f}% / {:.2f}% '
  576. # '|| COST {:.0f}s'
  577. # .format(phase.upper(),
  578. # epoch_age_mae,
  579. # # self.age_divide * epoch_age_mae,
  580. # 100 * epoch_age_acc,
  581. # 100 * epoch_gender_acc,
  582. # time.time() - epoch_start_time))
  583. # """
  584.  
  585. def getAgeGender(self,
  586. img,
  587. transformed=False,
  588. return_all_faces=True,
  589. return_info=False):
  590. """
  591. evaluation/test funtion
  592. :param img: str or numpy array represent the image
  593. :param transformed: if the image is transformed into standarlized pytorch image.
  594. applicable when using this in train loop
  595. :param return_all_faces: if set, return prediction results of all faces detected.
  596. set to False if it's known that all images comtain only 1 face
  597. :param return_info: if set, return a list of rects (x, y, w, h) represents loc of faces
  598. :return: a list of [gender_pred, age_pred]
  599. """
  600. # load model params
  601. if not self.weight_loaded:
  602. path = self.checkpoint_best if self.load_best else self.checkpoint_last
  603. checkpoint = torch.load(path, map_location='gpu' if self.use_gpu else 'cpu')
  604. self.soft_load_statedic(checkpoint['state_dic'])
  605. # self.model.load_state_dict(checkpoint['state_dic'])
  606. self.model.train(False)
  607. self.weight_loaded = True
  608.  
  609. # load images if not provided
  610. if type(img) == str:
  611. img = cv2.cvtColor(cv2.imread(img), cv2.COLOR_BGR2RGB)
  612.  
  613. # get faces and rects
  614. aligned = self.aligner.getAligns(img, return_info=return_info)
  615. if return_info:
  616. aligned, rects, scores = aligned
  617. if not len(aligned): # no face detected
  618. scores = [1]
  619. rects = [(0, 0, img.shape[0], img.shape[1])]
  620. faces = [img]
  621. else:
  622. faces = aligned
  623. if not return_all_faces:
  624. faces = faces[0]
  625. faces = [transforms.ToPILImage()(fc) for fc in faces]
  626. if not transformed:
  627. faces = [self.transformer['val'](fc) for fc in faces]
  628.  
  629. # get predictions of each face
  630. preds = self.model.evaluate(faces)
  631.  
  632. if return_info:
  633. return preds, rects, scores
  634. return preds
  635.  
  636.  
  637. if __name__ == "__main__":
  638. a = AgePredModel(model_name='res18_cls70',
  639. new_training_process=False,
  640. new_last_layer=True)
  641. a.train_model()
  642. # a.evaluate()
  643. # print(a.getAgeGender(config.val + "6_0_MurderofElisaIzquierdo.jpg"))
  644. # a.divideTrainVal()
  645. # a.img2matrix()
  646. # face_dataset = FaceDataset()
  647. # print(face_dataset[1])
  648.  
  649.  
  650.  
  651.  
  652.  
  653.  
Runtime error #stdin #stdout #stderr 0.12s 23832KB
stdin
Standard input is empty
stdout
Standard output is empty
stderr
Traceback (most recent call last):
  File "./prog.py", line 3, in <module>
    import cv2
ModuleNotFoundError: No module named 'cv2'