Path: blob/main/train.py
print("started imports")12import sys3import argparse4import time5import cv26import wandb7from PIL import Image8import os910from torch.utils.data import DataLoader11import torch.optim as optim12import torch.nn.functional as F13import torch14import torchvision.transforms as transforms15import torch.optim.lr_scheduler as scheduler1617# custom imports18sys.path.append('./apex/')1920from apex import amp21from network.AEI_Net import *22from network.MultiscaleDiscriminator import *23from utils.training.Dataset import FaceEmbedVGG2, FaceEmbed24from utils.training.image_processing import make_image_list, get_faceswap25from utils.training.losses import hinge_loss, compute_discriminator_loss, compute_generator_losses26from utils.training.detector import detect_landmarks, paint_eyes27from AdaptiveWingLoss.core import models28from arcface_model.iresnet import iresnet1002930print("finished imports")313233def train_one_epoch(G: 'generator model',34D: 'discriminator model',35opt_G: "generator opt",36opt_D: "discriminator opt",37scheduler_G: "scheduler G opt",38scheduler_D: "scheduler D opt",39netArc: 'ArcFace model',40model_ft: 'Landmark Detector',41args: 'Args Namespace',42dataloader: torch.utils.data.DataLoader,43device: 'torch device',44epoch:int,45loss_adv_accumulated:int):4647for iteration, data in enumerate(dataloader):48start_time = time.time()4950Xs_orig, Xs, Xt, same_person = data5152Xs_orig = Xs_orig.to(device)53Xs = Xs.to(device)54Xt = Xt.to(device)55same_person = same_person.to(device)5657# get the identity embeddings of Xs58with torch.no_grad():59embed = netArc(F.interpolate(Xs_orig, [112, 112], mode='bilinear', align_corners=False))6061diff_person = torch.ones_like(same_person)6263if args.diff_eq_same:64same_person = diff_person6566# generator training67opt_G.zero_grad()6869Y, Xt_attr = G(Xt, embed)70Di = D(Y)71ZY = netArc(F.interpolate(Y, [112, 112], mode='bilinear', align_corners=False))7273if args.eye_detector_loss:74Xt_eyes, Xt_heatmap_left, Xt_heatmap_right = detect_landmarks(Xt, model_ft)75Y_eyes, Y_heatmap_left, Y_heatmap_right = detect_landmarks(Y, model_ft)76eye_heatmaps = [Xt_heatmap_left, Xt_heatmap_right, Y_heatmap_left, Y_heatmap_right]77else:78eye_heatmaps = None7980lossG, loss_adv_accumulated, L_adv, L_attr, L_id, L_rec, L_l2_eyes = compute_generator_losses(G, Y, Xt, Xt_attr, Di,81embed, ZY, eye_heatmaps,loss_adv_accumulated,82diff_person, same_person, args)8384with amp.scale_loss(lossG, opt_G) as scaled_loss:85scaled_loss.backward()86opt_G.step()87if args.scheduler:88scheduler_G.step()8990# discriminator training91opt_D.zero_grad()92lossD = compute_discriminator_loss(D, Y, Xs, diff_person)93with amp.scale_loss(lossD, opt_D) as scaled_loss:94scaled_loss.backward()9596if (not args.discr_force) or (loss_adv_accumulated < 4.):97opt_D.step()98if args.scheduler:99scheduler_D.step()100101102batch_time = time.time() - start_time103104if iteration % args.show_step == 0:105images = [Xs, Xt, Y]106if args.eye_detector_loss:107Xt_eyes_img = paint_eyes(Xt, Xt_eyes)108Yt_eyes_img = paint_eyes(Y, Y_eyes)109images.extend([Xt_eyes_img, Yt_eyes_img])110image = make_image_list(images)111if args.use_wandb:112wandb.log({"gen_images":wandb.Image(image, caption=f"{epoch:03}" + '_' + f"{iteration:06}")})113else:114cv2.imwrite('./images/generated_image.jpg', image[:,:,::-1])115116if iteration % 10 == 0:117print(f'epoch: {epoch} {iteration} / {len(dataloader)}')118print(f'lossD: {lossD.item()} lossG: {lossG.item()} batch_time: {batch_time}s')119print(f'L_adv: {L_adv.item()} L_id: {L_id.item()} L_attr: 
{L_attr.item()} L_rec: {L_rec.item()}')120if args.eye_detector_loss:121print(f'L_l2_eyes: {L_l2_eyes.item()}')122print(f'loss_adv_accumulated: {loss_adv_accumulated}')123if args.scheduler:124print(f'scheduler_G lr: {scheduler_G.get_last_lr()} scheduler_D lr: {scheduler_D.get_last_lr()}')125126if args.use_wandb:127if args.eye_detector_loss:128wandb.log({"loss_eyes": L_l2_eyes.item()}, commit=False)129wandb.log({"loss_id": L_id.item(),130"lossD": lossD.item(),131"lossG": lossG.item(),132"loss_adv": L_adv.item(),133"loss_attr": L_attr.item(),134"loss_rec": L_rec.item()})135136if iteration % 5000 == 0:137torch.save(G.state_dict(), f'./saved_models_{args.run_name}/G_latest.pth')138torch.save(D.state_dict(), f'./saved_models_{args.run_name}/D_latest.pth')139140torch.save(G.state_dict(), f'./current_models_{args.run_name}/G_' + str(epoch)+ '_' + f"{iteration:06}" + '.pth')141torch.save(D.state_dict(), f'./current_models_{args.run_name}/D_' + str(epoch)+ '_' + f"{iteration:06}" + '.pth')142143if (iteration % 250 == 0) and (args.use_wandb):144### Посмотрим как выглядит свап на трех конкретных фотках, чтобы проследить динамику145G.eval()146147res1 = get_faceswap('examples/images/training//source1.png', 'examples/images/training//target1.png', G, netArc, device)148res2 = get_faceswap('examples/images/training//source2.png', 'examples/images/training//target2.png', G, netArc, device)149res3 = get_faceswap('examples/images/training//source3.png', 'examples/images/training//target3.png', G, netArc, device)150151res4 = get_faceswap('examples/images/training//source4.png', 'examples/images/training//target4.png', G, netArc, device)152res5 = get_faceswap('examples/images/training//source5.png', 'examples/images/training//target5.png', G, netArc, device)153res6 = get_faceswap('examples/images/training//source6.png', 'examples/images/training//target6.png', G, netArc, device)154155output1 = np.concatenate((res1, res2, res3), axis=0)156output2 = np.concatenate((res4, res5, res6), axis=0)157158output = np.concatenate((output1, output2), axis=1)159160wandb.log({"our_images":wandb.Image(output, caption=f"{epoch:03}" + '_' + f"{iteration:06}")})161162G.train()163164165def train(args, device):166# training params167batch_size = args.batch_size168max_epoch = args.max_epoch169170# initializing main models171G = AEI_Net(args.backbone, num_blocks=args.num_blocks, c_id=512).to(device)172D = MultiscaleDiscriminator(input_nc=3, n_layers=5, norm_layer=torch.nn.InstanceNorm2d).to(device)173G.train()174D.train()175176# initializing model for identity extraction177netArc = iresnet100(fp16=False)178netArc.load_state_dict(torch.load('arcface_model/backbone.pth'))179netArc=netArc.cuda()180netArc.eval()181182if args.eye_detector_loss:183model_ft = models.FAN(4, "False", "False", 98)184checkpoint = torch.load('./AdaptiveWingLoss/AWL_detector/WFLW_4HG.pth')185if 'state_dict' not in checkpoint:186model_ft.load_state_dict(checkpoint)187else:188pretrained_weights = checkpoint['state_dict']189model_weights = model_ft.state_dict()190pretrained_weights = {k: v for k, v in pretrained_weights.items() \191if k in model_weights}192model_weights.update(pretrained_weights)193model_ft.load_state_dict(model_weights)194model_ft = model_ft.to(device)195model_ft.eval()196else:197model_ft=None198199opt_G = optim.Adam(G.parameters(), lr=args.lr_G, betas=(0, 0.999), weight_decay=1e-4)200opt_D = optim.Adam(D.parameters(), lr=args.lr_D, betas=(0, 0.999), weight_decay=1e-4)201202G, opt_G = amp.initialize(G, opt_G, opt_level=args.optim_level)203D, opt_D 
= amp.initialize(D, opt_D, opt_level=args.optim_level)204205if args.scheduler:206scheduler_G = scheduler.StepLR(opt_G, step_size=args.scheduler_step, gamma=args.scheduler_gamma)207scheduler_D = scheduler.StepLR(opt_D, step_size=args.scheduler_step, gamma=args.scheduler_gamma)208else:209scheduler_G = None210scheduler_D = None211212if args.pretrained:213try:214G.load_state_dict(torch.load(args.G_path, map_location=torch.device('cpu')), strict=False)215D.load_state_dict(torch.load(args.D_path, map_location=torch.device('cpu')), strict=False)216print("Loaded pretrained weights for G and D")217except FileNotFoundError as e:218print("Not found pretrained weights. Continue without any pretrained weights.")219220if args.vgg:221dataset = FaceEmbedVGG2(args.dataset_path, same_prob=args.same_person, same_identity=args.same_identity)222else:223dataset = FaceEmbed([args.dataset_path], same_prob=args.same_person)224225dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=8, drop_last=True)226227# Будем считать аккумулированный adv loss, чтобы обучать дискриминатор только когда он ниже порога, если discr_force=True228loss_adv_accumulated = 20.229230for epoch in range(0, max_epoch):231train_one_epoch(G,232D,233opt_G,234opt_D,235scheduler_G,236scheduler_D,237netArc,238model_ft,239args,240dataloader,241device,242epoch,243loss_adv_accumulated)244245def main(args):246device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')247if not torch.cuda.is_available():248print('cuda is not available. using cpu. check if it\'s ok')249250print("Starting traing")251train(args, device=device)252253254if __name__ == "__main__":255parser = argparse.ArgumentParser()256257# dataset params258parser.add_argument('--dataset_path', default='/VggFace2-crop/', help='Path to the dataset. If not VGG2 dataset is used, param --vgg should be set False')259parser.add_argument('--G_path', default='./saved_models/G.pth', help='Path to pretrained weights for G. Only used if pretrained=True')260parser.add_argument('--D_path', default='./saved_models/D.pth', help='Path to pretrained weights for D. Only used if pretrained=True')261parser.add_argument('--vgg', default=True, type=bool, help='When using VGG2 dataset (or any other dataset with several photos for one identity)')262# weights for loss263parser.add_argument('--weight_adv', default=1, type=float, help='Adversarial Loss weight')264parser.add_argument('--weight_attr', default=10, type=float, help='Attributes weight')265parser.add_argument('--weight_id', default=20, type=float, help='Identity Loss weight')266parser.add_argument('--weight_rec', default=10, type=float, help='Reconstruction Loss weight')267parser.add_argument('--weight_eyes', default=0., type=float, help='Eyes Loss weight')268# training params you may want to change269270parser.add_argument('--backbone', default='unet', const='unet', nargs='?', choices=['unet', 'linknet', 'resnet'], help='Backbone for attribute encoder')271parser.add_argument('--num_blocks', default=2, type=int, help='Numbers of AddBlocks at AddResblock')272parser.add_argument('--same_person', default=0.2, type=float, help='Probability of using same person identity during training')273parser.add_argument('--same_identity', default=True, type=bool, help='Using simswap approach, when source_id = target_id. 
Only possible with vgg=True')274parser.add_argument('--diff_eq_same', default=False, type=bool, help='Don\'t use info about where is defferent identities')275parser.add_argument('--pretrained', default=True, type=bool, help='If using the pretrained weights for training or not')276parser.add_argument('--discr_force', default=False, type=bool, help='If True Discriminator would not train when adversarial loss is high')277parser.add_argument('--scheduler', default=False, type=bool, help='If True decreasing LR is used for learning of generator and discriminator')278parser.add_argument('--scheduler_step', default=5000, type=int)279parser.add_argument('--scheduler_gamma', default=0.2, type=float, help='It is value, which shows how many times to decrease LR')280parser.add_argument('--eye_detector_loss', default=False, type=bool, help='If True eye loss with using AdaptiveWingLoss detector is applied to generator')281# info about this run282parser.add_argument('--use_wandb', default=False, type=bool, help='Use wandb to track your experiments or not')283parser.add_argument('--run_name', required=True, type=str, help='Name of this run. Used to create folders where to save the weights.')284parser.add_argument('--wandb_project', default='your-project-name', type=str)285parser.add_argument('--wandb_entity', default='your-login', type=str)286# training params you probably don't want to change287parser.add_argument('--batch_size', default=16, type=int)288parser.add_argument('--lr_G', default=4e-4, type=float)289parser.add_argument('--lr_D', default=4e-4, type=float)290parser.add_argument('--max_epoch', default=2000, type=int)291parser.add_argument('--show_step', default=500, type=int)292parser.add_argument('--save_epoch', default=1, type=int)293parser.add_argument('--optim_level', default='O2', type=str)294295args = parser.parse_args()296297if args.vgg==False and args.same_identity==True:298raise ValueError("Sorry, you can't use some other dataset than VGG2 Faces with param same_identity=True")299300if args.use_wandb==True:301wandb.init(project=args.wandb_project, entity=args.wandb_entity, settings=wandb.Settings(start_method='fork'))302303config = wandb.config304config.dataset_path = args.dataset_path305config.weight_adv = args.weight_adv306config.weight_attr = args.weight_attr307config.weight_id = args.weight_id308config.weight_rec = args.weight_rec309config.weight_eyes = args.weight_eyes310config.same_person = args.same_person311config.Vgg2Face = args.vgg312config.same_identity = args.same_identity313config.diff_eq_same = args.diff_eq_same314config.discr_force = args.discr_force315config.scheduler = args.scheduler316config.scheduler_step = args.scheduler_step317config.scheduler_gamma = args.scheduler_gamma318config.eye_detector_loss = args.eye_detector_loss319config.pretrained = args.pretrained320config.run_name = args.run_name321config.G_path = args.G_path322config.D_path = args.D_path323config.batch_size = args.batch_size324config.lr_G = args.lr_G325config.lr_D = args.lr_D326elif not os.path.exists('./images'):327os.mkdir('./images')328329# Создаем папки, чтобы было куда сохранять последние веса моделей, а также веса с каждой эпохи330if not os.path.exists(f'./saved_models_{args.run_name}'):331os.mkdir(f'./saved_models_{args.run_name}')332os.mkdir(f'./current_models_{args.run_name}')333334main(args)335336337
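For reference, a minimal launch of this script might look like the following; the run name below is a placeholder, the dataset path is the argparse default from above, and every other flag keeps its default value:

python train.py --run_name my_run --dataset_path /VggFace2-crop/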