diff --git a/Wav2Lip-master/audio.py b/Wav2Lip-master/audio.py
new file mode 100644
index 00000000..32b20c44
--- /dev/null
+++ b/Wav2Lip-master/audio.py
@@ -0,0 +1,136 @@
+import librosa
+import librosa.filters
+import numpy as np
+# import tensorflow as tf
+from scipy import signal
+from scipy.io import wavfile
+from hparams import hparams as hp
+
+def load_wav(path, sr):
+ return librosa.core.load(path, sr=sr)[0]
+
+def save_wav(wav, path, sr):
+    # Rescale to the int16 range, guarding against division by zero (normalization proposed by @dsmiller).
+    wav *= 32767 / max(0.01, np.max(np.abs(wav)))
+    wavfile.write(path, sr, wav.astype(np.int16))
+
+def save_wavenet_wav(wav, path, sr):
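+    # Note: librosa.output was removed in librosa 0.8, so this helper needs librosa < 0.8
+    # (soundfile.write is the usual replacement in newer versions).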
+ librosa.output.write_wav(path, wav, sr=sr)
+
+def preemphasis(wav, k, preemphasize=True):
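+    # First-order high-pass FIR filter y[n] = x[n] - k * x[n-1]; boosts high frequencies before the STFT.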
+ if preemphasize:
+ return signal.lfilter([1, -k], [1], wav)
+ return wav
+
+def inv_preemphasis(wav, k, inv_preemphasize=True):
+ if inv_preemphasize:
+ return signal.lfilter([1], [1, -k], wav)
+ return wav
+
+def get_hop_size():
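+    # If hop_size is unset, derive it from the frame shift in ms (e.g. 12.5 ms at 16 kHz -> 200 samples).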
+ hop_size = hp.hop_size
+ if hop_size is None:
+ assert hp.frame_shift_ms is not None
+ hop_size = int(hp.frame_shift_ms / 1000 * hp.sample_rate)
+ return hop_size
+
+def linearspectrogram(wav):
+ D = _stft(preemphasis(wav, hp.preemphasis, hp.preemphasize))
+ S = _amp_to_db(np.abs(D)) - hp.ref_level_db
+
+ if hp.signal_normalization:
+ return _normalize(S)
+ return S
+
+def melspectrogram(wav):
+ D = _stft(preemphasis(wav, hp.preemphasis, hp.preemphasize))
+ S = _amp_to_db(_linear_to_mel(np.abs(D))) - hp.ref_level_db
+
+ if hp.signal_normalization:
+ return _normalize(S)
+ return S
+
+def _lws_processor():
+ import lws
+ return lws.lws(hp.n_fft, get_hop_size(), fftsize=hp.win_size, mode="speech")
+
+def _stft(y):
+ if hp.use_lws:
+        return _lws_processor().stft(y).T  # _lws_processor() takes no arguments
+ else:
+ return librosa.stft(y=y, n_fft=hp.n_fft, hop_length=get_hop_size(), win_length=hp.win_size)
+
+##########################################################
+# These padding helpers are only correct when using lws! (This was degrading WaveNet quality for a long time.)
+def num_frames(length, fsize, fshift):
+ """Compute number of time frames of spectrogram
+ """
+ pad = (fsize - fshift)
+ if length % fshift == 0:
+ M = (length + pad * 2 - fsize) // fshift + 1
+ else:
+ M = (length + pad * 2 - fsize) // fshift + 2
+ return M
+
+
+def pad_lr(x, fsize, fshift):
+ """Compute left and right padding
+ """
+ M = num_frames(len(x), fsize, fshift)
+ pad = (fsize - fshift)
+ T = len(x) + 2 * pad
+ r = (M - 1) * fshift + fsize - T
+ return pad, pad + r
+##########################################################
+#Librosa correct padding
+def librosa_pad_lr(x, fsize, fshift):
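+    # Pad only on the right so the signal length becomes an exact multiple of the frame shift.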
+ return 0, (x.shape[0] // fshift + 1) * fshift - x.shape[0]
+
+# Conversions
+_mel_basis = None
+
+def _linear_to_mel(spectrogram):
+    global _mel_basis
+    if _mel_basis is None:
+        _mel_basis = _build_mel_basis()
+    return np.dot(_mel_basis, spectrogram)
+
+def _build_mel_basis():
+ assert hp.fmax <= hp.sample_rate // 2
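+    # Note: librosa >= 0.10 makes these keyword-only: librosa.filters.mel(sr=..., n_fft=..., ...).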
+ return librosa.filters.mel(hp.sample_rate, hp.n_fft, n_mels=hp.num_mels,
+ fmin=hp.fmin, fmax=hp.fmax)
+
+def _amp_to_db(x):
+ min_level = np.exp(hp.min_level_db / 20 * np.log(10))
+ return 20 * np.log10(np.maximum(min_level, x))
+
+def _db_to_amp(x):
+ return np.power(10.0, (x) * 0.05)
+
+def _normalize(S):
+ if hp.allow_clipping_in_normalization:
+ if hp.symmetric_mels:
+ return np.clip((2 * hp.max_abs_value) * ((S - hp.min_level_db) / (-hp.min_level_db)) - hp.max_abs_value,
+ -hp.max_abs_value, hp.max_abs_value)
+ else:
+ return np.clip(hp.max_abs_value * ((S - hp.min_level_db) / (-hp.min_level_db)), 0, hp.max_abs_value)
+
+ assert S.max() <= 0 and S.min() - hp.min_level_db >= 0
+ if hp.symmetric_mels:
+ return (2 * hp.max_abs_value) * ((S - hp.min_level_db) / (-hp.min_level_db)) - hp.max_abs_value
+ else:
+ return hp.max_abs_value * ((S - hp.min_level_db) / (-hp.min_level_db))
+
+def _denormalize(D):
+ if hp.allow_clipping_in_normalization:
+ if hp.symmetric_mels:
+ return (((np.clip(D, -hp.max_abs_value,
+ hp.max_abs_value) + hp.max_abs_value) * -hp.min_level_db / (2 * hp.max_abs_value))
+ + hp.min_level_db)
+ else:
+ return ((np.clip(D, 0, hp.max_abs_value) * -hp.min_level_db / hp.max_abs_value) + hp.min_level_db)
+
+ if hp.symmetric_mels:
+ return (((D + hp.max_abs_value) * -hp.min_level_db / (2 * hp.max_abs_value)) + hp.min_level_db)
+ else:
+ return ((D * -hp.min_level_db / hp.max_abs_value) + hp.min_level_db)
diff --git a/Wav2Lip-master/checkpoints/README.md b/Wav2Lip-master/checkpoints/README.md
new file mode 100644
index 00000000..8580a0de
--- /dev/null
+++ b/Wav2Lip-master/checkpoints/README.md
@@ -0,0 +1,2 @@
+Place all your checkpoints (.pth files) here.
\ No newline at end of file
diff --git a/Wav2Lip-master/color_syncnet_train.py b/Wav2Lip-master/color_syncnet_train.py
new file mode 100644
index 00000000..afa00544
--- /dev/null
+++ b/Wav2Lip-master/color_syncnet_train.py
@@ -0,0 +1,279 @@
+from os.path import dirname, join, basename, isfile
+from tqdm import tqdm
+
+from models import SyncNet_color as SyncNet
+import audio
+
+import torch
+from torch import nn
+from torch import optim
+import torch.backends.cudnn as cudnn
+from torch.utils import data as data_utils
+import numpy as np
+
+from glob import glob
+
+import os, random, cv2, argparse
+from hparams import hparams, get_image_list
+
+parser = argparse.ArgumentParser(description='Code to train the expert lip-sync discriminator')
+
+parser.add_argument("--data_root", help="Root folder of the preprocessed LRS2 dataset", required=True)
+
+parser.add_argument('--checkpoint_dir', help='Save checkpoints to this directory', required=True, type=str)
+parser.add_argument('--checkpoint_path', help='Resume training from this checkpoint', default=None, type=str)
+
+args = parser.parse_args()
+
+
+global_step = 0
+global_epoch = 0
+use_cuda = torch.cuda.is_available()
+print('use_cuda: {}'.format(use_cuda))
+
+syncnet_T = 5
+syncnet_mel_step_size = 16
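+# With the default 25 fps video and 80 mel frames per second, syncnet_T=5 frames span 0.2 s, i.e. 16 mel steps.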
+
+class Dataset(object):
+ def __init__(self, split):
+ self.all_videos = get_image_list(args.data_root, split)
+
+ def get_frame_id(self, frame):
+ return int(basename(frame).split('.')[0])
+
+ def get_window(self, start_frame):
+ start_id = self.get_frame_id(start_frame)
+ vidname = dirname(start_frame)
+
+ window_fnames = []
+ for frame_id in range(start_id, start_id + syncnet_T):
+ frame = join(vidname, '{}.jpg'.format(frame_id))
+ if not isfile(frame):
+ return None
+ window_fnames.append(frame)
+ return window_fnames
+
+ def crop_audio_window(self, spec, start_frame):
+        # 80 mel frames per second with the default hparams (sample_rate=16000, hop_size=200),
+        # so video frame f maps to mel index 80 * f / fps.
+ start_frame_num = self.get_frame_id(start_frame)
+ start_idx = int(80. * (start_frame_num / float(hparams.fps)))
+
+ end_idx = start_idx + syncnet_mel_step_size
+
+ return spec[start_idx : end_idx, :]
+
+
+ def __len__(self):
+ return len(self.all_videos)
+
+ def __getitem__(self, idx):
+        while True:
+ idx = random.randint(0, len(self.all_videos) - 1)
+ vidname = self.all_videos[idx]
+
+ img_names = list(glob(join(vidname, '*.jpg')))
+ if len(img_names) <= 3 * syncnet_T:
+ continue
+ img_name = random.choice(img_names)
+ wrong_img_name = random.choice(img_names)
+ while wrong_img_name == img_name:
+ wrong_img_name = random.choice(img_names)
+
+ if random.choice([True, False]):
+ y = torch.ones(1).float()
+ chosen = img_name
+ else:
+ y = torch.zeros(1).float()
+ chosen = wrong_img_name
+
+ window_fnames = self.get_window(chosen)
+ if window_fnames is None:
+ continue
+
+ window = []
+ all_read = True
+ for fname in window_fnames:
+ img = cv2.imread(fname)
+ if img is None:
+ all_read = False
+ break
+ try:
+ img = cv2.resize(img, (hparams.img_size, hparams.img_size))
+ except Exception as e:
+ all_read = False
+ break
+
+ window.append(img)
+
+ if not all_read: continue
+
+ try:
+ wavpath = join(vidname, "audio.wav")
+ wav = audio.load_wav(wavpath, hparams.sample_rate)
+
+ orig_mel = audio.melspectrogram(wav).T
+ except Exception as e:
+ continue
+
+ mel = self.crop_audio_window(orig_mel.copy(), img_name)
+
+ if (mel.shape[0] != syncnet_mel_step_size):
+ continue
+
+ # H x W x 3 * T
+ x = np.concatenate(window, axis=2) / 255.
+ x = x.transpose(2, 0, 1)
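+            # Keep only the lower half of each face crop (the mouth region SyncNet is trained to discriminate).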
+ x = x[:, x.shape[1]//2:]
+
+ x = torch.FloatTensor(x)
+ mel = torch.FloatTensor(mel.T).unsqueeze(0)
+
+ return x, mel, y
+
+logloss = nn.BCELoss()
+def cosine_loss(a, v, y):
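+    # Binary cross-entropy between the audio/video embedding cosine similarity and the sync label y.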
+ d = nn.functional.cosine_similarity(a, v)
+ loss = logloss(d.unsqueeze(1), y)
+
+ return loss
+
+def train(device, model, train_data_loader, test_data_loader, optimizer,
+ checkpoint_dir=None, checkpoint_interval=None, nepochs=None):
+
+ global global_step, global_epoch
+ resumed_step = global_step
+
+ while global_epoch < nepochs:
+ running_loss = 0.
+ prog_bar = tqdm(enumerate(train_data_loader))
+ for step, (x, mel, y) in prog_bar:
+ model.train()
+ optimizer.zero_grad()
+
+ # Transform data to CUDA device
+ x = x.to(device)
+
+ mel = mel.to(device)
+
+ a, v = model(mel, x)
+ y = y.to(device)
+
+ loss = cosine_loss(a, v, y)
+ loss.backward()
+ optimizer.step()
+
+ global_step += 1
+ cur_session_steps = global_step - resumed_step
+ running_loss += loss.item()
+
+ if global_step == 1 or global_step % checkpoint_interval == 0:
+ save_checkpoint(
+ model, optimizer, global_step, checkpoint_dir, global_epoch)
+
+ if global_step % hparams.syncnet_eval_interval == 0:
+ with torch.no_grad():
+ eval_model(test_data_loader, global_step, device, model, checkpoint_dir)
+
+ prog_bar.set_description('Loss: {}'.format(running_loss / (step + 1)))
+
+ global_epoch += 1
+
+def eval_model(test_data_loader, global_step, device, model, checkpoint_dir):
+ eval_steps = 1400
+ print('Evaluating for {} steps'.format(eval_steps))
+ losses = []
+    while True:
+ for step, (x, mel, y) in enumerate(test_data_loader):
+
+ model.eval()
+
+ # Transform data to CUDA device
+ x = x.to(device)
+
+ mel = mel.to(device)
+
+ a, v = model(mel, x)
+ y = y.to(device)
+
+ loss = cosine_loss(a, v, y)
+ losses.append(loss.item())
+
+ if step > eval_steps: break
+
+ averaged_loss = sum(losses) / len(losses)
+ print(averaged_loss)
+
+ return
+
+def save_checkpoint(model, optimizer, step, checkpoint_dir, epoch):
+
+ checkpoint_path = join(
+ checkpoint_dir, "checkpoint_step{:09d}.pth".format(global_step))
+ optimizer_state = optimizer.state_dict() if hparams.save_optimizer_state else None
+ torch.save({
+ "state_dict": model.state_dict(),
+ "optimizer": optimizer_state,
+ "global_step": step,
+ "global_epoch": epoch,
+ }, checkpoint_path)
+ print("Saved checkpoint:", checkpoint_path)
+
+def _load(checkpoint_path):
+ if use_cuda:
+ checkpoint = torch.load(checkpoint_path)
+ else:
+ checkpoint = torch.load(checkpoint_path,
+ map_location=lambda storage, loc: storage)
+ return checkpoint
+
+def load_checkpoint(path, model, optimizer, reset_optimizer=False):
+ global global_step
+ global global_epoch
+
+ print("Load checkpoint from: {}".format(path))
+ checkpoint = _load(path)
+ model.load_state_dict(checkpoint["state_dict"])
+ if not reset_optimizer:
+ optimizer_state = checkpoint["optimizer"]
+ if optimizer_state is not None:
+ print("Load optimizer state from {}".format(path))
+            optimizer.load_state_dict(optimizer_state)
+ global_step = checkpoint["global_step"]
+ global_epoch = checkpoint["global_epoch"]
+
+ return model
+
+if __name__ == "__main__":
+ checkpoint_dir = args.checkpoint_dir
+ checkpoint_path = args.checkpoint_path
+
+    os.makedirs(checkpoint_dir, exist_ok=True)
+
+ # Dataset and Dataloader setup
+ train_dataset = Dataset('train')
+ test_dataset = Dataset('val')
+
+ train_data_loader = data_utils.DataLoader(
+ train_dataset, batch_size=hparams.syncnet_batch_size, shuffle=True,
+ num_workers=hparams.num_workers)
+
+ test_data_loader = data_utils.DataLoader(
+ test_dataset, batch_size=hparams.syncnet_batch_size,
+ num_workers=8)
+
+ device = torch.device("cuda" if use_cuda else "cpu")
+
+ # Model
+ model = SyncNet().to(device)
+ print('total trainable params {}'.format(sum(p.numel() for p in model.parameters() if p.requires_grad)))
+
+ optimizer = optim.Adam([p for p in model.parameters() if p.requires_grad],
+ lr=hparams.syncnet_lr)
+
+ if checkpoint_path is not None:
+ load_checkpoint(checkpoint_path, model, optimizer, reset_optimizer=False)
+
+ train(device, model, train_data_loader, test_data_loader, optimizer,
+ checkpoint_dir=checkpoint_dir,
+ checkpoint_interval=hparams.syncnet_checkpoint_interval,
+ nepochs=hparams.nepochs)
diff --git a/Wav2Lip-master/evaluation/README.md b/Wav2Lip-master/evaluation/README.md
new file mode 100644
index 00000000..affebbc0
--- /dev/null
+++ b/Wav2Lip-master/evaluation/README.md
@@ -0,0 +1,63 @@
+# Novel evaluation framework, new filelists, and the LSE-D and LSE-C metrics
+
+Our paper also proposes a novel evaluation framework (Section 4). To evaluate on LRS2, LRS3, and LRW, the filelists are present in the `test_filelists` folder. Please use the `gen_videos_from_filelist.py` script to generate the videos. After that, you can calculate the LSE-D and LSE-C scores using the instructions below. Please see [this thread](https://github.com/Rudrabha/Wav2Lip/issues/22#issuecomment-712825380) for how to calculate the FID scores.
+
+The videos of the ReSyncED benchmark for real-world evaluation will be released soon.
+
+### Steps to set up the evaluation repository for the LSE-D and LSE-C metrics:
+We use the pre-trained syncnet model available in this [repository](https://github.com/joonson/syncnet_python).
+
+* Clone the SyncNet repository.
+```
+git clone https://github.com/joonson/syncnet_python.git
+```
+* Follow the procedure given in the above linked [repository](https://github.com/joonson/syncnet_python) to download the pretrained models and set up the dependencies.
+  * **Note: Please use a separate virtual environment for the evaluation scripts. The package versions used by Wav2Lip and by the publicly released SyncNet code are different and can cause version-mismatch issues.**
+```
+cd syncnet_python
+pip install -r requirements.txt
+sh download_model.sh
+```
+* The above steps ensure that all the dependencies required by the repository are installed and the pre-trained models are downloaded.
+
+### Running the evaluation scripts:
+* Copy our evaluation scripts given in this folder to the cloned repository.
+```
+cd Wav2Lip/evaluation/scores_LSE/
+cp *.py syncnet_python/
+cp *.sh syncnet_python/
+```
+**Note: We will release the test filelists for LRW, LRS2, and LRS3 once we receive permission from the dataset creators. We will also release the Real World Dataset we have collected shortly.**
+
+* Our evaluation technique does not require any ground truth: given the lip-synced videos, the scores are calculated directly from the generated videos alone. Please store the generated videos (from our test sets or your own) in the following folder structure.
+```
+video data root (Folder containing all videos)
+├── All .mp4 files
+```
+* Change the folder back to the cloned repository.
+```
+cd syncnet_python
+```
+* To run evaluation on the LRW, LRS2 and LRS3 test files, please run the following command:
+```
+python calculate_scores_LRS.py --data_root /path/to/video/data/root --tmp_dir tmp_dir/
+```
+
+* To run the evaluation on the ReSyncED dataset or your own generated videos, please run the following command:
+```
+sh calculate_scores_real_videos.sh /path/to/video/data/root
+```
+* The generated scores will be written to the `all_scores.txt` file generated in the `syncnet_python/` folder.
+
+# Evaluation of image quality using the FID metric
+We use the [pytorch-fid](https://github.com/mseitzer/pytorch-fid) repository to calculate the FID metric. We dump all the frames from both the ground-truth and the generated videos and calculate the FID score between the two sets of frames.
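+
+For example, assuming `pytorch-fid` is pip-installed, the score between the two frame dumps can then be computed with:
+```
+python -m pytorch_fid /path/to/ground_truth_frames /path/to/generated_frames
+```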
+
+
+# Opening issues related to evaluation scripts
+* Please open issues with the "Evaluation" label if you face any problems with the evaluation scripts.
+
+# Acknowledgements
+Our evaluation pipeline is based on two existing repositories. The LSE metrics are based on the [syncnet_python](https://github.com/joonson/syncnet_python) repository, and the FID score is based on the [pytorch-fid](https://github.com/mseitzer/pytorch-fid) repository. We thank the authors of both repositories for releasing their wonderful code.
+
+
+
diff --git a/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/README.md b/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/README.md
new file mode 100644
index 00000000..7112998b
--- /dev/null
+++ b/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/README.md
@@ -0,0 +1,16 @@
+BEGAN fork from
+
+https://github.com/carpedm20/BEGAN-tensorflow
+
+with batched FID evaluation
+
+Needs `fid.py` from the TTUR root directory; please copy it here.
+
+Precalculated real-world / training-data statistics can be downloaded from
+http://bioinf.jku.at/research/ttur/ttur.html (be sure to use the batched versions).
+
+See `sh/run.sh` for options.
+
+Fixed random seeds are removed.
diff --git a/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/config.py b/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/config.py
new file mode 100644
index 00000000..4760775b
--- /dev/null
+++ b/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/config.py
@@ -0,0 +1,80 @@
+#-*- coding: utf-8 -*-
+import argparse
+
+def str2bool(v):
+ return v.lower() in ('true', '1')
+
+arg_lists = []
+parser = argparse.ArgumentParser()
+
+def add_argument_group(name):
+ arg = parser.add_argument_group(name)
+ arg_lists.append(arg)
+ return arg
+
+# Network
+net_arg = add_argument_group('Network')
+net_arg.add_argument('--input_scale_size', type=int, default=64,
+ help='input image will be resized with the given value as width and height')
+net_arg.add_argument('--conv_hidden_num', type=int, default=128,
+                     choices=[64, 128], help='n in the paper')
+net_arg.add_argument('--z_num', type=int, default=64, choices=[64, 128])
+
+# Data
+data_arg = add_argument_group('Data')
+data_arg.add_argument('--dataset', type=str, default='CelebA')
+data_arg.add_argument('--split', type=str, default='train')
+data_arg.add_argument('--batch_size', type=int, default=16)
+data_arg.add_argument('--grayscale', type=str2bool, default=False)
+data_arg.add_argument('--num_worker', type=int, default=4)
+
+# Training / test parameters
+train_arg = add_argument_group('Training')
+train_arg.add_argument('--is_train', type=str2bool, default=True)
+train_arg.add_argument('--optimizer', type=str, default='adam')
+train_arg.add_argument('--max_step', type=int, default=500000)
+train_arg.add_argument('--lr_update_step', type=int, default=100000, choices=[100000, 75000])
+train_arg.add_argument('--d_lr', type=float, default=0.00008)
+train_arg.add_argument('--g_lr', type=float, default=0.00008)
+train_arg.add_argument('--beta1', type=float, default=0.5)
+train_arg.add_argument('--beta2', type=float, default=0.999)
+train_arg.add_argument('--gamma', type=float, default=0.5)
+train_arg.add_argument('--lambda_k', type=float, default=0.001)
+train_arg.add_argument('--use_gpu', type=str2bool, default=True)
+
+train_arg.add_argument('--update_k', type=str2bool, default=True)
+train_arg.add_argument('--k_constant', type=float, default=0.06)
+
+# FID
+fid_arg = add_argument_group('FID')
+fid_arg.add_argument('--train_stats_file', type=str, default='train_stats.npz')
+fid_arg.add_argument('--eval_num_samples', type=int, default=10000)
+fid_arg.add_argument('--eval_batch_size', type=int, default=100)
+fid_arg.add_argument('--eval_step', type=int, default=1000)
+
+
+# Misc
+misc_arg = add_argument_group('Misc')
+misc_arg.add_argument('--load_checkpoint', type=str2bool, default=False)
+misc_arg.add_argument('--checkpoint_name', type=str, default='')
+misc_arg.add_argument('--start_step', type=int, default=0)
+misc_arg.add_argument('--log_step', type=int, default=500)
+misc_arg.add_argument('--save_step', type=int, default=5000)
+misc_arg.add_argument('--num_log_samples', type=int, default=3)
+misc_arg.add_argument('--log_level', type=str, default='INFO', choices=['INFO', 'DEBUG', 'WARN'])
+misc_arg.add_argument('--log_dir', type=str, default='logs')
+misc_arg.add_argument('--data_dir', type=str, default='data')
+misc_arg.add_argument('--test_data_path', type=str, default=None,
+ help='directory with images which will be used in test sample generation')
+misc_arg.add_argument('--sample_per_image', type=int, default=64,
+ help='# of sample per image during test sample generation')
+misc_arg.add_argument('--random_seed', type=int, default=123)
+
+def get_config():
+ config, unparsed = parser.parse_known_args()
+ if config.use_gpu:
+ data_format = 'NCHW'
+ else:
+ data_format = 'NHWC'
+ setattr(config, 'data_format', data_format)
+ return config, unparsed
diff --git a/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/data/README.md b/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/data/README.md
new file mode 100644
index 00000000..89c8a992
--- /dev/null
+++ b/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/data/README.md
@@ -0,0 +1 @@
+Data folder: e.g., the celebA_cropped or lsun_cropped directories are located here unless specified otherwise.
diff --git a/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/data_loader.py b/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/data_loader.py
new file mode 100644
index 00000000..9f3f59d7
--- /dev/null
+++ b/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/data_loader.py
@@ -0,0 +1,61 @@
+import os
+from PIL import Image
+from glob import glob
+import tensorflow as tf
+
+def get_loader(root, batch_size, scale_size, data_format, split=None, is_grayscale=False, seed=None):
+ dataset_name = os.path.basename(root)
+ if dataset_name in ['CelebA'] and split:
+ root = os.path.join(root, 'splits', split)
+
+ if dataset_name == "lsun_cropped":
+ #print("scan files... ")
+ paths = []
+ for i in range(304):
+ print("\rscan directories %d" % (i + 1), end="", flush=True)
+ paths += glob(os.path.join(root, str(i), "*.jpg"))
+ print()
+ print("%d files found" % len(paths))
+ else:
+ print("scan files... ", end="", flush=True)
+ paths = glob(os.path.join(root, "*.jpg"))
+ print(" %d files found" % len(paths))
+
+ tf_decode = tf.image.decode_jpeg
+
+ with Image.open(paths[0]) as img:
+ w, h = img.size
+ shape = [h, w, 3]
+
+ filename_queue = tf.train.string_input_producer(list(paths), shuffle=True, seed=seed)
+ reader = tf.WholeFileReader()
+ filename, data = reader.read(filename_queue)
+ image = tf_decode(data, channels=3)
+
+ if is_grayscale:
+ image = tf.image.rgb_to_grayscale(image)
+ image.set_shape(shape)
+
+ min_after_dequeue = 5000
+ capacity = min_after_dequeue + 3 * batch_size
+
+ queue = tf.train.shuffle_batch(
+ [image], batch_size=batch_size,
+ num_threads=4, capacity=capacity,
+ min_after_dequeue=min_after_dequeue, name='synthetic_inputs')
+
+ if dataset_name in ['CelebA']:
+ queue = tf.image.crop_to_bounding_box(queue, 50, 25, 128, 128)
+ queue = tf.image.resize_nearest_neighbor(queue, [scale_size, scale_size])
+ else:
+ pass
+ #queue = tf.image.resize_nearest_neighbor(queue, [scale_size, scale_size])
+
+ if data_format == 'NCHW':
+ queue = tf.transpose(queue, [0, 3, 1, 2])
+ elif data_format == 'NHWC':
+ pass
+ else:
+ raise Exception("[!] Unkown data_format: {}".format(data_format))
+
+ return tf.to_float(queue)
diff --git a/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/logs/README.md b/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/logs/README.md
new file mode 100644
index 00000000..74d1068c
--- /dev/null
+++ b/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/logs/README.md
@@ -0,0 +1 @@
+Tensorboard logfiles, samples, and checkpoints will be stored in automatically generated subdirectories here.
diff --git a/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/main_fid.py b/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/main_fid.py
new file mode 100644
index 00000000..aec66131
--- /dev/null
+++ b/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/main_fid.py
@@ -0,0 +1,43 @@
+import numpy as np
+import tensorflow as tf
+
+from trainer_fid_batched import Trainer
+from config import get_config
+from data_loader import get_loader
+from utils import prepare_dirs_and_logger, save_config
+
+def main(config):
+ prepare_dirs_and_logger(config)
+
+ #rng = np.random.RandomState(config.random_seed)
+ #tf.set_random_seed(config.random_seed)
+
+ if config.is_train:
+ data_path = config.data_path
+ batch_size = config.batch_size
+ do_shuffle = True
+ else:
+ setattr(config, 'batch_size', 64)
+ if config.test_data_path is None:
+ data_path = config.data_path
+ else:
+ data_path = config.test_data_path
+ batch_size = config.sample_per_image
+ do_shuffle = False
+
+ data_loader = get_loader(
+        data_path, batch_size, config.input_scale_size,  # use the batch size selected above (test mode overrides it)
+ config.data_format, config.split)
+ trainer = Trainer(config, data_loader)
+
+ if config.is_train:
+ save_config(config)
+ trainer.train()
+ else:
+        if not config.checkpoint_name:
+            raise Exception("[!] You should specify `checkpoint_name` to load a pretrained model")
+ trainer.test()
+
+if __name__ == "__main__":
+ config, unparsed = get_config()
+ main(config)
diff --git a/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/models.py b/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/models.py
new file mode 100644
index 00000000..6d9c42dc
--- /dev/null
+++ b/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/models.py
@@ -0,0 +1,91 @@
+import numpy as np
+import tensorflow as tf
+slim = tf.contrib.slim
+
+def GeneratorCNN(z, hidden_num, output_num, repeat_num, data_format, reuse):
+ with tf.variable_scope("G", reuse=reuse) as vs:
+ num_output = int(np.prod([8, 8, hidden_num]))
+ x = slim.fully_connected(z, num_output, activation_fn=None)
+ x = reshape(x, 8, 8, hidden_num, data_format)
+
+ for idx in range(repeat_num):
+ x = slim.conv2d(x, hidden_num, 3, 1, activation_fn=tf.nn.elu, data_format=data_format)
+ x = slim.conv2d(x, hidden_num, 3, 1, activation_fn=tf.nn.elu, data_format=data_format)
+ if idx < repeat_num - 1:
+ x = upscale(x, 2, data_format)
+
+ out = slim.conv2d(x, 3, 3, 1, activation_fn=None, data_format=data_format)
+
+ variables = tf.contrib.framework.get_variables(vs)
+ return out, variables
+
+def DiscriminatorCNN(x, input_channel, z_num, repeat_num, hidden_num, data_format):
+ with tf.variable_scope("D") as vs:
+ # Encoder
+ x = slim.conv2d(x, hidden_num, 3, 1, activation_fn=tf.nn.elu, data_format=data_format)
+
+ prev_channel_num = hidden_num
+ for idx in range(repeat_num):
+ channel_num = hidden_num * (idx + 1)
+ x = slim.conv2d(x, channel_num, 3, 1, activation_fn=tf.nn.elu, data_format=data_format)
+ x = slim.conv2d(x, channel_num, 3, 1, activation_fn=tf.nn.elu, data_format=data_format)
+ if idx < repeat_num - 1:
+ x = slim.conv2d(x, channel_num, 3, 2, activation_fn=tf.nn.elu, data_format=data_format)
+ #x = tf.contrib.layers.max_pool2d(x, [2, 2], [2, 2], padding='VALID')
+
+ x = tf.reshape(x, [-1, np.prod([8, 8, channel_num])])
+ z = x = slim.fully_connected(x, z_num, activation_fn=None)
+
+ # Decoder
+ num_output = int(np.prod([8, 8, hidden_num]))
+ x = slim.fully_connected(x, num_output, activation_fn=None)
+ x = reshape(x, 8, 8, hidden_num, data_format)
+
+ for idx in range(repeat_num):
+ x = slim.conv2d(x, hidden_num, 3, 1, activation_fn=tf.nn.elu, data_format=data_format)
+ x = slim.conv2d(x, hidden_num, 3, 1, activation_fn=tf.nn.elu, data_format=data_format)
+ if idx < repeat_num - 1:
+ x = upscale(x, 2, data_format)
+
+ out = slim.conv2d(x, input_channel, 3, 1, activation_fn=None, data_format=data_format)
+
+ variables = tf.contrib.framework.get_variables(vs)
+ return out, z, variables
+
+def int_shape(tensor):
+ shape = tensor.get_shape().as_list()
+ return [num if num is not None else -1 for num in shape]
+
+def get_conv_shape(tensor, data_format):
+ shape = int_shape(tensor)
+ # always return [N, H, W, C]
+ if data_format == 'NCHW':
+ return [shape[0], shape[2], shape[3], shape[1]]
+ elif data_format == 'NHWC':
+ return shape
+
+def nchw_to_nhwc(x):
+ return tf.transpose(x, [0, 2, 3, 1])
+
+def nhwc_to_nchw(x):
+ return tf.transpose(x, [0, 3, 1, 2])
+
+def reshape(x, h, w, c, data_format):
+ if data_format == 'NCHW':
+ x = tf.reshape(x, [-1, c, h, w])
+ else:
+ x = tf.reshape(x, [-1, h, w, c])
+ return x
+
+def resize_nearest_neighbor(x, new_size, data_format):
+ if data_format == 'NCHW':
+ x = nchw_to_nhwc(x)
+ x = tf.image.resize_nearest_neighbor(x, new_size)
+ x = nhwc_to_nchw(x)
+ else:
+ x = tf.image.resize_nearest_neighbor(x, new_size)
+ return x
+
+def upscale(x, scale, data_format):
+ _, h, w, _ = get_conv_shape(x, data_format)
+ return resize_nearest_neighbor(x, (h*scale, w*scale), data_format)
diff --git a/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/sh/run.sh b/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/sh/run.sh
new file mode 100644
index 00000000..9b8b2352
--- /dev/null
+++ b/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/sh/run.sh
@@ -0,0 +1,35 @@
+lr_d=0.00006
+lr_g=0.00004
+update_k=True
+k_constant=0.08
+dataset=celebA_cropped
+#dataset=lsun_cropped
+train_stats_file="stats/fid_stats_celeba.npz"
+eval_num_samples=50000
+eval_batch_size=200
+eval_step=1000
+lambda_k=0.001
+gamma=0.5
+python3 main_fid.py \
+--dataset $dataset \
+--train_stats_file $train_stats_file \
+--eval_num_samples $eval_num_samples \
+--eval_batch_size $eval_batch_size \
+--eval_step $eval_step \
+--input_height 64 \
+--output_height 64 \
+--is_crop False \
+--is_train True \
+--batch_size 16 \
+--log_dir "logs" \
+--d_lr $lr_d \
+--g_lr $lr_g \
+--lr_update_step 100000 \
+--lambda_k $lambda_k \
+--update_k $update_k \
+--k_constant $k_constant \
+--gamma $gamma \
+--max_step 500000 \
+--load_checkpoint False \
+--checkpoint_name "" \
+--start_step 0
diff --git a/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/stats/README.md b/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/stats/README.md
new file mode 100644
index 00000000..6765f6d4
--- /dev/null
+++ b/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/stats/README.md
@@ -0,0 +1 @@
+Folder for precalculated FID statistics
diff --git a/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/trainer_fid_batched.py b/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/trainer_fid_batched.py
new file mode 100644
index 00000000..aba6d66c
--- /dev/null
+++ b/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/trainer_fid_batched.py
@@ -0,0 +1,499 @@
+from __future__ import print_function
+
+import os
+#import StringIO
+import scipy.misc
+import numpy as np
+from glob import glob
+from tqdm import trange
+from itertools import chain
+from collections import deque
+
+from scipy.linalg import sqrtm
+from numpy.linalg import norm
+
+from models import *
+from utils import save_image
+try:
+ import fid
+except ImportError:
+ print("fid.py not found. Please download fid.py from the TTUR github repository.")
+ raise SystemExit()
+
+
+def next(loader):
+ return loader.next()[0].data.numpy()
+
+def to_nhwc(image, data_format):
+ if data_format == 'NCHW':
+ new_image = nchw_to_nhwc(image)
+ else:
+ new_image = image
+ return new_image
+
+def to_nchw_numpy(image):
+ if image.shape[3] in [1, 3]:
+ new_image = image.transpose([0, 3, 1, 2])
+ else:
+ new_image = image
+ return new_image
+
+def norm_img(image, data_format=None):
+ image = image/127.5 - 1.
+ if data_format:
+ image = to_nhwc(image, data_format)
+ return image
+
+def denorm_img(norm, data_format):
+ return tf.clip_by_value(to_nhwc((norm + 1)*127.5, data_format), 0, 255)
+
+def slerp(val, low, high):
+ """Code from https://github.com/soumith/dcgan.torch/issues/14"""
+ omega = np.arccos(np.clip(np.dot(low/np.linalg.norm(low), high/np.linalg.norm(high)), -1, 1))
+ so = np.sin(omega)
+ if so == 0:
+ return (1.0-val) * low + val * high # L'Hopital's rule/LERP
+ return np.sin((1.0-val)*omega) / so * low + np.sin(val*omega) / so * high
+
+class Trainer(object):
+ def __init__(self, config, data_loader):
+
+ self.config = config
+ self.data_loader = data_loader
+ self.dataset = config.dataset
+
+ self.train_stats_file = config.train_stats_file
+
+ self.beta1 = config.beta1
+ self.beta2 = config.beta2
+ self.optimizer = config.optimizer
+ self.batch_size = config.batch_size
+
+ self.step = tf.Variable(0, name='step', trainable=False)
+
+ self.g_lr = tf.Variable(config.g_lr, name='g_lr')
+ self.d_lr = tf.Variable(config.d_lr, name='d_lr')
+
+ self.g_lr_update = tf.assign(self.g_lr, self.g_lr * 0.5, name='g_lr_update')
+ self.d_lr_update = tf.assign(self.d_lr, self.d_lr * 0.5, name='d_lr_update')
+
+ self.gamma = config.gamma
+ self.lambda_k = config.lambda_k
+
+ self.z_num = config.z_num
+ self.conv_hidden_num = config.conv_hidden_num
+ self.input_scale_size = config.input_scale_size
+
+ self.model_dir = config.model_dir
+
+ self.load_checkpoint = config.load_checkpoint
+ self.checkpoint_name = config.checkpoint_name
+
+ self.use_gpu = config.use_gpu
+ self.data_format = config.data_format
+
+ _, height, width, self.channel = \
+ get_conv_shape(self.data_loader, self.data_format)
+ self.repeat_num = int(np.log2(height)) - 2
+
+ self.start_step = config.start_step
+ self.log_step = config.log_step
+ self.max_step = config.max_step
+ self.save_step = config.save_step
+ self.lr_update_step = config.lr_update_step
+
+        # TTUR additions
+
+ self.update_k = config.update_k
+ self.k_constant = config.k_constant
+
+ #self.global_norm_thres = 100.0
+ #self.clip_value_min = -0.1
+ #self.clip_value_max = 0.1
+
+ self.eval_num_samples = config.eval_num_samples
+ self.eval_batch_size = config.eval_batch_size
+ self.eval_step = config.eval_step
+
+ self.output_height = config.input_scale_size
+ self.output_width = self.output_height
+
+ self.is_train = config.is_train
+ self.build_model()
+
+ self.saver = tf.train.Saver()
+ self.summary_writer = tf.summary.FileWriter(self.model_dir)
+
+ sv = tf.train.Supervisor(logdir=self.model_dir,
+ is_chief=True,
+ saver=self.saver,
+ summary_op=None,
+ summary_writer=self.summary_writer,
+ save_model_secs=3600,
+ global_step=self.step,
+ ready_for_local_init_op=None)
+
+ gpu_options = tf.GPUOptions(allow_growth=True)
+ sess_config = tf.ConfigProto(allow_soft_placement=True,
+ gpu_options=gpu_options)
+
+ self.sess = sv.prepare_or_wait_for_session(config=sess_config)
+
+        # dirty way to bypass graph finalization error
+ g = tf.get_default_graph()
+ g._finalized = False
+
+ if not self.is_train:
+ self.build_test_model()
+
+ def train(self):
+
+ print("load train stats..", end="")
+ # load precalculated training set statistics
+ f = np.load(self.train_stats_file)
+ mu_trn, sigma_trn = f['mu'][:], f['sigma'][:]
+ f.close()
+ print("ok")
+
+ z_fixed = np.random.uniform(-1, 1, size=(self.batch_size, self.z_num))
+
+ x_fixed = self.get_image_from_loader()
+ save_image(x_fixed, '{}/x_fixed.png'.format(self.model_dir))
+
+ prev_measure = 1
+ measure_history = deque([0]*self.lr_update_step, self.lr_update_step)
+
+ # load inference model
+ fid.create_inception_graph("inception-2015-12-05/classify_image_graph_def.pb")
+
+ #query_tensor = fid.get_Fid_query_tensor(self.sess)
+
+ if self.load_checkpoint:
+ if self.load(self.model_dir):
+ print(" [*] Load SUCCESS")
+ else:
+ print(" [!] Load failed...")
+
+        # Preallocate the Inception activations array for the FID calculation
+ #print("preallocate %.3f GB for prediction array.." % (self.eval_num_samples * 2048 / (1024**3)), end=" ", flush=True)
+ inception_activations = np.ones([self.eval_num_samples, 2048])
+ #print("ok")
+
+ for step in trange(self.start_step, self.max_step):
+
+ # Optimize
+ self.sess.run([self.d_optim, self.g_optim])
+
+ # Feed dict
+ fetch_dict = {"measure": self.measure}
+
+ if self.update_k:
+ fetch_dict.update({"k_update": self.k_update})
+
+ if step % self.log_step == 0:
+ fetch_dict.update({
+ "summary": self.summary_op,
+ "g_loss": self.g_loss,
+ "d_loss": self.d_loss,
+ "k_t": self.k_t,
+ })
+
+ # Get summaries
+ result = self.sess.run(fetch_dict)
+
+ measure = result['measure']
+ measure_history.append(measure)
+
+ if step % self.log_step == 0:
+ self.summary_writer.add_summary(result['summary'], step)
+ self.summary_writer.flush()
+
+ g_loss = result['g_loss']
+ d_loss = result['d_loss']
+ k_t = result['k_t']
+
+ print("[{}/{}] Loss_D: {:.6f} Loss_G: {:.6f} measure: {:.4f}, k_t: {:.4f}". \
+ format(step, self.max_step, d_loss, g_loss, measure, k_t))
+
+ if step % (self.log_step * 10) == 0:
+ x_fake = self.generate(z_fixed, self.model_dir, idx=step)
+ self.autoencode(x_fixed, self.model_dir, idx=step, x_fake=x_fake)
+
+ if step % self.lr_update_step == self.lr_update_step - 1:
+ self.sess.run([self.g_lr_update, self.d_lr_update])
+
+ # FID
+ if step % self.eval_step == 0:
+
+ eval_batches_num = self.eval_num_samples // self.eval_batch_size
+
+ for eval_batch in range(eval_batches_num):
+
+ print("\rFID batch %d/%d" % (eval_batch + 1, eval_batches_num), end="", flush=True)
+
+ sample_z_eval = np.random.uniform(-1, 1, size=(self.eval_batch_size, self.z_num))
+ samples_eval = self.generate(sample_z_eval, self.model_dir, save=False)
+
+ activations_batch = fid.get_activations(samples_eval,
+ self.sess,
+ batch_size=self.eval_batch_size,
+ verbose=False)
+
+ frm = eval_batch * self.eval_batch_size
+ to = frm + self.eval_batch_size
+ inception_activations[frm:to,:] = activations_batch
+
+ print()
+
+ # calculate FID
+ print("FID:", end=" ", flush=True)
+ try:
+ mu_eval = np.mean(inception_activations, axis=0)
+ sigma_eval = np.cov(inception_activations, rowvar=False)
+ FID = fid.calculate_frechet_distance(mu_eval, sigma_eval, mu_trn, sigma_trn)
+ except Exception as e:
+ print(e)
+ FID = 500
+ print(FID)
+
+ self.sess.run(tf.assign(self.fid, FID))
+ summary_str = self.sess.run(self.fid_sum)
+ self.summary_writer.add_summary(summary_str, step)
+
+ #print("eval finished")
+
+
+ def build_model(self):
+
+ self.x = self.data_loader
+ x = norm_img(self.x)
+
+ self.z = tf.random_uniform(
+ (tf.shape(x)[0], self.z_num), minval=-1.0, maxval=1.0)
+
+ if self.update_k:
+ self.k_t = tf.Variable(0.0, trainable=False, name='k_t')
+ else:
+ self.k_t = tf.constant(self.k_constant, name="k_t")
+
+ G, self.G_var = GeneratorCNN(
+ self.z, self.conv_hidden_num, self.channel,
+ self.repeat_num, self.data_format, reuse=False)
+
+ d_out, self.D_z, self.D_var = DiscriminatorCNN(
+ tf.concat([G, x], 0), self.channel, self.z_num, self.repeat_num,
+ self.conv_hidden_num, self.data_format)
+ AE_G, AE_x = tf.split(d_out, 2, 0)
+
+ self.G = denorm_img(G, self.data_format)
+ self.AE_G, self.AE_x = denorm_img(AE_G, self.data_format), denorm_img(AE_x, self.data_format)
+
+ if self.optimizer == 'adam':
+ optimizer = tf.train.AdamOptimizer
+ else:
+ raise Exception("[!] Caution! Paper didn't use {} opimizer other than Adam".format(config.optimizer))
+
+ g_optimizer, d_optimizer = optimizer(self.g_lr), optimizer(self.d_lr)
+
+ self.d_loss_real = tf.reduce_mean(tf.abs(AE_x - x))
+ self.d_loss_fake = tf.reduce_mean(tf.abs(AE_G - G))
+
+ self.d_loss = self.d_loss_real - self.k_t * self.d_loss_fake
+ self.g_loss = tf.reduce_mean(tf.abs(AE_G - G))
+
+ self.d_optim = d_optimizer.minimize(self.d_loss, var_list=self.D_var)
+ self.g_optim = g_optimizer.minimize(self.g_loss, global_step=self.step, var_list=self.G_var)
+
+ #grads, vrbls = zip(*d_optimizer.compute_gradients(self.d_loss, self.D_var))
+ #grads, _ = tf.clip_by_global_norm(grads, self.global_norm_thres)
+ #grads = [
+ # tf.clip_by_value(grad, self.clip_value_min, self.clip_value_max)
+ # for grad in grads]
+ #grads = [tf.div(grad, tf.reduce_max(grad)) for grad in grads]
+ #self.d_optim = d_optimizer.apply_gradients(zip(grads, vrbls))
+
+ #grads, vrbls = zip(*g_optimizer.compute_gradients(self.g_loss, self.G_var))
+ #grads, _ = tf.clip_by_global_norm(grads, self.global_norm_thres)
+ #grads = [
+ # tf.clip_by_value(grad, self.clip_value_min, self.clip_value_max)
+ # for grad in grads]
+ #grads = [tf.div(grad, tf.reduce_max(grad)) for grad in grads]
+ #self.g_optim = g_optimizer.apply_gradients(zip(grads, vrbls), global_step=self.step)
+
+
+ self.balance = self.gamma * self.d_loss_real - self.g_loss
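+        # BEGAN convergence measure M = L(x) + |gamma * L(x) - L(G(z))|; lower means better convergence.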
+ self.measure = self.d_loss_real + tf.abs(self.balance)
+
+ # k update
+
+ if self.update_k:
+ self.k_update = tf.assign(self.k_t,
+ tf.clip_by_value(self.k_t + self.lambda_k * self.balance, 0, 1))
+
+ self.summary_op = tf.summary.merge([
+ tf.summary.image("G", self.G),
+ tf.summary.image("AE_G", self.AE_G),
+ tf.summary.image("AE_x", self.AE_x),
+
+ tf.summary.scalar("loss/d_loss", self.d_loss),
+ tf.summary.scalar("loss/d_loss_real", self.d_loss_real),
+ tf.summary.scalar("loss/d_loss_fake", self.d_loss_fake),
+ tf.summary.scalar("loss/g_loss", self.g_loss),
+ tf.summary.scalar("misc/measure", self.measure),
+ tf.summary.scalar("misc/k_t", self.k_t),
+ tf.summary.scalar("misc/d_lr", self.d_lr),
+ tf.summary.scalar("misc/g_lr", self.g_lr),
+ tf.summary.scalar("misc/balance", self.balance),
+ ])
+
+        # TTUR additions
+
+ self.image_enc_data = tf.placeholder(tf.uint8,[self.output_height, self.output_width, 3])
+ self.encode_jpeg = tf.image.encode_jpeg(self.image_enc_data)
+
+ self.fid = tf.Variable(0.0, trainable=False)
+ self.fid_sum = tf.summary.scalar("FID", self.fid)
+
+
+ def build_test_model(self):
+ with tf.variable_scope("test") as vs:
+ # Extra ops for interpolation
+ z_optimizer = tf.train.AdamOptimizer(0.0001)
+
+ self.z_r = tf.get_variable("z_r", [self.batch_size, self.z_num], tf.float32)
+ self.z_r_update = tf.assign(self.z_r, self.z)
+
+ G_z_r, _ = GeneratorCNN(
+ self.z_r, self.conv_hidden_num, self.channel, self.repeat_num, self.data_format, reuse=True)
+
+ with tf.variable_scope("test") as vs:
+ self.z_r_loss = tf.reduce_mean(tf.abs(self.x - G_z_r))
+ self.z_r_optim = z_optimizer.minimize(self.z_r_loss, var_list=[self.z_r])
+
+ test_variables = tf.contrib.framework.get_variables(vs)
+ self.sess.run(tf.variables_initializer(test_variables))
+
+ def generate(self, inputs, root_path=None, path=None, idx=None, save=True):
+ x = self.sess.run(self.G, {self.z: inputs})
+ if path is None and save:
+ path = os.path.join(root_path, '{}_G.png'.format(idx))
+ save_image(x, path)
+ print("[*] Samples saved: {}".format(path))
+ return x
+
+ def autoencode(self, inputs, path, idx=None, x_fake=None):
+ items = {
+ 'real': inputs,
+ 'fake': x_fake,
+ }
+ for key, img in items.items():
+ if img is None:
+ continue
+ if img.shape[3] in [1, 3]:
+ img = img.transpose([0, 3, 1, 2])
+
+ x_path = os.path.join(path, '{}_D_{}.png'.format(idx, key))
+ x = self.sess.run(self.AE_x, {self.x: img})
+ save_image(x, x_path)
+ print("[*] Samples saved: {}".format(x_path))
+
+ def encode(self, inputs):
+ if inputs.shape[3] in [1, 3]:
+ inputs = inputs.transpose([0, 3, 1, 2])
+ return self.sess.run(self.D_z, {self.x: inputs})
+
+ def decode(self, z):
+ return self.sess.run(self.AE_x, {self.D_z: z})
+
+ def interpolate_G(self, real_batch, step=0, root_path='.', train_epoch=0):
+ batch_size = len(real_batch)
+ half_batch_size = int(batch_size/2)
+
+ self.sess.run(self.z_r_update)
+ tf_real_batch = to_nchw_numpy(real_batch)
+ for i in trange(train_epoch):
+ z_r_loss, _ = self.sess.run([self.z_r_loss, self.z_r_optim], {self.x: tf_real_batch})
+ z = self.sess.run(self.z_r)
+
+ z1, z2 = z[:half_batch_size], z[half_batch_size:]
+ real1_batch, real2_batch = real_batch[:half_batch_size], real_batch[half_batch_size:]
+
+ generated = []
+ for idx, ratio in enumerate(np.linspace(0, 1, 10)):
+ z = np.stack([slerp(ratio, r1, r2) for r1, r2 in zip(z1, z2)])
+ z_decode = self.generate(z, save=False)
+ generated.append(z_decode)
+
+ generated = np.stack(generated).transpose([1, 0, 2, 3, 4])
+ for idx, img in enumerate(generated):
+ save_image(img, os.path.join(root_path, 'test{}_interp_G_{}.png'.format(step, idx)), nrow=10)
+
+ all_img_num = np.prod(generated.shape[:2])
+ batch_generated = np.reshape(generated, [all_img_num] + list(generated.shape[2:]))
+ save_image(batch_generated, os.path.join(root_path, 'test{}_interp_G.png'.format(step)), nrow=10)
+
+ def interpolate_D(self, real1_batch, real2_batch, step=0, root_path="."):
+ real1_encode = self.encode(real1_batch)
+ real2_encode = self.encode(real2_batch)
+
+ decodes = []
+ for idx, ratio in enumerate(np.linspace(0, 1, 10)):
+ z = np.stack([slerp(ratio, r1, r2) for r1, r2 in zip(real1_encode, real2_encode)])
+ z_decode = self.decode(z)
+ decodes.append(z_decode)
+
+ decodes = np.stack(decodes).transpose([1, 0, 2, 3, 4])
+ for idx, img in enumerate(decodes):
+ img = np.concatenate([[real1_batch[idx]], img, [real2_batch[idx]]], 0)
+ save_image(img, os.path.join(root_path, 'test{}_interp_D_{}.png'.format(step, idx)), nrow=10 + 2)
+
+ def test(self):
+ root_path = "./"#self.model_dir
+
+ all_G_z = None
+ for step in range(3):
+ real1_batch = self.get_image_from_loader()
+ real2_batch = self.get_image_from_loader()
+
+ save_image(real1_batch, os.path.join(root_path, 'test{}_real1.png'.format(step)))
+ save_image(real2_batch, os.path.join(root_path, 'test{}_real2.png'.format(step)))
+
+ self.autoencode(
+ real1_batch, self.model_dir, idx=os.path.join(root_path, "test{}_real1".format(step)))
+ self.autoencode(
+ real2_batch, self.model_dir, idx=os.path.join(root_path, "test{}_real2".format(step)))
+
+ self.interpolate_G(real1_batch, step, root_path)
+ #self.interpolate_D(real1_batch, real2_batch, step, root_path)
+
+ z_fixed = np.random.uniform(-1, 1, size=(self.batch_size, self.z_num))
+ G_z = self.generate(z_fixed, path=os.path.join(root_path, "test{}_G_z.png".format(step)))
+
+ if all_G_z is None:
+ all_G_z = G_z
+ else:
+ all_G_z = np.concatenate([all_G_z, G_z])
+ save_image(all_G_z, '{}/G_z{}.png'.format(root_path, step))
+
+ save_image(all_G_z, '{}/all_G_z.png'.format(root_path), nrow=16)
+
+ def get_image_from_loader(self):
+ x = self.data_loader.eval(session=self.sess)
+ if self.data_format == 'NCHW':
+ x = x.transpose([0, 2, 3, 1])
+ return x
+
+ # Load checkpoint
+ def load(self, checkpoint_dir):
+ print(" [*] Reading checkpoints from %s..." % checkpoint_dir)
+ #checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)
+
+ ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
+ if ckpt and ckpt.model_checkpoint_path:
+ ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
+ self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
+ print(" [*] Success to read {}".format(ckpt_name))
+ return True
+ else:
+ print(" [*] Failed to find a checkpoint")
+ return False
diff --git a/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/utils.py b/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/utils.py
new file mode 100644
index 00000000..49235123
--- /dev/null
+++ b/Wav2Lip-master/evaluation/TTUR-master/BEGAN_FID_batched/utils.py
@@ -0,0 +1,83 @@
+from __future__ import print_function
+
+import os
+import math
+import json
+import logging
+import numpy as np
+from PIL import Image
+from datetime import datetime
+
+def prepare_dirs_and_logger(config):
+ formatter = logging.Formatter("%(asctime)s:%(levelname)s::%(message)s")
+ logger = logging.getLogger()
+
+ for hdlr in logger.handlers:
+ logger.removeHandler(hdlr)
+
+ handler = logging.StreamHandler()
+ handler.setFormatter(formatter)
+
+ logger.addHandler(handler)
+
+ if config.load_checkpoint:
+ #if config.load_path.startswith(config.log_dir):
+ # config.model_dir = config.load_path
+ #else:
+ # if config.load_path.startswith(config.dataset):
+ # config.model_name = config.load_path
+ # else:
+ config.model_name = config.checkpoint_name
+ else:
+ config.model_name = "%s_%.6f_%.6f_%s" % (get_time(), config.d_lr, config.g_lr, config.update_k)
+
+ if not hasattr(config, 'model_dir'):
+ config.model_dir = os.path.join(config.log_dir, config.model_name)
+
+ config.data_path = os.path.join(config.data_dir, config.dataset)
+
+ for path in [config.log_dir, config.data_dir, config.model_dir]:
+ if not os.path.exists(path):
+ os.makedirs(path)
+
+def get_time():
+ return datetime.now().strftime("%m%d_%H%M%S")
+
+def save_config(config):
+ param_path = os.path.join(config.model_dir, "params.json")
+
+ print("[*] MODEL dir: %s" % config.model_dir)
+ print("[*] PARAM path: %s" % param_path)
+
+ with open(param_path, 'w') as fp:
+ json.dump(config.__dict__, fp, indent=4, sort_keys=True)
+
+def rank(array):
+ return len(array.shape)
+
+def make_grid(tensor, nrow=8, padding=2,
+ normalize=False, scale_each=False):
+ """Code based on https://github.com/pytorch/vision/blob/master/torchvision/utils.py"""
+ nmaps = tensor.shape[0]
+ xmaps = min(nrow, nmaps)
+ ymaps = int(math.ceil(float(nmaps) / xmaps))
+ height, width = int(tensor.shape[1] + padding), int(tensor.shape[2] + padding)
+ grid = np.zeros([height * ymaps + 1 + padding // 2, width * xmaps + 1 + padding // 2, 3], dtype=np.uint8)
+ k = 0
+ for y in range(ymaps):
+ for x in range(xmaps):
+ if k >= nmaps:
+ break
+ h, h_width = y * height + 1 + padding // 2, height - padding
+ w, w_width = x * width + 1 + padding // 2, width - padding
+
+ grid[h:h+h_width, w:w+w_width] = tensor[k]
+ k = k + 1
+ return grid
+
+def save_image(tensor, filename, nrow=8, padding=2,
+ normalize=False, scale_each=False):
+ ndarr = make_grid(tensor, nrow=nrow, padding=padding,
+ normalize=normalize, scale_each=scale_each)
+ im = Image.fromarray(ndarr)
+ im.save(filename)
diff --git a/Wav2Lip-master/evaluation/TTUR-master/DCGAN_FID_batched/README.md b/Wav2Lip-master/evaluation/TTUR-master/DCGAN_FID_batched/README.md
new file mode 100644
index 00000000..b917a285
--- /dev/null
+++ b/Wav2Lip-master/evaluation/TTUR-master/DCGAN_FID_batched/README.md
@@ -0,0 +1,22 @@
+# DCGAN for CelebA evaluated with FID (batched version)
+
+DCGAN fork from https://github.com/carpedm20/DCGAN-tensorflow
+
+Precalculated real world / training data statistics can be downloaded from:
+http://bioinf.jku.at/research/ttur/ttur.html
+
+## Usage
+- Copy the file fid.py from TTUR root into the DCGAN_FID_batched directory
+- Modify the dataset variable in run.sh
+- Modify the data_path variable in run.sh
+- Download the precalculated statistics (see above) and save them into the "stats" folder.
+- Modify the incept_path variable in run.sh
+- Run the command: bash run.sh
+- Checkpoint, sample and Tensorboard log directories will be automatically created in logs.
+
+## FID evaluation: parameters fid_n_samples and fid_sample_batchsize
+The FID evaluation compares precalculated statistics of real-world data against statistics of generated data.
+Calculating the latter is a tradeoff between the number of samples (the more the better) and the available hardware. Two parameters
+in run.sh control this calculation: fid_n_samples and fid_sample_batchsize. The first specifies the total number of
+generated samples on which the statistics are calculated. Since this number should be high, it is usually not possible
+to generate all samples at once, so the generation process is batched with batches of size fid_sample_batchsize (see the sketch below).
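+
+A minimal sketch of this batched accumulation (illustrative only; it assumes the `fid.py` helpers from the TTUR root, an active TensorFlow session `sess`, a `generate(z)` sampling function with latent size `z_dim`, and precalculated real-data statistics `mu_real`, `sigma_real`):
+
+```python
+import numpy as np
+import fid
+
+fid_n_samples, fid_sample_batchsize = 10000, 100
+activations = np.empty((fid_n_samples, 2048))  # one 2048-dim Inception feature vector per sample
+for i in range(fid_n_samples // fid_sample_batchsize):
+    frm = i * fid_sample_batchsize
+    z = np.random.uniform(-1, 1, size=(fid_sample_batchsize, z_dim))  # z_dim is hypothetical here
+    activations[frm:frm + fid_sample_batchsize] = fid.get_activations(
+        generate(z), sess, batch_size=fid_sample_batchsize)
+mu, sigma = activations.mean(axis=0), np.cov(activations, rowvar=False)
+score = fid.calculate_frechet_distance(mu, sigma, mu_real, sigma_real)
+```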
diff --git a/Wav2Lip-master/evaluation/TTUR-master/DCGAN_FID_batched/logs/README.md b/Wav2Lip-master/evaluation/TTUR-master/DCGAN_FID_batched/logs/README.md
new file mode 100644
index 00000000..7328fd0f
--- /dev/null
+++ b/Wav2Lip-master/evaluation/TTUR-master/DCGAN_FID_batched/logs/README.md
@@ -0,0 +1 @@
+Tensorboard logfiles, samples, checkpoints will be stored in automatically generated subdirectories here.
diff --git a/Wav2Lip-master/evaluation/TTUR-master/DCGAN_FID_batched/main.py b/Wav2Lip-master/evaluation/TTUR-master/DCGAN_FID_batched/main.py
new file mode 100644
index 00000000..fd9b0427
--- /dev/null
+++ b/Wav2Lip-master/evaluation/TTUR-master/DCGAN_FID_batched/main.py
@@ -0,0 +1,159 @@
+import os
+#os.environ['CUDA_VISIBLE_DEVICES'] = "1"
+import numpy as np
+
+from model import DCGAN
+from utils import pp, visualize, to_json
+
+import tensorflow as tf
+import fid
+
+flags = tf.app.flags
+flags.DEFINE_integer("epoch", 25, "Epoch to train [25]")
+
+flags.DEFINE_float("learning_rate_d", 0.0002, "Discriminator learning rate of for adam [0.002]")
+flags.DEFINE_float("learning_rate_g", 0.0002, "Generator learning rate of for adam [0.0002]")
+flags.DEFINE_float("lr_decay_rate_d", 1.0, "Discriminator learning rate decay [1.0]")
+flags.DEFINE_float("lr_decay_rate_g", 1.0, "Generator learning rate decay [1.0]")
+
+flags.DEFINE_float("beta1", 0.5, "Momentum term of adam [0.5]")
+flags.DEFINE_integer("train_size", np.inf, "The size of train images [np.inf]")
+flags.DEFINE_integer("batch_size", 64, "The size of batch images [64]")
+flags.DEFINE_integer("input_height", 108, "The size of image to use (will be center cropped). [108]")
+flags.DEFINE_integer("input_width", None, "The size of image to use (will be center cropped). If None, same value as input_height [None]")
+flags.DEFINE_integer("output_height", 64, "The size of the output images to produce [64]")
+flags.DEFINE_integer("output_width", None, "The size of the output images to produce. If None, same value as output_height [None]")
+flags.DEFINE_integer("c_dim", 3, "Dimension of image color. [3]")
+flags.DEFINE_string("dataset", "celebA", "The name of dataset [celebA, mnist, lsun]")
+flags.DEFINE_string("input_fname_pattern", "*.jpg", "Glob pattern of filename of input images [*]")
+flags.DEFINE_string("checkpoint_dir", "checkpoint", "Directory name to save the checkpoints [checkpoint]")
+flags.DEFINE_string("checkpoint_name", None, "Directory name to load a checkpoint from [None]")
+flags.DEFINE_boolean("load_checkpoint", False, "Load checkpoint [False]")
+flags.DEFINE_integer("counter_start", 0, "counter to start with [0]")
+flags.DEFINE_string("sample_dir", "samples", "Directory name to save the image samples [samples]")
+flags.DEFINE_string("log_dir", "logs", "Directory name for summary logs [logs]")
+flags.DEFINE_boolean("is_train", False, "True for training, False for testing [False]")
+flags.DEFINE_boolean("is_crop", False, "True for training, False for testing [False]")
+flags.DEFINE_boolean("visualize", False, "True for visualizing, False for nothing [False]")
+
+# added parameters for batched fid
+flags.DEFINE_string("stats_path", None, "Path to pretrained statistics")
+flags.DEFINE_string("data_path", None, "Path to input data")
+flags.DEFINE_string("incept_path", None, "Path to inception net.")
+flags.DEFINE_integer("fid_n_samples", 10000, "Total number of samples generated to calculate the FID statistics. Will be adjusted if not a multiple of fid_sample_batchsize [10000]")
+flags.DEFINE_integer("fid_sample_batchsize", 5000, "Batchsize of batches that constitute all generated samples to calculate the FID statistics [5000]")
+flags.DEFINE_integer("fid_batch_size", 100, "Batchsize used for FID calculation [500]")
+flags.DEFINE_boolean("fid_verbose", True, "Report current state of FID calculation [True]")
+flags.DEFINE_integer("fid_eval_steps", 1000, "Evaluate FID after this number of minibatches")
+
+
+FLAGS = flags.FLAGS
+
+def main(_):
+
+ pp.pprint(flags.FLAGS.__flags)
+
+ # Create directories if necessary
+ if not os.path.exists(FLAGS.log_dir):
+ print("*** create log dir %s" % FLAGS.log_dir)
+ os.makedirs(FLAGS.log_dir)
+ if not os.path.exists(FLAGS.sample_dir):
+ print("*** create sample dir %s" % FLAGS.sample_dir)
+ os.makedirs(FLAGS.sample_dir)
+ if not os.path.exists(FLAGS.checkpoint_dir):
+ print("*** create checkpoint dir %s" % FLAGS.checkpoint_dir)
+ os.makedirs(FLAGS.checkpoint_dir)
+
+ # Write flags to log dir
+ with open("%s/flags.txt" % FLAGS.log_dir, "w") as flags_file:
+ for k, v in flags.FLAGS.__flags.items():
+ print('{}, {}'.format(k, v), file=flags_file)
+
+ if FLAGS.input_width is None:
+ FLAGS.input_width = FLAGS.input_height
+ if FLAGS.output_width is None:
+ FLAGS.output_width = FLAGS.output_height
+
+
+ run_config = tf.ConfigProto()
+ run_config.gpu_options.allow_growth=True
+
+ # load model
+ fid.create_inception_graph(FLAGS.incept_path)
+
+ with tf.Session(config=run_config) as sess:
+ # build the DCGAN model for the chosen dataset
+ if FLAGS.dataset == 'mnist':
+ dcgan = DCGAN(
+ sess,
+ input_width=FLAGS.input_width,
+ input_height=FLAGS.input_height,
+ output_width=FLAGS.output_width,
+ output_height=FLAGS.output_height,
+ batch_size=FLAGS.batch_size,
+ y_dim=10,
+ c_dim=1,
+ dataset_name=FLAGS.dataset,
+ input_fname_pattern=FLAGS.input_fname_pattern,
+ is_crop=FLAGS.is_crop,
+ checkpoint_dir=FLAGS.checkpoint_dir,
+ sample_dir=FLAGS.sample_dir,
+ log_dir=FLAGS.log_dir,
+ stats_path=FLAGS.stats_path,
+ data_path=FLAGS.data_path,
+ fid_n_samples=FLAGS.fid_n_samples,
+ fid_sample_batchsize=FLAGS.fid_sample_batchsize,
+ fid_batch_size=FLAGS.fid_batch_size,
+ fid_verbose=FLAGS.fid_verbose,
+ beta1=FLAGS.beta1)
+ else:
+ dcgan = DCGAN(
+ sess,
+ input_width=FLAGS.input_width,
+ input_height=FLAGS.input_height,
+ output_width=FLAGS.output_width,
+ output_height=FLAGS.output_height,
+ batch_size=FLAGS.batch_size,
+ c_dim=FLAGS.c_dim,
+ dataset_name=FLAGS.dataset,
+ input_fname_pattern=FLAGS.input_fname_pattern,
+ is_crop=FLAGS.is_crop,
+ load_checkpoint=FLAGS.load_checkpoint,
+ counter_start=FLAGS.counter_start,
+ checkpoint_dir=FLAGS.checkpoint_dir,
+ sample_dir=FLAGS.sample_dir,
+ log_dir=FLAGS.log_dir,
+ stats_path=FLAGS.stats_path,
+ data_path=FLAGS.data_path,
+ fid_n_samples=FLAGS.fid_n_samples,
+ fid_sample_batchsize=FLAGS.fid_sample_batchsize,
+ fid_batch_size=FLAGS.fid_batch_size,
+ fid_verbose=FLAGS.fid_verbose,
+ beta1=FLAGS.beta1)
+
+ if FLAGS.is_train:
+ dcgan.train(FLAGS)
+ else:
+ if not dcgan.load(FLAGS.checkpoint_dir):
+ raise Exception("[!] Train a model first, then run test mode")
+
+
+ # to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
+ # [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
+ # [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
+ # [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
+ # [dcgan.h4_w, dcgan.h4_b, None])
+
+ # Below is codes for visualization
+ #OPTION = 4
+ #visualize(sess, dcgan, FLAGS, OPTION)
+
+if __name__ == '__main__':
+ tf.app.run()
diff --git a/Wav2Lip-master/evaluation/TTUR-master/DCGAN_FID_batched/model.py b/Wav2Lip-master/evaluation/TTUR-master/DCGAN_FID_batched/model.py
new file mode 100644
index 00000000..b4aeccf1
--- /dev/null
+++ b/Wav2Lip-master/evaluation/TTUR-master/DCGAN_FID_batched/model.py
@@ -0,0 +1,523 @@
+import os
+import time
+import math
+from glob import glob
+import tensorflow as tf
+import numpy as np
+from random import sample
+
+from ops import *
+from utils import *
+
+# import fid
+import fid
+
+def conv_out_size_same(size, stride):
+ return int(math.ceil(float(size) / float(stride)))
+
+class DCGAN(object):
+ def __init__(self, sess, input_height=108, input_width=108, is_crop=True,
+ batch_size=64, sample_num = 64,
+ output_height=64, output_width=64,
+ y_dim=None, z_dim=100, gf_dim=64, df_dim=64,
+ gfc_dim=1024, dfc_dim=1024, c_dim=3,
+ dataset_name='default',
+ input_fname_pattern='*.jpg',
+ load_checkpoint=False, counter_start=0,
+ checkpoint_dir=None,
+ sample_dir=None,
+ log_dir=None,
+ stats_path=None,
+ data_path=None,
+ fid_n_samples=10000,
+ fid_sample_batchsize=5000,
+ fid_batch_size=500,
+ fid_verbose=False,
+ beta1=0.5):
+ """
+
+ Args:
+ sess: TensorFlow session
+ batch_size: The size of batch. Should be specified before training.
+ y_dim: (optional) Dimension of dim for y. [None]
+ z_dim: (optional) Dimension of dim for Z. [100]
+ gf_dim: (optional) Dimension of gen filters in first conv layer. [64]
+ df_dim: (optional) Dimension of discrim filters in first conv layer. [64]
+ gfc_dim: (optional) Dimension of gen units for fully connected layer. [1024]
+ dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024]
+ c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [3]
+ """
+
+ self.sess = sess
+ self.is_crop = is_crop
+ self.is_grayscale = (c_dim == 1)
+
+ self.batch_size = batch_size
+ self.sample_num = sample_num
+
+ self.input_height = input_height
+ self.input_width = input_width
+ self.output_height = output_height
+ self.output_width = output_width
+
+ self.y_dim = y_dim
+ self.z_dim = z_dim
+
+ self.gf_dim = gf_dim
+ self.df_dim = df_dim
+
+ self.gfc_dim = gfc_dim
+ self.dfc_dim = dfc_dim
+
+ self.c_dim = c_dim
+
+ # Batch normalization : deals with poor initialization helps gradient flow
+ self.d_bn1 = batch_norm(name='d_bn1')
+ self.d_bn2 = batch_norm(name='d_bn2')
+ self.d_bn3 = batch_norm(name='d_bn3')
+
+ self.g_bn0 = batch_norm(name='g_bn0')
+ self.g_bn1 = batch_norm(name='g_bn1')
+ self.g_bn2 = batch_norm(name='g_bn2')
+ self.g_bn3 = batch_norm(name='g_bn3')
+
+ self.dataset_name = dataset_name
+ self.input_fname_pattern = input_fname_pattern
+ self.load_checkpoint = load_checkpoint
+ self.checkpoint_dir = checkpoint_dir
+ self.counter_start = counter_start
+ self.log_dir = log_dir
+ self.stats_path = stats_path
+ self.data_path = data_path
+ self.fid_n_samples=fid_n_samples
+ self.fid_sample_batchsize=fid_sample_batchsize
+ self.fid_batch_size = fid_batch_size
+ self.fid_verbose = fid_verbose
+
+ self.beta1 = beta1
+
+ print("build model.. ", end="", flush=True)
+ self.build_model()
+ print("ok")
+
+ # Model
+ def build_model(self):
+
+ # Learning rate
+ self.learning_rate_d = tf.Variable(0.0, trainable=False)
+ self.learning_rate_g = tf.Variable(0.0, trainable=False)
+
+ # Placeholders
+
+ if self.is_crop:
+ image_dims = [self.output_height, self.output_width, self.c_dim]
+ else:
+ image_dims = [self.input_height, self.input_width, self.c_dim]
+
+ self.inputs = tf.placeholder(
+ tf.float32, [self.batch_size] + image_dims, name='real_images')
+ self.sample_inputs = tf.placeholder(
+ tf.float32, [self.sample_num] + image_dims, name='sample_inputs')
+
+ self.z = tf.placeholder(
+ tf.float32, [None, self.z_dim], name='z')
+ self.z_sum = tf.summary.histogram("z", self.z)
+
+ self.z_fid = tf.placeholder(
+ tf.float32, [None, self.z_dim], name='z_fid')
+
+ self.fid = tf.Variable(0.0, trainable=False)
+
+ # Inputs
+ inputs = self.inputs
+ sample_inputs = self.sample_inputs
+
+ # Discriminator and generator
+ if self.y_dim:
+ print()
+ print("Conditional GAN for MNIST not supported.")
+ raise SystemExit()
+
+ else:
+ self.G = self.generator(self.z, batch_size=self.batch_size)
+ self.D_real, self.D_logits_real = self.discriminator(inputs)
+
+ self.sampler_fid = self.sampler_func(self.z_fid, self.fid_sample_batchsize)
+ self.sampler = self.sampler_func(self.z, self.batch_size)
+ self.D_fake, self.D_logits_fake = self.discriminator(self.G, reuse=True)
+
+ # Summaries
+ self.d_real_sum = tf.summary.histogram("d_real", self.D_real)
+ self.d_fake_sum = tf.summary.histogram("d_fake", self.D_fake)
+ self.G_sum = tf.summary.image("G", self.G)
+
+ def sigmoid_cross_entropy_with_logits(x, y):
+ # compatibility shim: TensorFlow < 1.0 named the `labels` argument `targets`
+ try:
+ return tf.nn.sigmoid_cross_entropy_with_logits(logits=x, labels=y)
+ except TypeError:
+ return tf.nn.sigmoid_cross_entropy_with_logits(logits=x, targets=y)
+
+ # Discriminator Loss Real
+ self.d_loss_real = tf.reduce_mean(
+ sigmoid_cross_entropy_with_logits(self.D_logits_real, tf.ones_like(self.D_real)))
+ # Discriminator Loss Fake
+ self.d_loss_fake = tf.reduce_mean(
+ sigmoid_cross_entropy_with_logits(self.D_logits_fake, tf.zeros_like(self.D_fake)))
+ # Generator Loss
+ self.g_loss = tf.reduce_mean(
+ sigmoid_cross_entropy_with_logits(self.D_logits_fake, tf.ones_like(self.D_fake)))
+
+ self.d_loss_real_sum = tf.summary.scalar("d_loss_real", self.d_loss_real)
+ self.d_loss_fake_sum = tf.summary.scalar("d_loss_fake", self.d_loss_fake)
+
+ # Discriminator Loss Combined
+ self.d_loss = self.d_loss_real + self.d_loss_fake
+
+ self.g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
+ self.d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)
+
+ self.lrate_sum_d = tf.summary.scalar('learning_rate_d', self.learning_rate_d)
+ self.lrate_sum_g = tf.summary.scalar('learning_rate_g', self.learning_rate_g)
+
+ self.fid_sum = tf.summary.scalar("FID", self.fid)
+
+ # Variables
+ t_vars = tf.trainable_variables()
+
+ self.d_vars = [var for var in t_vars if 'd_' in var.name]
+ self.g_vars = [var for var in t_vars if 'g_' in var.name]
+
+ # Train optimizers
+ opt_d = tf.train.AdamOptimizer(self.learning_rate_d, beta1=self.beta1)
+ opt_g = tf.train.AdamOptimizer(self.learning_rate_g, beta1=self.beta1)
+
+ # Discriminator
+ grads_and_vars = opt_d.compute_gradients(self.d_loss, var_list=self.d_vars)
+ self.d_optim = opt_d.apply_gradients(grads_and_vars)
+
+ # Gradient summaries discriminator
+ sum_grad_d = []
+ for i, (grad, vars_) in enumerate(grads_and_vars):
+ grad_l2 = tf.sqrt(tf.reduce_sum(tf.square(grad)))
+ sum_grad_d.append(tf.summary.scalar("grad_l2_d_%d_%s" % (i, vars_.name), grad_l2))
+
+ # Generator
+ grads_and_vars = opt_g.compute_gradients(self.g_loss, var_list=self.g_vars)
+ self.g_optim = opt_g.apply_gradients(grads_and_vars)
+
+ # Gradient summaries generator
+ sum_grad_g = []
+ for i, (grad, vars_) in enumerate(grads_and_vars):
+ grad_l2 = tf.sqrt(tf.reduce_sum(tf.square(grad)))
+ sum_grad_g.append(tf.summary.scalar("grad_l2_g_%d_%s" % (i, vars_.name), grad_l2))
+
+ # Init:
+ tf.global_variables_initializer().run()
+
+ # Summaries
+ self.g_sum = tf.summary.merge([self.z_sum, self.d_fake_sum,
+ self.G_sum, self.d_loss_fake_sum, self.g_loss_sum, self.lrate_sum_g] + sum_grad_g)
+ self.d_sum = tf.summary.merge(
+ [self.z_sum, self.d_real_sum, self.d_loss_real_sum, self.d_loss_sum, self.lrate_sum_d] + sum_grad_d)
+ self.writer = tf.summary.FileWriter(self.log_dir, self.sess.graph)
+
+
+ # Checkpoint saver
+ self.saver = tf.train.Saver()
+
+ # check that fid_n_samples is a multiple of fid_sample_batchsize
+ if not (self.fid_n_samples % self.fid_sample_batchsize == 0):
+ n_batches = self.fid_n_samples // self.fid_sample_batchsize
+ n_old = self.fid_n_samples
+ self.fid_n_samples = n_batches * self.fid_sample_batchsize
+ print("""!WARNING: fid_n_samples is not a multiple of fid_sample_batchsize.
+ Number of generated samples will be adjusted from %d to %d """ % (n_old, self.fid_n_samples))
+
+ # Train model
+ def train(self, config):
+ """Train DCGAN"""
+
+ print("load train stats.. ", end="", flush=True)
+ # load precalculated training set statistics
+ f = np.load(self.stats_path)
+ mu_real, sigma_real = f['mu'][:], f['sigma'][:]
+ f.close()
+ print("ok")
+
+ if config.dataset == 'mnist':
+ print("scan files", end=" ", flush=True)
+ data_X, data_y = self.load_mnist()
+ else:
+ if (config.dataset == "celebA") or (config.dataset == "cifar10"):
+ print("scan files", end=" ", flush=True)
+ data = glob(os.path.join(self.data_path, self.input_fname_pattern))
+ else:
+ if config.dataset == "lsun":
+ print("scan files")
+ data = []
+ for i in range(304):
+ print("\r%d" % i, end="", flush=True)
+ data += glob(os.path.join(self.data_path, str(i), self.input_fname_pattern))
+ else:
+ print("Please specify dataset in run.sh [mnist, celebA, lsun, cifar10]")
+ raise SystemExit()
+
+ print()
+ print("%d images found" % len(data))
+
+ # Z sample
+ #sample_z = np.random.normal(0, 1.0, size=(self.sample_num , self.z_dim))
+ sample_z = np.random.uniform(-1.0, 1.0, size=(self.sample_num , self.z_dim))
+
+ # Input samples
+ sample_files = data[0:self.sample_num]
+ sample = [
+ get_image(sample_file,
+ input_height=self.input_height,
+ input_width=self.input_width,
+ resize_height=self.output_height,
+ resize_width=self.output_width,
+ is_crop=self.is_crop,
+ is_grayscale=self.is_grayscale) for sample_file in sample_files]
+ if (self.is_grayscale):
+ sample_inputs = np.array(sample).astype(np.float32)[:, :, :, None]
+ else:
+ sample_inputs = np.array(sample).astype(np.float32)
+
+ if self.load_checkpoint:
+ if self.load(self.checkpoint_dir):
+ print(" [*] Load SUCCESS")
+ else:
+ print(" [!] Load failed...")
+
+ # Batch preparing
+ batch_nums = min(len(data), config.train_size) // config.batch_size
+ data_idx = list(range(len(data)))
+
+ counter = self.counter_start
+
+ start_time = time.time()
+
+ # Loop over epochs
+ for epoch in range(config.epoch):
+
+ # Assign learning rates for d and g
+ lrate = config.learning_rate_d # * (config.lr_decay_rate_d ** epoch)
+ self.sess.run(tf.assign(self.learning_rate_d, lrate))
+ lrate = config.learning_rate_g # * (config.lr_decay_rate_g ** epoch)
+ self.sess.run(tf.assign(self.learning_rate_g, lrate))
+
+ # Shuffle the data indices
+ np.random.shuffle(data_idx)
+
+ # Loop over batches
+ for batch_idx in range(batch_nums):
+
+ # Prepare batch
+ idx = data_idx[batch_idx * config.batch_size:(batch_idx + 1) * config.batch_size]
+ batch = [
+ get_image(data[i],
+ input_height=self.input_height,
+ input_width=self.input_width,
+ resize_height=self.output_height,
+ resize_width=self.output_width,
+ is_crop=self.is_crop,
+ is_grayscale=self.is_grayscale) for i in idx]
+ if (self.is_grayscale):
+ batch_images = np.array(batch).astype(np.float32)[:, :, :, None]
+ else:
+ batch_images = np.array(batch).astype(np.float32)
+
+ #batch_z = np.random.normal(0, 1.0, size=(config.batch_size , self.z_dim)).astype(np.float32)
+ batch_z = np.random.uniform(-1.0, 1.0, size=(config.batch_size , self.z_dim)).astype(np.float32)
+
+ # Update D network
+ _, summary_str = self.sess.run([self.d_optim, self.d_sum],
+ feed_dict={self.inputs: batch_images,
+ self.z: batch_z})
+ if np.mod(counter, 20) == 0:
+ self.writer.add_summary(summary_str, counter)
+
+ # Update G network
+ _, summary_str = self.sess.run([self.g_optim, self.g_sum],
+ feed_dict={self.z: batch_z})
+ if np.mod(counter, 20) == 0:
+ self.writer.add_summary(summary_str, counter)
+
+ errD_fake = self.d_loss_fake.eval({ self.z: batch_z })
+ errD_real = self.d_loss_real.eval({ self.inputs: batch_images })
+ errG = self.g_loss.eval({self.z: batch_z})
+
+ # Print
+ if np.mod(counter, 100) == 0:
+ print("Epoch: [%2d] [%4d/%4d] time: %4.4f, d_loss: %.8f, g_loss: %.8f" \
+ % (epoch, batch_idx, batch_nums, time.time() - start_time, errD_fake+errD_real, errG))
+
+ # Save generated samples and FID
+ if np.mod(counter, config.fid_eval_steps) == 0:
+
+ # Save
+ try:
+ samples, d_loss, g_loss = self.sess.run(
+ [self.sampler, self.d_loss, self.g_loss],
+ feed_dict={self.z: sample_z,
+ self.inputs: sample_inputs})
+ save_images(samples, [8, 8], '{}/train_{:02d}_{:04d}.png'.format(config.sample_dir, epoch, batch_idx))
+ print("[Sample] d_loss: %.8f, g_loss: %.8f" % (d_loss, g_loss))
+ except Exception as e:
+ print(e)
+ print("sample image error!")
+
+ # FID
+ print("samples for incept", end="", flush=True)
+
+ samples = np.zeros((self.fid_n_samples, self.output_height, self.output_width, 3))
+ n_batches = self.fid_n_samples // self.fid_sample_batchsize
+ lo = 0
+ for btch in range(n_batches):
+ print("\rsamples for incept %d/%d" % (btch + 1, n_batches), end=" ", flush=True)
+ #sample_z_fid = np.random.normal(0, 1.0, size=(self.fid_sample_batchsize, self.z_dim))
+ sample_z_fid = np.random.uniform(-1.0, 1.0, size=(self.fid_sample_batchsize, self.z_dim))
+ samples[lo:(lo+self.fid_sample_batchsize)] = self.sess.run( self.sampler_fid,
+ feed_dict={self.z_fid: sample_z_fid})
+ lo += self.fid_sample_batchsize
+
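+ # rescale generator output from tanh range [-1, 1] to [0, 255] for the Inception network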
+ samples = (samples + 1.) * 127.5
+ print("ok")
+
+ mu_gen, sigma_gen = fid.calculate_activation_statistics( samples,
+ self.sess,
+ batch_size=self.fid_batch_size,
+ verbose=self.fid_verbose)
+
+ print("calculate FID:", end=" ", flush=True)
+ try:
+ FID = fid.calculate_frechet_distance(mu_gen, sigma_gen, mu_real, sigma_real)
+ except Exception as e:
+ print(e)
+ FID = 500  # fall back to a large sentinel value if the distance computation fails
+
+ print(FID)
+
+ # Update event log with FID
+ self.sess.run(tf.assign(self.fid, FID))
+ summary_str = self.sess.run(self.fid_sum)
+ self.writer.add_summary(summary_str, counter)
+
+ # Save checkpoint
+ if (counter != 0) and (np.mod(counter, 2000) == 0):
+ self.save(config.checkpoint_dir, counter)
+
+ counter += 1
+
+ # Discriminator
+ def discriminator(self, image, y=None, reuse=False):
+ with tf.variable_scope("discriminator") as scope:
+ if reuse:
+ scope.reuse_variables()
+ h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
+ h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))
+ h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim*4, name='d_h2_conv')))
+ h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim*8, name='d_h3_conv')))
+ h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h3_lin')
+ return tf.nn.sigmoid(h4), h4
+
+
+ # Generator
+ def generator(self, z, y=None, batch_size=None):
+ with tf.variable_scope("generator") as scope:
+
+ if not self.y_dim:
+ s_h, s_w = self.output_height, self.output_width
+ s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
+ s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
+ s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)
+ s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)
+
+ # Project `z` and reshape
+ self.z_, self.h0_w, self.h0_b = linear(
+ z, self.gf_dim*8*s_h16*s_w16, 'g_h0_lin', with_w=True)
+ self.h0 = tf.reshape(
+ self.z_, [-1, s_h16, s_w16, self.gf_dim * 8])
+ h0 = tf.nn.relu(self.g_bn0(self.h0))
+
+ # Deconv
+ self.h1, self.h1_w, self.h1_b = deconv2d(
+ h0, [batch_size, s_h8, s_w8, self.gf_dim*4], name='g_h1', with_w=True)
+ h1 = tf.nn.relu(self.g_bn1(self.h1))
+ h2, self.h2_w, self.h2_b = deconv2d(
+ h1, [batch_size, s_h4, s_w4, self.gf_dim*2], name='g_h2', with_w=True)
+ h2 = tf.nn.relu(self.g_bn2(h2))
+ h3, self.h3_w, self.h3_b = deconv2d(
+ h2, [batch_size, s_h2, s_w2, self.gf_dim*1], name='g_h3', with_w=True)
+ h3 = tf.nn.relu(self.g_bn3(h3))
+ h4, self.h4_w, self.h4_b = deconv2d(
+ h3, [batch_size, s_h, s_w, self.c_dim], name='g_h4', with_w=True)
+
+ return tf.nn.tanh(h4)
+
+
+ # Sampler
+ def sampler_func(self, z, batch_size, y=None):
+ with tf.variable_scope("generator") as scope:
+ scope.reuse_variables()
+
+ if not self.y_dim:
+ s_h, s_w = self.output_height, self.output_width
+ s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
+ s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
+ s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)
+ s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)
+
+ # Project `z` and reshape
+ h0 = tf.reshape(
+ linear(z, self.gf_dim*8*s_h16*s_w16, 'g_h0_lin'),
+ [-1, s_h16, s_w16, self.gf_dim * 8])
+ h0 = tf.nn.relu(self.g_bn0(h0, train=False))
+
+ # Deconv
+ h1 = deconv2d(h0, [batch_size, s_h8, s_w8, self.gf_dim*4], name='g_h1')
+ h1 = tf.nn.relu(self.g_bn1(h1, train=False))
+ h2 = deconv2d(h1, [batch_size, s_h4, s_w4, self.gf_dim*2], name='g_h2')
+ h2 = tf.nn.relu(self.g_bn2(h2, train=False))
+ h3 = deconv2d(h2, [batch_size, s_h2, s_w2, self.gf_dim*1], name='g_h3')
+ h3 = tf.nn.relu(self.g_bn3(h3, train=False))
+ h4 = deconv2d(h3, [batch_size, s_h, s_w, self.c_dim], name='g_h4')
+
+ return tf.nn.tanh(h4)
+
+
+ @property
+ def model_dir(self):
+ return "{}_{}_{}_{}".format(
+ self.dataset_name, self.batch_size,
+ self.output_height, self.output_width)
+
+ # Save checkpoint
+ def save(self, checkpoint_dir, step):
+ model_name = "DCGAN.model"
+ checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)
+
+ if not os.path.exists(checkpoint_dir):
+ os.makedirs(checkpoint_dir)
+
+ self.saver.save(self.sess,
+ os.path.join(checkpoint_dir, model_name),
+ global_step=step)
+
+ # Load checkpoint
+ def load(self, checkpoint_dir):
+ print(" [*] Reading checkpoints...")
+ checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)
+
+ ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
+ if ckpt and ckpt.model_checkpoint_path:
+ ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
+ self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
+ print(" [*] Success to read {}".format(ckpt_name))
+ return True
+ else:
+ print(" [*] Failed to find a checkpoint")
+ return False
+
diff --git a/Wav2Lip-master/evaluation/TTUR-master/DCGAN_FID_batched/ops.py b/Wav2Lip-master/evaluation/TTUR-master/DCGAN_FID_batched/ops.py
new file mode 100644
index 00000000..26342040
--- /dev/null
+++ b/Wav2Lip-master/evaluation/TTUR-master/DCGAN_FID_batched/ops.py
@@ -0,0 +1,124 @@
+import math
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python.framework import ops
+
+from utils import *
+
+try:
+ image_summary = tf.image_summary
+ scalar_summary = tf.scalar_summary
+ histogram_summary = tf.histogram_summary
+ merge_summary = tf.merge_summary
+ SummaryWriter = tf.train.SummaryWriter
+except AttributeError:  # TensorFlow >= 1.0 moved the summary ops
+ image_summary = tf.summary.image
+ scalar_summary = tf.summary.scalar
+ histogram_summary = tf.summary.histogram
+ merge_summary = tf.summary.merge
+ SummaryWriter = tf.summary.FileWriter
+
+if "concat_v2" in dir(tf):
+ def concat(tensors, axis, *args, **kwargs):
+ return tf.concat_v2(tensors, axis, *args, **kwargs)
+else:
+ def concat(tensors, axis, *args, **kwargs):
+ return tf.concat(tensors, axis, *args, **kwargs)
+
+class batch_norm(object):
+ def __init__(self, scale=True, epsilon=1e-5, momentum = 0.9, name="batch_norm"):
+ with tf.variable_scope(name):
+ self.epsilon = epsilon
+ self.momentum = momentum
+ self.name = name
+ self.scale = scale
+
+ def __call__(self, x, train=True):
+ return tf.contrib.layers.batch_norm(x,
+ decay=self.momentum,
+ updates_collections=None,
+ epsilon=self.epsilon,
+ scale=self.scale,
+ is_training=train,
+ scope=self.name)
+
+def conv_cond_concat(x, y):
+ """Concatenate conditioning vector on feature map axis."""
+ x_shapes = x.get_shape()
+ y_shapes = y.get_shape()
+ return concat([
+ x, y*tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], 3)
+
+def conv2d(input_, output_dim,
+ k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
+ name="conv2d"):
+ # ELU init stddev
+ #n = k_h * k_w * (input_.get_shape().as_list()[-1] + output_dim) / 2.0
+ #n = k_h * k_w * tf.sqrt(tf.cast(input_.get_shape().as_list()[-1] * output_dim, tf.float32))
+ n = k_h * k_w * input_.get_shape().as_list()[-1]
+ #stddev = tf.sqrt(1.55052/n)
+ with tf.variable_scope(name):
+ w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
+ initializer=tf.truncated_normal_initializer(stddev=stddev))
+ conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
+
+ biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
+ conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
+
+ return conv
+
+def deconv2d(input_, output_shape,
+ k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
+ name="deconv2d", with_w=False):
+ # ELU init stddev
+ #n = k_h * k_w * (input_.get_shape().as_list()[-1] + output_shape[-1]) / 2.0
+ #n = k_h * k_w * tf.sqrt(tf.cast(input_.get_shape().as_list()[-1] * output_shape[-1], tf.float32))
+ n = k_h * k_w *input_.get_shape().as_list()[-1]
+ #stddev = tf.sqrt(1.55052/n)
+ with tf.variable_scope(name):
+ # filter : [height, width, output_channels, in_channels]
+ w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],
+ initializer=tf.random_normal_initializer(stddev=stddev))
+
+ try:
+ deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape,
+ strides=[1, d_h, d_w, 1])
+
+ # Support for versions of TensorFlow before 0.7.0
+ except AttributeError:
+ deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape,
+ strides=[1, d_h, d_w, 1])
+
+ biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))
+ deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())
+
+ if with_w:
+ return deconv, w, biases
+ else:
+ return deconv
+
+def lrelu(x, leak=0.2, name="lrelu"):
+ return tf.maximum(x, leak*x)
+
+def elu(x, name="elu"):
+ return(tf.nn.elu(x))
+
+# Scaled ELU
+def selu(x, name="selu"):
+ alpha = 1.6732632423543772848170429916717
+ scale = 1.0507009873554804934193349852946
+ return scale*tf.where(x>=0.0, x, alpha*tf.nn.elu(x))
+
+def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):
+ shape = input_.get_shape().as_list()
+
+ with tf.variable_scope(scope or "Linear"):
+ matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32,
+ tf.random_normal_initializer(stddev=stddev))
+ bias = tf.get_variable("bias", [output_size],
+ initializer=tf.constant_initializer(bias_start))
+ if with_w:
+ return tf.matmul(input_, matrix) + bias, matrix, bias
+ else:
+ return tf.matmul(input_, matrix) + bias
diff --git a/Wav2Lip-master/evaluation/TTUR-master/DCGAN_FID_batched/run.sh b/Wav2Lip-master/evaluation/TTUR-master/DCGAN_FID_batched/run.sh
new file mode 100644
index 00000000..70cfe47e
--- /dev/null
+++ b/Wav2Lip-master/evaluation/TTUR-master/DCGAN_FID_batched/run.sh
@@ -0,0 +1,78 @@
+#!/bin/bash
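+# Usage: bash run.sh <lr_d> <lr_g>  (discriminator and generator learning rates)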
+
+# celebA, lsun, imagenet, cifar10..
+dataset="cifar10"
+
+lr_d=$1
+lr_g=$2
+
+counter_start=0
+load_checkpoint=false
+if ! $load_checkpoint; then
+ dwt=$(date "+%m%d_%H%M%S")
+ run_id=${dwt}_${lr_d}_${lr_g}
+else
+ run_id="MMdd_hhmmss_lrd_lrg"
+fi
+
+incept_path="inception-2015-12-05/classify_image_graph_def.pb"
+
+case $dataset in
+ celebA)
+ data_path="data/celebA_cropped"
+ stats_path="stats/fid_stats_celeba.npz"
+ input_height=64
+ output_height=64
+ input_fname_pattern="*.jpg"
+ epochs=81
+ ;;
+ lsun)
+ data_path="data/lsun_cropped"
+ stats_path="stats/fid_stats_lsun.npz"
+ input_height=64
+ output_height=64
+ input_fname_pattern="*.jpg"
+ epochs=9
+ ;;
+ imagenet)
+ data_path="data/imagenet"
+ stats_path="stats/fid_stats_imagenet.npz"
+ input_height=64
+ output_height=64
+ input_fname_pattern="*.jpg"
+ epochs=5
+ ;;
+ cifar10)
+ data_path="data/cifar10_train"
+ stats_path="stats/fid_stats_cifar10.npz"
+ input_fname_pattern="*.png"
+ input_height=32
+ output_height=32
+ epochs=500
+ ;;
+esac
+
+python3 main.py \
+--dataset=$dataset \
+--input_height=$input_height \
+--output_height=$output_height \
+--input_fname_pattern=$input_fname_pattern \
+--is_crop=False \
+--is_train=True \
+--batch_size=64 \
+--checkpoint_dir="logs/${run_id}/checkpoints" \
+--log_dir="logs/${run_id}/logs" \
+--sample_dir="logs/${run_id}/samples" \
+--fid_n_samples=50000 \
+--fid_sample_batchsize=1000 \
+--fid_batch_size=100 \
+--fid_eval_steps=5000 \
+--learning_rate_d=$lr_d \
+--learning_rate_g=$lr_g \
+--beta1=0.5 \
+--epoch=$epochs \
+--load_checkpoint=$load_checkpoint \
+--counter_start=$counter_start \
+--incept_path=$incept_path \
+--data_path=$data_path \
+--stats_path=$stats_path
diff --git a/Wav2Lip-master/evaluation/TTUR-master/DCGAN_FID_batched/stats/README.md b/Wav2Lip-master/evaluation/TTUR-master/DCGAN_FID_batched/stats/README.md
new file mode 100644
index 00000000..6765f6d4
--- /dev/null
+++ b/Wav2Lip-master/evaluation/TTUR-master/DCGAN_FID_batched/stats/README.md
@@ -0,0 +1 @@
+Folder for precalculated FID statistics
diff --git a/Wav2Lip-master/evaluation/TTUR-master/DCGAN_FID_batched/utils.py b/Wav2Lip-master/evaluation/TTUR-master/DCGAN_FID_batched/utils.py
new file mode 100644
index 00000000..152b530b
--- /dev/null
+++ b/Wav2Lip-master/evaluation/TTUR-master/DCGAN_FID_batched/utils.py
@@ -0,0 +1,241 @@
+"""
+Some code from https://github.com/Newmu/dcgan_code
+"""
+from __future__ import division
+import math
+import json
+import random
+import pprint
+import scipy.misc
+import numpy as np
+from time import gmtime, strftime
+from six.moves import xrange
+
+pp = pprint.PrettyPrinter()
+
+get_stddev = lambda x, k_h, k_w: 1/math.sqrt(k_w*k_h*x.get_shape()[-1])
+
+def get_image(image_path, input_height, input_width,
+ resize_height=64, resize_width=64,
+ is_crop=True, is_grayscale=False):
+ image = imread(image_path, is_grayscale)
+ return transform(image, input_height, input_width,
+ resize_height, resize_width, is_crop)
+
+def save_images(images, size, image_path):
+ return imsave(inverse_transform(images), size, image_path)
+
+def imread(path, is_grayscale = False):
+ if (is_grayscale):
+ return scipy.misc.imread(path, flatten = True).astype(np.float)
+ else:
+ return scipy.misc.imread(path).astype(np.float)
+
+def merge_images(images, size):
+ return inverse_transform(images)
+
+def merge(images, size):
+ h, w = images.shape[1], images.shape[2]
+ img = np.zeros((h * size[0], w * size[1], 3))
+ for idx, image in enumerate(images):
+ i = idx % size[1]
+ j = idx // size[1]
+ img[j*h:j*h+h, i*w:i*w+w, :] = image
+ return img
+
+def imsave(images, size, path):
+ return scipy.misc.imsave(path, merge(images, size))
+
+def center_crop(x, crop_h, crop_w,
+ resize_h=64, resize_w=64):
+ if crop_w is None:
+ crop_w = crop_h
+ h, w = x.shape[:2]
+ j = int(round((h - crop_h)/2.))
+ i = int(round((w - crop_w)/2.))
+ return scipy.misc.imresize(
+ x[j:j+crop_h, i:i+crop_w], [resize_h, resize_w])
+
+def transform(image, input_height, input_width,
+ resize_height=64, resize_width=64, is_crop=True):
+ if is_crop:
+ cropped_image = center_crop(
+ image, input_height, input_width,
+ resize_height, resize_width)
+ else:
+ if (input_height != resize_height) or (input_width != resize_width):
+ cropped_image = scipy.misc.imresize(image, [resize_height, resize_width])
+ else:
+ cropped_image = image
+ return np.array(cropped_image) / 127.5 - 1.
+
+def inverse_transform(images):
+ return (images+1.)/2.
+
+def to_json(output_path, *layers):
+ with open(output_path, "w") as layer_f:
+ lines = ""
+ for w, b, bn in layers:
+ layer_idx = w.name.split('/')[0].split('h')[1]
+
+ B = b.eval()
+
+ if "lin/" in w.name:
+ W = w.eval()
+ depth = W.shape[1]
+ else:
+ W = np.rollaxis(w.eval(), 2, 0)
+ depth = W.shape[0]
+
+ biases = {"sy": 1, "sx": 1, "depth": depth, "w": ['%.2f' % elem for elem in list(B)]}
+ if bn is not None:
+ gamma = bn.gamma.eval()
+ beta = bn.beta.eval()
+
+ gamma = {"sy": 1, "sx": 1, "depth": depth, "w": ['%.2f' % elem for elem in list(gamma)]}
+ beta = {"sy": 1, "sx": 1, "depth": depth, "w": ['%.2f' % elem for elem in list(beta)]}
+ else:
+ gamma = {"sy": 1, "sx": 1, "depth": 0, "w": []}
+ beta = {"sy": 1, "sx": 1, "depth": 0, "w": []}
+
+ if "lin/" in w.name:
+ fs = []
+ for w in W.T:
+ fs.append({"sy": 1, "sx": 1, "depth": W.shape[0], "w": ['%.2f' % elem for elem in list(w)]})
+
+ lines += """
+ var layer_%s = {
+ "layer_type": "fc",
+ "sy": 1, "sx": 1,
+ "out_sx": 1, "out_sy": 1,
+ "stride": 1, "pad": 0,
+ "out_depth": %s, "in_depth": %s,
+ "biases": %s,
+ "gamma": %s,
+ "beta": %s,
+ "filters": %s
+ };""" % (layer_idx.split('_')[0], W.shape[1], W.shape[0], biases, gamma, beta, fs)
+ else:
+ fs = []
+ for w_ in W:
+ fs.append({"sy": 5, "sx": 5, "depth": W.shape[3], "w": ['%.2f' % elem for elem in list(w_.flatten())]})
+
+ lines += """
+ var layer_%s = {
+ "layer_type": "deconv",
+ "sy": 5, "sx": 5,
+ "out_sx": %s, "out_sy": %s,
+ "stride": 2, "pad": 1,
+ "out_depth": %s, "in_depth": %s,
+ "biases": %s,
+ "gamma": %s,
+ "beta": %s,
+ "filters": %s
+ };""" % (layer_idx, 2**(int(layer_idx)+2), 2**(int(layer_idx)+2),
+ W.shape[0], W.shape[3], biases, gamma, beta, fs)
+ layer_f.write(" ".join(lines.replace("'","").split()))
+
+def make_gif(images, fname, duration=2, true_image=False):
+ import moviepy.editor as mpy
+
+ def make_frame(t):
+ try:
+ x = images[int(len(images)/duration*t)]
+ except:
+ x = images[-1]
+
+ if true_image:
+ return x.astype(np.uint8)
+ else:
+ return ((x+1)/2*255).astype(np.uint8)
+
+ clip = mpy.VideoClip(make_frame, duration=duration)
+ clip.write_gif(fname, fps = len(images) / duration)
+
+def visualize(sess, dcgan, config, option):
+ image_frame_dim = int(math.ceil(config.batch_size**.5))
+ if option == 0:
+ z_sample = np.random.uniform(-0.5, 0.5, size=(config.batch_size, dcgan.z_dim))
+ samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
+ save_images(samples, [image_frame_dim, image_frame_dim], '%s/test_%s.png' % (config.sample_dir, strftime("%Y-%m-%d %H:%M:%S", gmtime())))
+ elif option == 1:
+ values = np.arange(0, 1, 1./config.batch_size)
+ for idx in xrange(100):
+ print(" [*] %d" % idx)
+ z_sample = np.zeros([config.batch_size, dcgan.z_dim])
+ for kdx, z in enumerate(z_sample):
+ z[idx] = values[kdx]
+
+ if config.dataset == "mnist":
+ y = np.random.choice(10, config.batch_size)
+ y_one_hot = np.zeros((config.batch_size, 10))
+ y_one_hot[np.arange(config.batch_size), y] = 1
+
+ samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot})
+ else:
+ samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
+
+ save_images(samples, [image_frame_dim, image_frame_dim], '%s/test_arange_%s.png' % (config.sample_dir, idx))
+ elif option == 2:
+ values = np.arange(0, 1, 1./config.batch_size)
+ for idx in [random.randint(0, 99) for _ in xrange(100)]:
+ print(" [*] %d" % idx)
+ z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
+ z_sample = np.tile(z, (config.batch_size, 1))
+ #z_sample = np.zeros([config.batch_size, dcgan.z_dim])
+ for kdx, z in enumerate(z_sample):
+ z[idx] = values[kdx]
+
+ if config.dataset == "mnist":
+ y = np.random.choice(10, config.batch_size)
+ y_one_hot = np.zeros((config.batch_size, 10))
+ y_one_hot[np.arange(config.batch_size), y] = 1
+
+ samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot})
+ else:
+ samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
+
+ try:
+ make_gif(samples, '%s/test_gif_%s.gif' % (config.sample_dir, idx))
+ except:
+ save_images(samples, [image_frame_dim, image_frame_dim], '%s/test_%s.png' % (config.sample_dir, strftime("%Y-%m-%d %H:%M:%S", gmtime())))
+ elif option == 3:
+ values = np.arange(0, 1, 1./config.batch_size)
+ for idx in xrange(100):
+ print(" [*] %d" % idx)
+ z_sample = np.zeros([config.batch_size, dcgan.z_dim])
+ for kdx, z in enumerate(z_sample):
+ z[idx] = values[kdx]
+
+ if config.dataset == "mnist":
+ y = np.random.choice(10, config.batch_size)
+ y_one_hot = np.zeros((config.batch_size, 10))
+ y_one_hot[np.arange(config.batch_size), y] = 1
+
+ samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot})
+ else:
+ samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
+
+ make_gif(samples, '%s/test_gif_%s.gif' % (config.sample_dir, idx))
+ elif option == 4:
+ image_set = []
+ values = np.arange(0, 1, 1./config.batch_size)
+
+ for idx in xrange(100):
+ print(" [*] %d" % idx)
+ z_sample = np.zeros([config.batch_size, dcgan.z_dim])
+ for kdx, z in enumerate(z_sample): z[idx] = values[kdx]
+
+ if config.dataset == "mnist":
+ y = np.random.choice(10, config.batch_size)
+ y_one_hot = np.zeros((config.batch_size, 10))
+ y_one_hot[np.arange(config.batch_size), y] = 1
+ image_set.append(sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot}))
+ else:
+ image_set.append(sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}))
+
+ make_gif(image_set[-1], '%s/test_gif_%s.gif' % (config.sample_dir, idx))
+
+ new_image_set = [merge(np.array([images[idx] for images in image_set]), [10, 10]) \
+ for idx in list(range(64)) + list(range(63, -1, -1))]  # list(...) keeps this valid under Python 3
+ make_gif(new_image_set, '%s/test_gif_merged.gif' % config.sample_dir, duration=8)
diff --git a/Wav2Lip-master/evaluation/TTUR-master/FID_vs_Inception_Score/README.md b/Wav2Lip-master/evaluation/TTUR-master/FID_vs_Inception_Score/README.md
new file mode 100644
index 00000000..31759b20
--- /dev/null
+++ b/Wav2Lip-master/evaluation/TTUR-master/FID_vs_Inception_Score/README.md
@@ -0,0 +1,87 @@
+# Comparison of FID and Inception Score
+
+These experiments highlight a crucial difference between the FID and the Inception Score (IS).
+The purpose of a generative model is to learn a real-world distribution, so a good performance
+measure should, roughly speaking, capture how far off the model distribution is. The experiments
+show that, in this sense, the FID is the more useful measure.
+
+## Methodology
+While the idea of the IS is to capture 1) how realistic the structures in the generated images are and
+2) how much variability the generated samples have, the score has no connection to the
+real-world distribution. Clearly the assumptions of the IS are met best on the dataset the underlying
+network was trained on, namely the ImageNet dataset. It is, however, questionable whether these
+assumptions carry over to other image datasets. As an example, consider the celebA dataset of about
+200k face images of celebrities: while assumption 1) still holds, it is not clear why there should be
+high variability across samples.
+
+But the main point is: an evaluation method should indicate how well the real-world
+distribution has been learned. This implies that disturbed images should lead to a
+lower score or a higher distance, respectively. For the experiments we therefore produce
+disturbed versions of the celebA images at increasing disturbance levels and evaluate
+the FID and the IS on them.
+To make the comparison between the two methods easier, the IS is transformed into a distance
+as described in the TTUR paper. We refer to the transformed IS as the IND, the Inception
+Distance.
+
+## Experiments
+1. Gaussian noise: We construct a matrix N of Gaussian noise scaled to [0, 255]. The
+noisy image is computed as (1 − α)X + αN for α ∈ {0, 0.25, 0.5, 0.75}: the larger α, the
+stronger the noise and thus the disturbance of the image (a sketch follows the table).
+
+|FID|IND|
+|-|-|
+| *(plot: FID vs. noise level α)* | *(plot: IND vs. noise level α)* |
+
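+A minimal sketch of this disturbance (assuming `X` is a float image array in [0, 255]):
+
+```python
+import numpy as np
+
+alpha = 0.25                                     # disturbance level in {0, 0.25, 0.5, 0.75}
+N = np.random.randn(*X.shape)                    # Gaussian noise matrix ...
+N = (N - N.min()) / (N.max() - N.min()) * 255.0  # ... scaled to [0, 255]
+X_noisy = (1.0 - alpha) * X + alpha * N
+```
+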
+2. Gaussian blur: The image is convolved with a Gaussian kernel with standard deviation
+α ∈ {0, 1, 2, 4}: the larger α, the more the image is smoothed and thus the stronger the
+disturbance (a sketch follows the table).
+
+|FID | IND|
+|-|-|
+| *(plot: FID vs. blur level α)* | *(plot: IND vs. blur level α)* |
+
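+A minimal sketch of the blur (assuming `X` is an H x W x 3 array; `scipy.ndimage` provides the filter):
+
+```python
+from scipy.ndimage import gaussian_filter
+
+alpha = 2.0                                           # blur level in {0, 1, 2, 4}
+X_blur = gaussian_filter(X, sigma=(alpha, alpha, 0))  # smooth spatial axes, not channels
+```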
+
+3. Black rectangles: Five black rectangles are added to the image at randomly chosen
+locations, covering parts of the image. The size of the rectangles is α · image size
+with α ∈ {0, 0.25, 0.5, 0.75}: the larger α, the more of the image is covered by black
+rectangles (a sketch follows the table).
+
+|FID|IND|
+|-|-|
+| *(plot: FID vs. rectangle size α)* | *(plot: IND vs. rectangle size α)* |
+
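+A minimal sketch of the rectangle disturbance (square rectangles are an assumption of this sketch):
+
+```python
+import numpy as np
+
+alpha = 0.25                              # disturbance level in {0, 0.25, 0.5, 0.75}
+h, w = X.shape[:2]
+rh, rw = int(alpha * h), int(alpha * w)   # rectangle size ~ alpha * image size
+for _ in range(5):                        # five rectangles at random locations
+    y = np.random.randint(0, h - rh + 1)
+    x = np.random.randint(0, w - rw + 1)
+    X[y:y + rh, x:x + rw] = 0.0
+```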
+
+4. Swirl: Parts of the image are transformed into a spiral, that is, a swirl (whirlpool
+effect). Consider a coordinate (x, y) in the disturbed (swirled) image for which we want to
+find the color: to this end we need the reverse mapping of the swirl transformation,
+which gives the location that is mapped onto (x, y). The disturbance level is given by the
+amount of swirl α ∈ {0, 1, 2, 4}: the larger α, the stronger the swirl and thus the
+disturbance of the image (a sketch follows the table).
+
+|FID|IND|
+|-|-|
+| *(plot: FID vs. swirl strength α)* | *(plot: IND vs. swirl strength α)* |
+
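+A minimal sketch using scikit-image's swirl transform (the radius choice is an assumption of this sketch):
+
+```python
+from skimage.transform import swirl
+
+alpha = 2.0                                # swirl strength in {0, 1, 2, 4}
+X_swirl = swirl(X, strength=alpha, radius=X.shape[0], preserve_range=True)
+```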
+
+5. Salt and pepper noise: Some pixels of the image are set to black or white, where black
+and white are each chosen with 50% probability. The pixels to flip are chosen at random,
+and the fraction of flipped pixels is given by the noise level α ∈ {0, 0.1, 0.2, 0.3}:
+the larger α, the more pixels are flipped to white or black and thus the stronger the
+disturbance (a sketch follows the table).
+
+|FID|IND|
+|-|-|
+| *(plot: FID vs. noise level α)* | *(plot: IND vs. noise level α)* |
+
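+A minimal sketch of the salt-and-pepper disturbance (assuming `X` is an H x W x 3 float array in [0, 255]):
+
+```python
+import numpy as np
+
+alpha = 0.1                                  # noise level in {0, 0.1, 0.2, 0.3}
+flip = np.random.rand(*X.shape[:2]) < alpha  # fraction alpha of pixels is flipped
+salt = np.random.rand(*X.shape[:2]) < 0.5    # flipped pixels become white with 50% probability
+X[flip & salt] = 255.0
+X[flip & ~salt] = 0.0
+```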
+
+6. ImageNet contamination: From each of the 1,000 ImageNet classes, 5 images are randomly
+chosen, giving 5,000 ImageNet images; the images are ensured to be RGB and to have a
+minimal size of 256x256. A fraction α ∈ {0, 0.25, 0.5, 0.75} of the CelebA images is
+replaced by ImageNet images: α = 0 means all images are from CelebA, α = 0.25 means that
+75% of the images are from CelebA and 25% from ImageNet, etc. The larger α, the more the
+dataset deviates from the real-world reference dataset (a sketch follows the table).
+
+|FID|IND|
+|-|-|
+| *(plot: FID vs. contamination level α)* | *(plot: IND vs. contamination level α)* |
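+
+A minimal sketch of the contamination (the list names are hypothetical; sampling with replacement
+covers the case where more replacements are needed than the 5,000 ImageNet images):
+
+```python
+import random
+
+alpha = 0.25                                  # contamination level in {0, 0.25, 0.5, 0.75}
+n_replace = int(alpha * len(celeba_images))
+idx = random.sample(range(len(celeba_images)), n_replace)
+for i, img in zip(idx, random.choices(imagenet_images, k=n_replace)):
+    celeba_images[i] = img
+```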
diff --git a/Wav2Lip-master/evaluation/TTUR-master/FID_vs_Inception_Score/figures/blur_FID.pdf b/Wav2Lip-master/evaluation/TTUR-master/FID_vs_Inception_Score/figures/blur_FID.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..8e0a6259ae1f1fea799c2b4a3bd7d21f34a63f91
Binary files /dev/null and b/Wav2Lip-master/evaluation/TTUR-master/FID_vs_Inception_Score/figures/blur_FID.pdf differ