From cbea2358f4a42c7a9cfae1fe9fe179417dfab288 Mon Sep 17 00:00:00 2001 From: Zhuoyuan Zhang Date: Mon, 29 Jan 2024 17:48:27 +0800 Subject: [PATCH 01/28] emodisc_dataprocess_update update a emogen's emodisctrain code, and dataset , dataprocess code. by Zhang Zhuoyuan --- requirements.txt | 1 - talkingface/data/dataprocess/audio.py | 136 ++++++++ .../data/dataprocess/emogen_dataprocess.py | 104 ++++++ .../data/dataprocess/face_detection/README.md | 1 + .../dataprocess/face_detection/__init__.py | 7 + .../data/dataprocess/face_detection/api.py | 79 +++++ .../face_detection/detection/__init__.py | 1 + .../face_detection/detection/core.py | 130 ++++++++ .../face_detection/detection/sfd/__init__.py | 1 + .../face_detection/detection/sfd/bbox.py | 129 ++++++++ .../face_detection/detection/sfd/detect.py | 112 +++++++ .../face_detection/detection/sfd/net_s3fd.py | 129 ++++++++ .../detection/sfd/sfd_detector.py | 59 ++++ .../data/dataprocess/face_detection/models.py | 261 +++++++++++++++ .../data/dataprocess/face_detection/utils.py | 313 ++++++++++++++++++ talkingface/data/dataprocess/hparams.py | 106 ++++++ talkingface/data/dataset/emogen_dataset.py | 129 ++++++++ talkingface/data/dataset/hparams.py | 106 ++++++ .../emogen_emo_disc.py | 77 +++++ talkingface/properties/emogen.yaml | 70 ++++ 20 files changed, 1950 insertions(+), 1 deletion(-) create mode 100644 talkingface/data/dataprocess/audio.py create mode 100644 talkingface/data/dataprocess/emogen_dataprocess.py create mode 100644 talkingface/data/dataprocess/face_detection/README.md create mode 100644 talkingface/data/dataprocess/face_detection/__init__.py create mode 100644 talkingface/data/dataprocess/face_detection/api.py create mode 100644 talkingface/data/dataprocess/face_detection/detection/__init__.py create mode 100644 talkingface/data/dataprocess/face_detection/detection/core.py create mode 100644 talkingface/data/dataprocess/face_detection/detection/sfd/__init__.py create mode 100644 
talkingface/data/dataprocess/face_detection/detection/sfd/bbox.py create mode 100644 talkingface/data/dataprocess/face_detection/detection/sfd/detect.py create mode 100644 talkingface/data/dataprocess/face_detection/detection/sfd/net_s3fd.py create mode 100644 talkingface/data/dataprocess/face_detection/detection/sfd/sfd_detector.py create mode 100644 talkingface/data/dataprocess/face_detection/models.py create mode 100644 talkingface/data/dataprocess/face_detection/utils.py create mode 100644 talkingface/data/dataprocess/hparams.py create mode 100644 talkingface/data/dataset/emogen_dataset.py create mode 100644 talkingface/data/dataset/hparams.py create mode 100644 talkingface/model/audio_driven_talkingface/emogen_emo_disc.py create mode 100644 talkingface/properties/emogen.yaml diff --git a/requirements.txt b/requirements.txt index 1605c1fe..a9d5bb3a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -40,7 +40,6 @@ joblib==1.3.2 jsonschema==4.19.2 jsonschema-specifications==2023.7.1 kiwisolver==1.4.5 -kornia==0.5.5 lazy_loader==0.3 librosa==0.10.1 llvmlite==0.37.0 diff --git a/talkingface/data/dataprocess/audio.py b/talkingface/data/dataprocess/audio.py new file mode 100644 index 00000000..32b20c44 --- /dev/null +++ b/talkingface/data/dataprocess/audio.py @@ -0,0 +1,136 @@ +import librosa +import librosa.filters +import numpy as np +# import tensorflow as tf +from scipy import signal +from scipy.io import wavfile +from hparams import hparams as hp + +def load_wav(path, sr): + return librosa.core.load(path, sr=sr)[0] + +def save_wav(wav, path, sr): + wav *= 32767 / max(0.01, np.max(np.abs(wav))) + #proposed by @dsmiller + wavfile.write(path, sr, wav.astype(np.int16)) + +def save_wavenet_wav(wav, path, sr): + librosa.output.write_wav(path, wav, sr=sr) + +def preemphasis(wav, k, preemphasize=True): + if preemphasize: + return signal.lfilter([1, -k], [1], wav) + return wav + +def inv_preemphasis(wav, k, inv_preemphasize=True): + if inv_preemphasize: + return 
signal.lfilter([1], [1, -k], wav) + return wav + +def get_hop_size(): + hop_size = hp.hop_size + if hop_size is None: + assert hp.frame_shift_ms is not None + hop_size = int(hp.frame_shift_ms / 1000 * hp.sample_rate) + return hop_size + +def linearspectrogram(wav): + D = _stft(preemphasis(wav, hp.preemphasis, hp.preemphasize)) + S = _amp_to_db(np.abs(D)) - hp.ref_level_db + + if hp.signal_normalization: + return _normalize(S) + return S + +def melspectrogram(wav): + D = _stft(preemphasis(wav, hp.preemphasis, hp.preemphasize)) + S = _amp_to_db(_linear_to_mel(np.abs(D))) - hp.ref_level_db + + if hp.signal_normalization: + return _normalize(S) + return S + +def _lws_processor(): + import lws + return lws.lws(hp.n_fft, get_hop_size(), fftsize=hp.win_size, mode="speech") + +def _stft(y): + if hp.use_lws: + return _lws_processor(hp).stft(y).T + else: + return librosa.stft(y=y, n_fft=hp.n_fft, hop_length=get_hop_size(), win_length=hp.win_size) + +########################################################## +#Those are only correct when using lws!!! (This was messing with Wavenet quality for a long time!) 
+def num_frames(length, fsize, fshift): + """Compute number of time frames of spectrogram + """ + pad = (fsize - fshift) + if length % fshift == 0: + M = (length + pad * 2 - fsize) // fshift + 1 + else: + M = (length + pad * 2 - fsize) // fshift + 2 + return M + + +def pad_lr(x, fsize, fshift): + """Compute left and right padding + """ + M = num_frames(len(x), fsize, fshift) + pad = (fsize - fshift) + T = len(x) + 2 * pad + r = (M - 1) * fshift + fsize - T + return pad, pad + r +########################################################## +#Librosa correct padding +def librosa_pad_lr(x, fsize, fshift): + return 0, (x.shape[0] // fshift + 1) * fshift - x.shape[0] + +# Conversions +_mel_basis = None + +def _linear_to_mel(spectogram): + global _mel_basis + if _mel_basis is None: + _mel_basis = _build_mel_basis() + return np.dot(_mel_basis, spectogram) + +def _build_mel_basis(): + assert hp.fmax <= hp.sample_rate // 2 + return librosa.filters.mel(hp.sample_rate, hp.n_fft, n_mels=hp.num_mels, + fmin=hp.fmin, fmax=hp.fmax) + +def _amp_to_db(x): + min_level = np.exp(hp.min_level_db / 20 * np.log(10)) + return 20 * np.log10(np.maximum(min_level, x)) + +def _db_to_amp(x): + return np.power(10.0, (x) * 0.05) + +def _normalize(S): + if hp.allow_clipping_in_normalization: + if hp.symmetric_mels: + return np.clip((2 * hp.max_abs_value) * ((S - hp.min_level_db) / (-hp.min_level_db)) - hp.max_abs_value, + -hp.max_abs_value, hp.max_abs_value) + else: + return np.clip(hp.max_abs_value * ((S - hp.min_level_db) / (-hp.min_level_db)), 0, hp.max_abs_value) + + assert S.max() <= 0 and S.min() - hp.min_level_db >= 0 + if hp.symmetric_mels: + return (2 * hp.max_abs_value) * ((S - hp.min_level_db) / (-hp.min_level_db)) - hp.max_abs_value + else: + return hp.max_abs_value * ((S - hp.min_level_db) / (-hp.min_level_db)) + +def _denormalize(D): + if hp.allow_clipping_in_normalization: + if hp.symmetric_mels: + return (((np.clip(D, -hp.max_abs_value, + hp.max_abs_value) + hp.max_abs_value) * 
-hp.min_level_db / (2 * hp.max_abs_value)) + + hp.min_level_db) + else: + return ((np.clip(D, 0, hp.max_abs_value) * -hp.min_level_db / hp.max_abs_value) + hp.min_level_db) + + if hp.symmetric_mels: + return (((D + hp.max_abs_value) * -hp.min_level_db / (2 * hp.max_abs_value)) + hp.min_level_db) + else: + return ((D * -hp.min_level_db / hp.max_abs_value) + hp.min_level_db) diff --git a/talkingface/data/dataprocess/emogen_dataprocess.py b/talkingface/data/dataprocess/emogen_dataprocess.py new file mode 100644 index 00000000..e25ecaea --- /dev/null +++ b/talkingface/data/dataprocess/emogen_dataprocess.py @@ -0,0 +1,104 @@ +import argparse +import os +import subprocess +from glob import glob +from tqdm import tqdm +from concurrent.futures import ThreadPoolExecutor, as_completed +import cv2 +import numpy as np +import traceback +import audio +from hparams import hparams as hp + +import face_detection + +def modify_frame_rate(input_folder, output_folder, fps=25.0): + # 修改视频的帧率 + os.makedirs(output_folder, exist_ok=True) + fileList = [] + for root, dirnames, filenames in os.walk(input_folder): + for filename in filenames: + if filename.lower().endswith(('.mp4', '.mpg', '.mov', '.flv')): + fileList.append(os.path.join(root, filename)) + + for file in fileList: + subprocess.run("ffmpeg -i {} -r {} -y {}".format( + file, fps, os.path.join(output_folder, os.path.basename(file))), shell=True) + +def process_video_file(vfile, args, gpu_id, fa): + video_stream = cv2.VideoCapture(vfile) + frames = [] + while True: + still_reading, frame = video_stream.read() + if not still_reading: + video_stream.release() + break + frames.append(frame) + + vidname = os.path.basename(vfile).split('.')[0] + + fulldir = os.path.join(args.preprocessed_root, vidname) + os.makedirs(fulldir, exist_ok=True) + + batches = [frames[i:i + args.batch_size] for i in range(0, len(frames), args.batch_size)] + + i = -1 + for fb in batches: + preds = fa[gpu_id].get_detections_for_batch(np.asarray(fb)) + + for j, 
f in enumerate(preds): + i += 1 + if f is None: + continue + + x1, y1, x2, y2 = f + cv2.imwrite(os.path.join(fulldir, '{}.jpg'.format(i)), fb[j][y1:y2, x1:x2]) + +def process_audio_file(vfile, args): + vidname = os.path.basename(vfile).split('.')[0] + fulldir = os.path.join(args.preprocessed_root, vidname) + os.makedirs(fulldir, exist_ok=True) + + wavpath = os.path.join(fulldir, 'audio.wav') + + command = f"ffmpeg -loglevel panic -y -i {vfile} -strict -2 {wavpath}" + subprocess.call(command, shell=True) + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--input_folder", type=str, help='Path to folder that contains original video files') + parser.add_argument("--output_folder", type=str, help='Path to folder for storing modified videos', default='modified_videos/') + parser.add_argument("--fps", type=float, help='Target FPS', default=25.0) + parser.add_argument("--ngpu", type=int, help='Number of GPUs across which to run in parallel', default=1) + parser.add_argument("--batch_size", type=int, help='Single GPU Face detection batch size', default=32) + parser.add_argument("--preprocessed_root", help="Root folder of the preprocessed dataset", required=True) + + args = parser.parse_args() + + # 第一步:修改视频帧率 + modify_frame_rate(args.input_folder, args.output_folder, args.fps) + + # 配置面部检测 + fa = [face_detection.FaceAlignment(face_detection.LandmarksType._2D, flip_input=False, + device=f'cuda:{id}') for id in range(args.ngpu)] + + # 第二步:视频和音频的数据预处理 + filelist = glob(os.path.join(args.output_folder, '*.mp4')) + + print('Started processing videos') + jobs = [(vfile, args, i % args.ngpu, fa[i % args.ngpu]) for i, vfile in enumerate(filelist)] + with ThreadPoolExecutor(args.ngpu) as p: + futures = [p.submit(process_video_file, *job) for job in jobs] + _ = [r.result() for r in tqdm(as_completed(futures), total=len(futures))] + + print('Dumping audios...') + for vfile in tqdm(filelist): + try: + process_audio_file(vfile, args) + except KeyboardInterrupt: + 
exit(0) + except: + traceback.print_exc() + +if __name__ == '__main__': + main() diff --git a/talkingface/data/dataprocess/face_detection/README.md b/talkingface/data/dataprocess/face_detection/README.md new file mode 100644 index 00000000..c073376e --- /dev/null +++ b/talkingface/data/dataprocess/face_detection/README.md @@ -0,0 +1 @@ +The code for Face Detection in this folder has been taken from the wonderful [face_alignment](https://github.com/1adrianb/face-alignment) repository. This has been modified to take batches of faces at a time. \ No newline at end of file diff --git a/talkingface/data/dataprocess/face_detection/__init__.py b/talkingface/data/dataprocess/face_detection/__init__.py new file mode 100644 index 00000000..4bae29fd --- /dev/null +++ b/talkingface/data/dataprocess/face_detection/__init__.py @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- + +__author__ = """Adrian Bulat""" +__email__ = 'adrian.bulat@nottingham.ac.uk' +__version__ = '1.0.1' + +from .api import FaceAlignment, LandmarksType, NetworkSize diff --git a/talkingface/data/dataprocess/face_detection/api.py b/talkingface/data/dataprocess/face_detection/api.py new file mode 100644 index 00000000..cb02d525 --- /dev/null +++ b/talkingface/data/dataprocess/face_detection/api.py @@ -0,0 +1,79 @@ +from __future__ import print_function +import os +import torch +from torch.utils.model_zoo import load_url +from enum import Enum +import numpy as np +import cv2 +try: + import urllib.request as request_file +except BaseException: + import urllib as request_file + +from .models import FAN, ResNetDepth +from .utils import * + + +class LandmarksType(Enum): + """Enum class defining the type of landmarks to detect. 
+ + ``_2D`` - the detected points ``(x,y)`` are detected in a 2D space and follow the visible contour of the face + ``_2halfD`` - this points represent the projection of the 3D points into 3D + ``_3D`` - detect the points ``(x,y,z)``` in a 3D space + + """ + _2D = 1 + _2halfD = 2 + _3D = 3 + + +class NetworkSize(Enum): + # TINY = 1 + # SMALL = 2 + # MEDIUM = 3 + LARGE = 4 + + def __new__(cls, value): + member = object.__new__(cls) + member._value_ = value + return member + + def __int__(self): + return self.value + +ROOT = os.path.dirname(os.path.abspath(__file__)) + +class FaceAlignment: + def __init__(self, landmarks_type, network_size=NetworkSize.LARGE, + device='cuda', flip_input=False, face_detector='sfd', verbose=False): + self.device = device + self.flip_input = flip_input + self.landmarks_type = landmarks_type + self.verbose = verbose + + network_size = int(network_size) + + if 'cuda' in device: + torch.backends.cudnn.benchmark = True + + # Get the face detector + face_detector_module = __import__('face_detection.detection.' 
+ face_detector, + globals(), locals(), [face_detector], 0) + self.face_detector = face_detector_module.FaceDetector(device=device, verbose=verbose) + + def get_detections_for_batch(self, images): + images = images[..., ::-1] + detected_faces = self.face_detector.detect_from_batch(images.copy()) + results = [] + + for i, d in enumerate(detected_faces): + if len(d) == 0: + results.append(None) + continue + d = d[0] + d = np.clip(d, 0, None) + + x1, y1, x2, y2 = map(int, d[:-1]) + results.append((x1, y1, x2, y2)) + + return results \ No newline at end of file diff --git a/talkingface/data/dataprocess/face_detection/detection/__init__.py b/talkingface/data/dataprocess/face_detection/detection/__init__.py new file mode 100644 index 00000000..1a6b0402 --- /dev/null +++ b/talkingface/data/dataprocess/face_detection/detection/__init__.py @@ -0,0 +1 @@ +from .core import FaceDetector \ No newline at end of file diff --git a/talkingface/data/dataprocess/face_detection/detection/core.py b/talkingface/data/dataprocess/face_detection/detection/core.py new file mode 100644 index 00000000..0f8275e8 --- /dev/null +++ b/talkingface/data/dataprocess/face_detection/detection/core.py @@ -0,0 +1,130 @@ +import logging +import glob +from tqdm import tqdm +import numpy as np +import torch +import cv2 + + +class FaceDetector(object): + """An abstract class representing a face detector. + + Any other face detection implementation must subclass it. All subclasses + must implement ``detect_from_image``, that return a list of detected + bounding boxes. Optionally, for speed considerations detect from path is + recommended. 
+ """ + + def __init__(self, device, verbose): + self.device = device + self.verbose = verbose + + if verbose: + if 'cpu' in device: + logger = logging.getLogger(__name__) + logger.warning("Detection running on CPU, this may be potentially slow.") + + if 'cpu' not in device and 'cuda' not in device: + if verbose: + logger.error("Expected values for device are: {cpu, cuda} but got: %s", device) + raise ValueError + + def detect_from_image(self, tensor_or_path): + """Detects faces in a given image. + + This function detects the faces present in a provided BGR(usually) + image. The input can be either the image itself or the path to it. + + Arguments: + tensor_or_path {numpy.ndarray, torch.tensor or string} -- the path + to an image or the image itself. + + Example:: + + >>> path_to_image = 'data/image_01.jpg' + ... detected_faces = detect_from_image(path_to_image) + [A list of bounding boxes (x1, y1, x2, y2)] + >>> image = cv2.imread(path_to_image) + ... detected_faces = detect_from_image(image) + [A list of bounding boxes (x1, y1, x2, y2)] + + """ + raise NotImplementedError + + def detect_from_directory(self, path, extensions=['.jpg', '.png'], recursive=False, show_progress_bar=True): + """Detects faces from all the images present in a given directory. + + Arguments: + path {string} -- a string containing a path that points to the folder containing the images + + Keyword Arguments: + extensions {list} -- list of string containing the extensions to be + consider in the following format: ``.extension_name`` (default: + {['.jpg', '.png']}) recursive {bool} -- option wherever to scan the + folder recursively (default: {False}) show_progress_bar {bool} -- + display a progressbar (default: {True}) + + Example: + >>> directory = 'data' + ... 
detected_faces = detect_from_directory(directory) + {A dictionary of [lists containing bounding boxes(x1, y1, x2, y2)]} + + """ + if self.verbose: + logger = logging.getLogger(__name__) + + if len(extensions) == 0: + if self.verbose: + logger.error("Expected at list one extension, but none was received.") + raise ValueError + + if self.verbose: + logger.info("Constructing the list of images.") + additional_pattern = '/**/*' if recursive else '/*' + files = [] + for extension in extensions: + files.extend(glob.glob(path + additional_pattern + extension, recursive=recursive)) + + if self.verbose: + logger.info("Finished searching for images. %s images found", len(files)) + logger.info("Preparing to run the detection.") + + predictions = {} + for image_path in tqdm(files, disable=not show_progress_bar): + if self.verbose: + logger.info("Running the face detector on image: %s", image_path) + predictions[image_path] = self.detect_from_image(image_path) + + if self.verbose: + logger.info("The detector was successfully run on all %s images", len(files)) + + return predictions + + @property + def reference_scale(self): + raise NotImplementedError + + @property + def reference_x_shift(self): + raise NotImplementedError + + @property + def reference_y_shift(self): + raise NotImplementedError + + @staticmethod + def tensor_or_path_to_ndarray(tensor_or_path, rgb=True): + """Convert path (represented as a string) or torch.tensor to a numpy.ndarray + + Arguments: + tensor_or_path {numpy.ndarray, torch.tensor or string} -- path to the image, or the image itself + """ + if isinstance(tensor_or_path, str): + return cv2.imread(tensor_or_path) if not rgb else cv2.imread(tensor_or_path)[..., ::-1] + elif torch.is_tensor(tensor_or_path): + # Call cpu in case its coming from cuda + return tensor_or_path.cpu().numpy()[..., ::-1].copy() if not rgb else tensor_or_path.cpu().numpy() + elif isinstance(tensor_or_path, np.ndarray): + return tensor_or_path[..., ::-1].copy() if not rgb else 
tensor_or_path + else: + raise TypeError diff --git a/talkingface/data/dataprocess/face_detection/detection/sfd/__init__.py b/talkingface/data/dataprocess/face_detection/detection/sfd/__init__.py new file mode 100644 index 00000000..5a63ecd4 --- /dev/null +++ b/talkingface/data/dataprocess/face_detection/detection/sfd/__init__.py @@ -0,0 +1 @@ +from .sfd_detector import SFDDetector as FaceDetector \ No newline at end of file diff --git a/talkingface/data/dataprocess/face_detection/detection/sfd/bbox.py b/talkingface/data/dataprocess/face_detection/detection/sfd/bbox.py new file mode 100644 index 00000000..4bd7222e --- /dev/null +++ b/talkingface/data/dataprocess/face_detection/detection/sfd/bbox.py @@ -0,0 +1,129 @@ +from __future__ import print_function +import os +import sys +import cv2 +import random +import datetime +import time +import math +import argparse +import numpy as np +import torch + +try: + from iou import IOU +except BaseException: + # IOU cython speedup 10x + def IOU(ax1, ay1, ax2, ay2, bx1, by1, bx2, by2): + sa = abs((ax2 - ax1) * (ay2 - ay1)) + sb = abs((bx2 - bx1) * (by2 - by1)) + x1, y1 = max(ax1, bx1), max(ay1, by1) + x2, y2 = min(ax2, bx2), min(ay2, by2) + w = x2 - x1 + h = y2 - y1 + if w < 0 or h < 0: + return 0.0 + else: + return 1.0 * w * h / (sa + sb - w * h) + + +def bboxlog(x1, y1, x2, y2, axc, ayc, aww, ahh): + xc, yc, ww, hh = (x2 + x1) / 2, (y2 + y1) / 2, x2 - x1, y2 - y1 + dx, dy = (xc - axc) / aww, (yc - ayc) / ahh + dw, dh = math.log(ww / aww), math.log(hh / ahh) + return dx, dy, dw, dh + + +def bboxloginv(dx, dy, dw, dh, axc, ayc, aww, ahh): + xc, yc = dx * aww + axc, dy * ahh + ayc + ww, hh = math.exp(dw) * aww, math.exp(dh) * ahh + x1, x2, y1, y2 = xc - ww / 2, xc + ww / 2, yc - hh / 2, yc + hh / 2 + return x1, y1, x2, y2 + + +def nms(dets, thresh): + if 0 == len(dets): + return [] + x1, y1, x2, y2, scores = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3], dets[:, 4] + areas = (x2 - x1 + 1) * (y2 - y1 + 1) + order = 
scores.argsort()[::-1] + + keep = [] + while order.size > 0: + i = order[0] + keep.append(i) + xx1, yy1 = np.maximum(x1[i], x1[order[1:]]), np.maximum(y1[i], y1[order[1:]]) + xx2, yy2 = np.minimum(x2[i], x2[order[1:]]), np.minimum(y2[i], y2[order[1:]]) + + w, h = np.maximum(0.0, xx2 - xx1 + 1), np.maximum(0.0, yy2 - yy1 + 1) + ovr = w * h / (areas[i] + areas[order[1:]] - w * h) + + inds = np.where(ovr <= thresh)[0] + order = order[inds + 1] + + return keep + + +def encode(matched, priors, variances): + """Encode the variances from the priorbox layers into the ground truth boxes + we have matched (based on jaccard overlap) with the prior boxes. + Args: + matched: (tensor) Coords of ground truth for each prior in point-form + Shape: [num_priors, 4]. + priors: (tensor) Prior boxes in center-offset form + Shape: [num_priors,4]. + variances: (list[float]) Variances of priorboxes + Return: + encoded boxes (tensor), Shape: [num_priors, 4] + """ + + # dist b/t match center and prior's center + g_cxcy = (matched[:, :2] + matched[:, 2:]) / 2 - priors[:, :2] + # encode variance + g_cxcy /= (variances[0] * priors[:, 2:]) + # match wh / prior wh + g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:] + g_wh = torch.log(g_wh) / variances[1] + # return target for smooth_l1_loss + return torch.cat([g_cxcy, g_wh], 1) # [num_priors,4] + + +def decode(loc, priors, variances): + """Decode locations from predictions using priors to undo + the encoding we did for offset regression at train time. + Args: + loc (tensor): location predictions for loc layers, + Shape: [num_priors,4] + priors (tensor): Prior boxes in center-offset form. + Shape: [num_priors,4]. 
+ variances: (list[float]) Variances of priorboxes + Return: + decoded bounding box predictions + """ + + boxes = torch.cat(( + priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:], + priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1) + boxes[:, :2] -= boxes[:, 2:] / 2 + boxes[:, 2:] += boxes[:, :2] + return boxes + +def batch_decode(loc, priors, variances): + """Decode locations from predictions using priors to undo + the encoding we did for offset regression at train time. + Args: + loc (tensor): location predictions for loc layers, + Shape: [num_priors,4] + priors (tensor): Prior boxes in center-offset form. + Shape: [num_priors,4]. + variances: (list[float]) Variances of priorboxes + Return: + decoded bounding box predictions + """ + + boxes = torch.cat(( + priors[:, :, :2] + loc[:, :, :2] * variances[0] * priors[:, :, 2:], + priors[:, :, 2:] * torch.exp(loc[:, :, 2:] * variances[1])), 2) + boxes[:, :, :2] -= boxes[:, :, 2:] / 2 + boxes[:, :, 2:] += boxes[:, :, :2] + return boxes diff --git a/talkingface/data/dataprocess/face_detection/detection/sfd/detect.py b/talkingface/data/dataprocess/face_detection/detection/sfd/detect.py new file mode 100644 index 00000000..efef6273 --- /dev/null +++ b/talkingface/data/dataprocess/face_detection/detection/sfd/detect.py @@ -0,0 +1,112 @@ +import torch +import torch.nn.functional as F + +import os +import sys +import cv2 +import random +import datetime +import math +import argparse +import numpy as np + +import scipy.io as sio +import zipfile +from .net_s3fd import s3fd +from .bbox import * + + +def detect(net, img, device): + img = img - np.array([104, 117, 123]) + img = img.transpose(2, 0, 1) + img = img.reshape((1,) + img.shape) + + if 'cuda' in device: + torch.backends.cudnn.benchmark = True + + img = torch.from_numpy(img).float().to(device) + BB, CC, HH, WW = img.size() + with torch.no_grad(): + olist = net(img) + + bboxlist = [] + for i in range(len(olist) // 2): + olist[i * 2] = F.softmax(olist[i * 2], 
dim=1) + olist = [oelem.data.cpu() for oelem in olist] + for i in range(len(olist) // 2): + ocls, oreg = olist[i * 2], olist[i * 2 + 1] + FB, FC, FH, FW = ocls.size() # feature map size + stride = 2**(i + 2) # 4,8,16,32,64,128 + anchor = stride * 4 + poss = zip(*np.where(ocls[:, 1, :, :] > 0.05)) + for Iindex, hindex, windex in poss: + axc, ayc = stride / 2 + windex * stride, stride / 2 + hindex * stride + score = ocls[0, 1, hindex, windex] + loc = oreg[0, :, hindex, windex].contiguous().view(1, 4) + priors = torch.Tensor([[axc / 1.0, ayc / 1.0, stride * 4 / 1.0, stride * 4 / 1.0]]) + variances = [0.1, 0.2] + box = decode(loc, priors, variances) + x1, y1, x2, y2 = box[0] * 1.0 + # cv2.rectangle(imgshow,(int(x1),int(y1)),(int(x2),int(y2)),(0,0,255),1) + bboxlist.append([x1, y1, x2, y2, score]) + bboxlist = np.array(bboxlist) + if 0 == len(bboxlist): + bboxlist = np.zeros((1, 5)) + + return bboxlist + +def batch_detect(net, imgs, device): + imgs = imgs - np.array([104, 117, 123]) + imgs = imgs.transpose(0, 3, 1, 2) + + if 'cuda' in device: + torch.backends.cudnn.benchmark = True + + imgs = torch.from_numpy(imgs).float().to(device) + BB, CC, HH, WW = imgs.size() + with torch.no_grad(): + olist = net(imgs) + + bboxlist = [] + for i in range(len(olist) // 2): + olist[i * 2] = F.softmax(olist[i * 2], dim=1) + olist = [oelem.data.cpu() for oelem in olist] + for i in range(len(olist) // 2): + ocls, oreg = olist[i * 2], olist[i * 2 + 1] + FB, FC, FH, FW = ocls.size() # feature map size + stride = 2**(i + 2) # 4,8,16,32,64,128 + anchor = stride * 4 + poss = zip(*np.where(ocls[:, 1, :, :] > 0.05)) + for Iindex, hindex, windex in poss: + axc, ayc = stride / 2 + windex * stride, stride / 2 + hindex * stride + score = ocls[:, 1, hindex, windex] + loc = oreg[:, :, hindex, windex].contiguous().view(BB, 1, 4) + priors = torch.Tensor([[axc / 1.0, ayc / 1.0, stride * 4 / 1.0, stride * 4 / 1.0]]).view(1, 1, 4) + variances = [0.1, 0.2] + box = batch_decode(loc, priors, variances) + box 
= box[:, 0] * 1.0 + # cv2.rectangle(imgshow,(int(x1),int(y1)),(int(x2),int(y2)),(0,0,255),1) + bboxlist.append(torch.cat([box, score.unsqueeze(1)], 1).cpu().numpy()) + bboxlist = np.array(bboxlist) + if 0 == len(bboxlist): + bboxlist = np.zeros((1, BB, 5)) + + return bboxlist + +def flip_detect(net, img, device): + img = cv2.flip(img, 1) + b = detect(net, img, device) + + bboxlist = np.zeros(b.shape) + bboxlist[:, 0] = img.shape[1] - b[:, 2] + bboxlist[:, 1] = b[:, 1] + bboxlist[:, 2] = img.shape[1] - b[:, 0] + bboxlist[:, 3] = b[:, 3] + bboxlist[:, 4] = b[:, 4] + return bboxlist + + +def pts_to_bb(pts): + min_x, min_y = np.min(pts, axis=0) + max_x, max_y = np.max(pts, axis=0) + return np.array([min_x, min_y, max_x, max_y]) diff --git a/talkingface/data/dataprocess/face_detection/detection/sfd/net_s3fd.py b/talkingface/data/dataprocess/face_detection/detection/sfd/net_s3fd.py new file mode 100644 index 00000000..fc64313c --- /dev/null +++ b/talkingface/data/dataprocess/face_detection/detection/sfd/net_s3fd.py @@ -0,0 +1,129 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class L2Norm(nn.Module): + def __init__(self, n_channels, scale=1.0): + super(L2Norm, self).__init__() + self.n_channels = n_channels + self.scale = scale + self.eps = 1e-10 + self.weight = nn.Parameter(torch.Tensor(self.n_channels)) + self.weight.data *= 0.0 + self.weight.data += self.scale + + def forward(self, x): + norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps + x = x / norm * self.weight.view(1, -1, 1, 1) + return x + + +class s3fd(nn.Module): + def __init__(self): + super(s3fd, self).__init__() + self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1) + self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1) + + self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1) + self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1) + + self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1) 
+ self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) + self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) + + self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1) + self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) + self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) + + self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) + self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) + self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1) + + self.fc6 = nn.Conv2d(512, 1024, kernel_size=3, stride=1, padding=3) + self.fc7 = nn.Conv2d(1024, 1024, kernel_size=1, stride=1, padding=0) + + self.conv6_1 = nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0) + self.conv6_2 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1) + + self.conv7_1 = nn.Conv2d(512, 128, kernel_size=1, stride=1, padding=0) + self.conv7_2 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1) + + self.conv3_3_norm = L2Norm(256, scale=10) + self.conv4_3_norm = L2Norm(512, scale=8) + self.conv5_3_norm = L2Norm(512, scale=5) + + self.conv3_3_norm_mbox_conf = nn.Conv2d(256, 4, kernel_size=3, stride=1, padding=1) + self.conv3_3_norm_mbox_loc = nn.Conv2d(256, 4, kernel_size=3, stride=1, padding=1) + self.conv4_3_norm_mbox_conf = nn.Conv2d(512, 2, kernel_size=3, stride=1, padding=1) + self.conv4_3_norm_mbox_loc = nn.Conv2d(512, 4, kernel_size=3, stride=1, padding=1) + self.conv5_3_norm_mbox_conf = nn.Conv2d(512, 2, kernel_size=3, stride=1, padding=1) + self.conv5_3_norm_mbox_loc = nn.Conv2d(512, 4, kernel_size=3, stride=1, padding=1) + + self.fc7_mbox_conf = nn.Conv2d(1024, 2, kernel_size=3, stride=1, padding=1) + self.fc7_mbox_loc = nn.Conv2d(1024, 4, kernel_size=3, stride=1, padding=1) + self.conv6_2_mbox_conf = nn.Conv2d(512, 2, kernel_size=3, stride=1, padding=1) + self.conv6_2_mbox_loc = nn.Conv2d(512, 4, kernel_size=3, stride=1, padding=1) + 
self.conv7_2_mbox_conf = nn.Conv2d(256, 2, kernel_size=3, stride=1, padding=1) + self.conv7_2_mbox_loc = nn.Conv2d(256, 4, kernel_size=3, stride=1, padding=1) + + def forward(self, x): + h = F.relu(self.conv1_1(x)) + h = F.relu(self.conv1_2(h)) + h = F.max_pool2d(h, 2, 2) + + h = F.relu(self.conv2_1(h)) + h = F.relu(self.conv2_2(h)) + h = F.max_pool2d(h, 2, 2) + + h = F.relu(self.conv3_1(h)) + h = F.relu(self.conv3_2(h)) + h = F.relu(self.conv3_3(h)) + f3_3 = h + h = F.max_pool2d(h, 2, 2) + + h = F.relu(self.conv4_1(h)) + h = F.relu(self.conv4_2(h)) + h = F.relu(self.conv4_3(h)) + f4_3 = h + h = F.max_pool2d(h, 2, 2) + + h = F.relu(self.conv5_1(h)) + h = F.relu(self.conv5_2(h)) + h = F.relu(self.conv5_3(h)) + f5_3 = h + h = F.max_pool2d(h, 2, 2) + + h = F.relu(self.fc6(h)) + h = F.relu(self.fc7(h)) + ffc7 = h + h = F.relu(self.conv6_1(h)) + h = F.relu(self.conv6_2(h)) + f6_2 = h + h = F.relu(self.conv7_1(h)) + h = F.relu(self.conv7_2(h)) + f7_2 = h + + f3_3 = self.conv3_3_norm(f3_3) + f4_3 = self.conv4_3_norm(f4_3) + f5_3 = self.conv5_3_norm(f5_3) + + cls1 = self.conv3_3_norm_mbox_conf(f3_3) + reg1 = self.conv3_3_norm_mbox_loc(f3_3) + cls2 = self.conv4_3_norm_mbox_conf(f4_3) + reg2 = self.conv4_3_norm_mbox_loc(f4_3) + cls3 = self.conv5_3_norm_mbox_conf(f5_3) + reg3 = self.conv5_3_norm_mbox_loc(f5_3) + cls4 = self.fc7_mbox_conf(ffc7) + reg4 = self.fc7_mbox_loc(ffc7) + cls5 = self.conv6_2_mbox_conf(f6_2) + reg5 = self.conv6_2_mbox_loc(f6_2) + cls6 = self.conv7_2_mbox_conf(f7_2) + reg6 = self.conv7_2_mbox_loc(f7_2) + + # max-out background label + chunk = torch.chunk(cls1, 4, 1) + bmax = torch.max(torch.max(chunk[0], chunk[1]), chunk[2]) + cls1 = torch.cat([bmax, chunk[3]], dim=1) + + return [cls1, reg1, cls2, reg2, cls3, reg3, cls4, reg4, cls5, reg5, cls6, reg6] diff --git a/talkingface/data/dataprocess/face_detection/detection/sfd/sfd_detector.py b/talkingface/data/dataprocess/face_detection/detection/sfd/sfd_detector.py new file mode 100644 index 00000000..8fbce152 
models_urls = {
    's3fd': 'https://www.adrianbulat.com/downloads/python-fan/s3fd-619a316812.pth',
}


class SFDDetector(FaceDetector):
    """S3FD-based face detector.

    Loads weights from *path_to_detector* if that file exists, otherwise
    downloads the published s3fd checkpoint.
    """

    def __init__(self, device, path_to_detector=os.path.join(os.path.dirname(os.path.abspath(__file__)), 's3fd.pth'), verbose=False):
        super(SFDDetector, self).__init__(device, verbose)

        # Initialise the face detector.
        # BUGFIX: pass map_location so a checkpoint serialized on a GPU can
        # still be deserialized on a CPU-only host; the tensors are moved to
        # the requested device afterwards anyway.
        if not os.path.isfile(path_to_detector):
            model_weights = load_url(models_urls['s3fd'],
                                     map_location=lambda storage, loc: storage)
        else:
            model_weights = torch.load(path_to_detector,
                                       map_location=lambda storage, loc: storage)

        self.face_detector = s3fd()
        self.face_detector.load_state_dict(model_weights)
        self.face_detector.to(device)
        self.face_detector.eval()

    def detect_from_image(self, tensor_or_path):
        """Detect faces in one image.

        Returns [x1, y1, x2, y2, score] entries surviving NMS (IoU 0.3)
        with score > 0.5.
        """
        image = self.tensor_or_path_to_ndarray(tensor_or_path)

        bboxlist = detect(self.face_detector, image, device=self.device)
        keep = nms(bboxlist, 0.3)
        bboxlist = bboxlist[keep, :]
        bboxlist = [x for x in bboxlist if x[-1] > 0.5]

        return bboxlist

    def detect_from_batch(self, images):
        """Batch variant of detect_from_image: one filtered box list per image."""
        bboxlists = batch_detect(self.face_detector, images, device=self.device)
        keeps = [nms(bboxlists[:, i, :], 0.3) for i in range(bboxlists.shape[1])]
        bboxlists = [bboxlists[keep, i, :] for i, keep in enumerate(keeps)]
        bboxlists = [[x for x in bboxlist if x[-1] > 0.5] for bboxlist in bboxlists]

        return bboxlists

    @property
    def reference_scale(self):
        # Canonical face scale used by downstream landmark cropping.
        return 195

    @property
    def reference_x_shift(self):
        return 0

    @property
    def reference_y_shift(self):
        return 0
def conv3x3(in_planes, out_planes, strd=1, padding=1, bias=False):
    """3x3 convolution with padding."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3,
                     stride=strd, padding=padding, bias=bias)


class ConvBlock(nn.Module):
    """Hourglass residual block.

    Three BN -> ReLU -> 3x3-conv stages whose outputs (out/2, out/4, out/4
    channels) are concatenated channel-wise, added to a skip connection that
    is 1x1-projected when the channel counts differ.
    """

    def __init__(self, in_planes, out_planes):
        super(ConvBlock, self).__init__()
        half = int(out_planes / 2)
        quarter = int(out_planes / 4)
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = conv3x3(in_planes, half)
        self.bn2 = nn.BatchNorm2d(half)
        self.conv2 = conv3x3(half, quarter)
        self.bn3 = nn.BatchNorm2d(quarter)
        self.conv3 = conv3x3(quarter, quarter)

        if in_planes != out_planes:
            # Project the skip path so channel counts match for the addition.
            self.downsample = nn.Sequential(
                nn.BatchNorm2d(in_planes),
                nn.ReLU(True),
                nn.Conv2d(in_planes, out_planes,
                          kernel_size=1, stride=1, bias=False),
            )
        else:
            self.downsample = None

    def forward(self, x):
        stage1 = self.conv1(F.relu(self.bn1(x), True))
        stage2 = self.conv2(F.relu(self.bn2(stage1), True))
        stage3 = self.conv3(F.relu(self.bn3(stage2), True))

        merged = torch.cat((stage1, stage2, stage3), 1)

        skip = x if self.downsample is None else self.downsample(x)
        return merged + skip
class HourGlass(nn.Module):
    """Recursive hourglass module used by FAN.

    Builds a symmetric encoder/decoder of ConvBlocks via recursion over
    *depth*; submodules are registered by name ('b1_<level>' etc.), so the
    registration order below must not change (it determines state_dict keys).
    """

    def __init__(self, num_modules, depth, num_features):
        super(HourGlass, self).__init__()
        self.num_modules = num_modules
        self.depth = depth
        self.features = num_features

        self._generate_network(self.depth)

    def _generate_network(self, level):
        # b1: upper (identity-resolution) branch; b2: lower branch pre-recursion.
        self.add_module('b1_' + str(level), ConvBlock(self.features, self.features))

        self.add_module('b2_' + str(level), ConvBlock(self.features, self.features))

        if level > 1:
            self._generate_network(level - 1)
        else:
            # Bottom of the hourglass gets an extra block instead of recursing.
            self.add_module('b2_plus_' + str(level), ConvBlock(self.features, self.features))

        self.add_module('b3_' + str(level), ConvBlock(self.features, self.features))

    def _forward(self, level, inp):
        # Upper branch
        up1 = inp
        up1 = self._modules['b1_' + str(level)](up1)

        # Lower branch: downsample, recurse (or bottom block), then upsample.
        low1 = F.avg_pool2d(inp, 2, stride=2)
        low1 = self._modules['b2_' + str(level)](low1)

        if level > 1:
            low2 = self._forward(level - 1, low1)
        else:
            low2 = low1
            low2 = self._modules['b2_plus_' + str(level)](low2)

        low3 = low2
        low3 = self._modules['b3_' + str(level)](low3)

        up2 = F.interpolate(low3, scale_factor=2, mode='nearest')

        # Merge the two branches at the original resolution.
        return up1 + up2

    def forward(self, x):
        return self._forward(self.depth, x)
class ResNetDepth(nn.Module):
    """ResNet trunk predicting per-landmark depth.

    Input is an image concatenated with 68 heatmap channels (3 + 68);
    output is one depth value per landmark (num_classes=68).
    """

    def __init__(self, block=Bottleneck, layers=[3, 8, 36, 3], num_classes=68):
        # NOTE(review): mutable default argument `layers=[...]` — safe only
        # as long as no caller mutates it; consider a tuple.
        self.inplanes = 64
        super(ResNetDepth, self).__init__()
        self.conv1 = nn.Conv2d(3 + 68, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        # He-style init for convs; BN starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack *blocks* Bottlenecks; the first may downsample/project."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        # Global average pool then flatten for the linear head.
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)

        return x
def draw_gaussian(image, point, sigma):
    """Additively stamp a Gaussian blob of width *sigma* at *point* onto *image*.

    *image* is a 2D numpy heatmap (modified in place and also returned);
    values are clipped to 1 afterwards. The kernel is clipped at the image
    border; a kernel entirely outside the image returns *image* unchanged.
    Coordinates here are 1-based, hence the pervasive +/-1 adjustments.
    """
    # Check if the gaussian is inside
    ul = [math.floor(point[0] - 3 * sigma), math.floor(point[1] - 3 * sigma)]
    br = [math.floor(point[0] + 3 * sigma), math.floor(point[1] + 3 * sigma)]
    if (ul[0] > image.shape[1] or ul[1] > image.shape[0] or br[0] < 1 or br[1] < 1):
        return image
    size = 6 * sigma + 1
    g = _gaussian(size)
    # Overlapping ranges: g_x/g_y index into the kernel, img_x/img_y into the image.
    g_x = [int(max(1, -ul[0])), int(min(br[0], image.shape[1])) - int(max(1, ul[0])) + int(max(1, -ul[0]))]
    g_y = [int(max(1, -ul[1])), int(min(br[1], image.shape[0])) - int(max(1, ul[1])) + int(max(1, -ul[1]))]
    img_x = [int(max(1, ul[0])), int(min(br[0], image.shape[1]))]
    img_y = [int(max(1, ul[1])), int(min(br[1], image.shape[0]))]
    assert (g_x[0] > 0 and g_y[1] > 0)
    image[img_y[0] - 1:img_y[1], img_x[0] - 1:img_x[1]
          ] = image[img_y[0] - 1:img_y[1], img_x[0] - 1:img_x[1]] + g[g_y[0] - 1:g_y[1], g_x[0] - 1:g_x[1]]
    # Heatmap values saturate at 1.
    image[image > 1] = 1
    return image
def crop(image, center, scale, resolution=256.0):
    """Center crops an image or set of heatmaps

    Arguments:
        image {numpy.array} -- an rgb (or single-channel) image
        center {numpy.array} -- the center of the object, usually the same as of the bounding box
        scale {float} -- scale of the face

    Keyword Arguments:
        resolution {float} -- the size of the output cropped image (default: {256.0})

    Returns:
        numpy.array -- the cropped patch, resized to (resolution, resolution)
    """
    # Map the output corners back into source-image coordinates.
    ul = transform([1, 1], center, scale, resolution, True)
    br = transform([resolution, resolution], center, scale, resolution, True)
    # pad = math.ceil(torch.norm((ul - br).float()) / 2.0 - (br[0] - ul[0]) / 2.0)
    if image.ndim > 2:
        newDim = np.array([br[1] - ul[1], br[0] - ul[0],
                           image.shape[2]], dtype=np.int32)
    else:
        # BUGFIX: np.int was removed in NumPy 1.24; use np.int32 like the
        # color branch.
        newDim = np.array([br[1] - ul[1], br[0] - ul[0]], dtype=np.int32)
    newImg = np.zeros(newDim, dtype=np.uint8)
    ht = image.shape[0]
    wd = image.shape[1]
    # Clip the copy window to the valid region of the source image.
    newX = np.array(
        [max(1, -ul[0] + 1), min(br[0], wd) - ul[0]], dtype=np.int32)
    newY = np.array(
        [max(1, -ul[1] + 1), min(br[1], ht) - ul[1]], dtype=np.int32)
    oldX = np.array([max(1, ul[0] + 1), min(br[0], wd)], dtype=np.int32)
    oldY = np.array([max(1, ul[1] + 1), min(br[1], ht)], dtype=np.int32)
    if image.ndim > 2:
        newImg[newY[0] - 1:newY[1], newX[0] - 1:newX[1]
               ] = image[oldY[0] - 1:oldY[1], oldX[0] - 1:oldX[1], :]
    else:
        # BUGFIX: a 2D image cannot be indexed with a trailing ':' axis.
        newImg[newY[0] - 1:newY[1], newX[0] - 1:newX[1]
               ] = image[oldY[0] - 1:oldY[1], oldX[0] - 1:oldX[1]]
    newImg = cv2.resize(newImg, dsize=(int(resolution), int(resolution)),
                        interpolation=cv2.INTER_LINEAR)
    return newImg
def get_preds_fromhm_batch(hm, centers=None, scales=None):
    """Obtain (x,y) coordinates given a set of N heatmaps. If the centers
    and the scales is provided the function will return the points also in
    the original coordinate frame.

    Arguments:
        hm {torch.tensor} -- the predicted heatmaps, of shape [B, N, W, H]

    Keyword Arguments:
        centers {torch.tensor} -- the centers of the bounding box (default: {None})
        scales {float} -- face scales (default: {None})
    """
    # NOTE(review): `max` shadows the builtin; only `idx` is used afterwards.
    max, idx = torch.max(
        hm.view(hm.size(0), hm.size(1), hm.size(2) * hm.size(3)), 2)
    idx += 1
    # Convert flat argmax indices to 1-based (x, y) heatmap coordinates.
    preds = idx.view(idx.size(0), idx.size(1), 1).repeat(1, 1, 2).float()
    preds[..., 0].apply_(lambda x: (x - 1) % hm.size(3) + 1)
    preds[..., 1].add_(-1).div_(hm.size(2)).floor_().add_(1)

    for i in range(preds.size(0)):
        for j in range(preds.size(1)):
            hm_ = hm[i, j, :]
            pX, pY = int(preds[i, j, 0]) - 1, int(preds[i, j, 1]) - 1
            # Quarter-pixel refinement toward the gradient of the heatmap.
            # NOTE(review): bound 63 hard-codes a 64x64 heatmap — confirm
            # hm is always 64x64 here.
            if pX > 0 and pX < 63 and pY > 0 and pY < 63:
                diff = torch.FloatTensor(
                    [hm_[pY, pX + 1] - hm_[pY, pX - 1],
                     hm_[pY + 1, pX] - hm_[pY - 1, pX]])
                preds[i, j].add_(diff.sign_().mul_(.25))

    preds.add_(-.5)

    # Optionally map the predictions back to original-image coordinates.
    preds_orig = torch.zeros(preds.size())
    if centers is not None and scales is not None:
        for i in range(hm.size(0)):
            for j in range(hm.size(1)):
                preds_orig[i, j] = transform(
                    preds[i, j], centers[i], scales[i], hm.size(2), True)

    return preds, preds_orig
def flip(tensor, is_label=False):
    """Flip an image or a set of heatmaps left-right.

    Arguments:
        tensor {numpy.array or torch.tensor} -- the input image or heatmaps

    Keyword Arguments:
        is_label {bool} -- True when the input is a set of landmark heatmaps,
            in which case channels are also swapped to their mirrored
            counterparts via shuffle_lr (default: {False})
    """
    if not torch.is_tensor(tensor):
        tensor = torch.from_numpy(tensor)

    last_axis = tensor.ndimension() - 1
    if is_label:
        return shuffle_lr(tensor).flip(last_axis)
    return tensor.flip(last_axis)
+ """ + + # Define default user directory + userDir = os.getenv('FACEALIGNMENT_USERDIR', None) + if userDir is None: + userDir = os.path.expanduser('~') + if not os.path.isdir(userDir): # pragma: no cover + userDir = '/var/tmp' # issue #54 + + # Get system app data dir + path = None + if sys.platform.startswith('win'): + path1, path2 = os.getenv('LOCALAPPDATA'), os.getenv('APPDATA') + path = (path2 or path1) if roaming else (path1 or path2) + elif sys.platform.startswith('darwin'): + path = os.path.join(userDir, 'Library', 'Application Support') + # On Linux and as fallback + if not (path and os.path.isdir(path)): + path = userDir + + # Maybe we should store things local to the executable (in case of a + # portable distro or a frozen application that wants to be portable) + prefix = sys.prefix + if getattr(sys, 'frozen', None): + prefix = os.path.abspath(os.path.dirname(sys.executable)) + for reldir in ('settings', '../settings'): + localpath = os.path.abspath(os.path.join(prefix, reldir)) + if os.path.isdir(localpath): # pragma: no cover + try: + open(os.path.join(localpath, 'test.write'), 'wb').close() + os.remove(os.path.join(localpath, 'test.write')) + except IOError: + pass # We cannot write in this directory + else: + path = localpath + break + + # Get path specific for this app + if appname: + if path == userDir: + appname = '.' 
+ appname.lstrip('.') # Make it a hidden directory + path = os.path.join(path, appname) + if not os.path.isdir(path): # pragma: no cover + os.mkdir(path) + + # Done + return path diff --git a/talkingface/data/dataprocess/hparams.py b/talkingface/data/dataprocess/hparams.py new file mode 100644 index 00000000..f9b73de9 --- /dev/null +++ b/talkingface/data/dataprocess/hparams.py @@ -0,0 +1,106 @@ +from glob import glob +import os + +def get_image_list(data_root, split): + filelist = [] + + with open('filelists/{}.txt'.format(split)) as f: + for line in f: + line = line.strip() + if ' ' in line: line = line.split()[0] + filelist.append(os.path.join(data_root, line)) + + return filelist + +class HParams: + def __init__(self, **kwargs): + self.data = {} + + for key, value in kwargs.items(): + self.data[key] = value + + def __getattr__(self, key): + if key not in self.data: + raise AttributeError("'HParams' object has no attribute %s" % key) + return self.data[key] + + def set_hparam(self, key, value): + self.data[key] = value + + +# Default hyperparameters +hparams = HParams( + num_mels=80, # Number of mel-spectrogram channels and local conditioning dimensionality + # network + rescale=True, # Whether to rescale audio prior to preprocessing + rescaling_max=0.9, # Rescaling value + + # Use LWS (https://github.com/Jonathan-LeRoux/lws) for STFT and phase reconstruction + # It"s preferred to set True to use with https://github.com/r9y9/wavenet_vocoder + # Does not work if n_ffit is not multiple of hop_size!! + use_lws=False, + + n_fft=800, # Extra window size is filled with 0 paddings to match this parameter + hop_size=200, # For 16000Hz, 200 = 12.5 ms (0.0125 * sample_rate) + win_size=800, # For 16000Hz, 800 = 50 ms (If None, win_size = n_fft) (0.05 * sample_rate) + sample_rate=16000, # 16000Hz (corresponding to librispeech) (sox --i ) + + frame_shift_ms=None, # Can replace hop_size parameter. 
(Recommended: 12.5) + + # Mel and Linear spectrograms normalization/scaling and clipping + signal_normalization=True, + # Whether to normalize mel spectrograms to some predefined range (following below parameters) + allow_clipping_in_normalization=True, # Only relevant if mel_normalization = True + symmetric_mels=True, + # Whether to scale the data to be symmetric around 0. (Also multiplies the output range by 2, + # faster and cleaner convergence) + max_abs_value=4., + # max absolute value of data. If symmetric, data will be [-max, max] else [0, max] (Must not + # be too big to avoid gradient explosion, + # not too small for fast convergence) + # Contribution by @begeekmyfriend + # Spectrogram Pre-Emphasis (Lfilter: Reduce spectrogram noise and helps model certitude + # levels. Also allows for better G&L phase reconstruction) + preemphasize=True, # whether to apply filter + preemphasis=0.97, # filter coefficient. + + # Limits + min_level_db=-100, + ref_level_db=20, + fmin=55, + # Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To + # test depending on dataset. Pitch info: male~[65, 260], female~[100, 525]) + fmax=7600, # To be increased/reduced depending on data. + + ###################### Our training parameters ################################# + img_size=96, + fps=25, + + batch_size=16, + initial_learning_rate=1e-4, + nepochs=200000000000000000, ### ctrl + c, stop whenever eval loss is consistently greater than train loss for ~10 epochs + num_workers=16, + checkpoint_interval=10000, + eval_interval=3000, + save_optimizer_state=True, + + syncnet_wt=0.0, # is initially zero, will be set automatically to 0.03 later. Leads to faster convergence. 
def hparams_debug_string(hp_obj=None):
    """Return a human-readable dump of the hyperparameters.

    Keyword Arguments:
        hp_obj -- object exposing a ``data`` dict of hyperparameters;
            defaults to the module-level ``hparams`` instance.
    """
    # BUGFIX: this previously called hparams.values(), but HParams defines no
    # such method and its __getattr__ raises AttributeError for unknown keys,
    # so the function always crashed. Read the backing dict directly.
    values = (hparams if hp_obj is None else hp_obj).data
    hp = ["  %s: %s" % (name, values[name]) for name in sorted(values) if name != "sentences"]
    return "Hyperparameters:\n" + "\n".join(hp)
emotion_dict = {'ANG': 0, 'DIS': 1, 'FEA': 2, 'HAP': 3, 'NEU': 4, 'SAD': 5}
intensity_dict = {'XX': 0, 'LO': 1, 'MD': 2, 'HI': 3}
emonet_T = 5  # number of consecutive frames per emotion-net window


class EmotionDataset(Dataset):
    """Windows of `emonet_T` consecutive frames with a one-hot emotion label.

    Expects ``config['<split>_filelist']`` to point at a directory of
    per-video folders named like CREMA-D clips
    (``<actor>_<sentence>_<EMO>_<INT>``), each containing numbered ``N.jpg``
    frames.
    """

    def __init__(self, config, datasplit):
        super().__init__()
        self.config = config
        self.split = datasplit

        path_key = f"{datasplit}_filelist"
        if path_key not in self.config:
            raise ValueError(f"Path for {datasplit} data is not defined in config")

        self.path = self.config[path_key]

        self.all_videos = [f for f in os.listdir(self.path) if isdir(join(self.path, f))]

        self.filelist = []
        for filename in self.all_videos:
            labels = splitext(filename)[0].split('_')
            emotion = emotion_dict[labels[2]]
            emotion_intensity = intensity_dict[labels[3]]

            # For validation, only use high intensity clips.
            if datasplit == 'val' and emotion_intensity != intensity_dict['HI']:
                continue

            self.filelist.append((filename, emotion, emotion_intensity))

        print('Num files: ', len(self.filelist))

    # BUGFIX: __getitem__ called get_window/read_window/prepare_window which
    # were only mentioned in a comment and never defined, so every access
    # raised AttributeError. Standard Wav2Lip-style implementations follow.

    def get_frame_id(self, frame):
        """Numeric frame index encoded in the file name ``<id>.jpg``."""
        return int(basename(frame).split('.')[0])

    def get_window(self, start_frame):
        """Paths of emonet_T consecutive frames starting at *start_frame*,
        or None when any frame in the window is missing."""
        start_id = self.get_frame_id(start_frame)
        vidname = os.path.dirname(start_frame)

        window_fnames = []
        for frame_id in range(start_id, start_id + emonet_T):
            frame = join(vidname, '{}.jpg'.format(frame_id))
            if not isfile(frame):
                return None
            window_fnames.append(frame)
        return window_fnames

    def read_window(self, window_fnames):
        """Load and resize a window's frames; None on any read failure."""
        window = []
        for fname in window_fnames:
            img = cv2.imread(fname)
            if img is None:
                return None
            try:
                img = cv2.resize(img, (hparams.img_size, hparams.img_size))
            except Exception:
                return None
            window.append(img)
        return window

    def prepare_window(self, window):
        """Stack frames into a float array of shape (3, T, H, W) in [0, 1]."""
        x = np.asarray(window) / 255.
        x = np.transpose(x, (3, 0, 1, 2))
        return x

    def __len__(self):
        return len(self.filelist)

    def __getitem__(self, idx):
        # NOTE(review): the provided index is ignored and a random item is
        # drawn each call (Wav2Lip-style sampling) — confirm this is intended.
        while True:
            idx = random.randint(0, len(self.filelist) - 1)
            filename = self.filelist[idx]
            vidname = filename[0]
            emotion = int(filename[1])
            emotion = to_categorical(emotion, num_classes=6)

            img_names = list(glob(join(self.path, vidname, '*.jpg')))

            # Require enough frames to sample a full window comfortably.
            if len(img_names) <= 3 * emonet_T:
                continue
            img_name = random.choice(img_names)

            window_fnames = self.get_window(img_name)
            if window_fnames is None:
                continue

            window = self.read_window(window_fnames)
            if window is None:
                continue

            x = self.prepare_window(window)
            x = torch.FloatTensor(x)

            data = {
                'input': x,
                'emotion': emotion
            }

            return data
+ use_lws=False, + + n_fft=800, # Extra window size is filled with 0 paddings to match this parameter + hop_size=200, # For 16000Hz, 200 = 12.5 ms (0.0125 * sample_rate) + win_size=800, # For 16000Hz, 800 = 50 ms (If None, win_size = n_fft) (0.05 * sample_rate) + sample_rate=16000, # 16000Hz (corresponding to librispeech) (sox --i ) + + frame_shift_ms=None, # Can replace hop_size parameter. (Recommended: 12.5) + + # Mel and Linear spectrograms normalization/scaling and clipping + signal_normalization=True, + # Whether to normalize mel spectrograms to some predefined range (following below parameters) + allow_clipping_in_normalization=True, # Only relevant if mel_normalization = True + symmetric_mels=True, + # Whether to scale the data to be symmetric around 0. (Also multiplies the output range by 2, + # faster and cleaner convergence) + max_abs_value=4., + # max absolute value of data. If symmetric, data will be [-max, max] else [0, max] (Must not + # be too big to avoid gradient explosion, + # not too small for fast convergence) + # Contribution by @begeekmyfriend + # Spectrogram Pre-Emphasis (Lfilter: Reduce spectrogram noise and helps model certitude + # levels. Also allows for better G&L phase reconstruction) + preemphasize=True, # whether to apply filter + preemphasis=0.97, # filter coefficient. + + # Limits + min_level_db=-100, + ref_level_db=20, + fmin=55, + # Set this to 55 if your speaker is male! if female, 95 should help taking off noise. (To + # test depending on dataset. Pitch info: male~[65, 260], female~[100, 525]) + fmax=7600, # To be increased/reduced depending on data. 
def hparams_debug_string(hp_obj=None):
    """Return a human-readable dump of the hyperparameters.

    Keyword Arguments:
        hp_obj -- object exposing a ``data`` dict of hyperparameters;
            defaults to the module-level ``hparams`` instance.
    """
    # BUGFIX: this previously called hparams.values(), but HParams defines no
    # such method and its __getattr__ raises AttributeError for unknown keys,
    # so the function always crashed. Read the backing dict directly.
    values = (hparams if hp_obj is None else hp_obj).data
    hp = ["  %s: %s" % (name, values[name]) for name in sorted(values) if name != "sentences"]
    return "Hyperparameters:\n" + "\n".join(hp)
nn.Linear(4608, 2048), + nn.LeakyReLU(0.3), + nn.Linear(2048, 512) + ) + self.rnn_1 = nn.LSTM(512, 512, 1, bidirectional=False, batch_first=True) + self.cls = nn.Sequential( + nn.Linear(512, 6) + ) + + # 优化器和学习率调度器 + self.opt = torch.optim.Adam(self.parameters(), lr=1e-06, betas=(0.5, 0.999)) + self.scheduler = torch.optim.lr_scheduler.StepLR(self.opt, 150, gamma=0.1, last_epoch=-1) + + def forward(self, video): + x = video + n, c, t, w, h = x.size() + x = x.view(t * n, c, w, h) + + # 逐层应用卷积 + for conv in self.conv_layers: + x = conv(x) + + # 将输出重新塑形并通过后续层 + h = x.view(n, t, -1) + h = self.projector(h) + h, _ = self.rnn_1(h) + + # 分类输出 + h_class = self.cls(h[:, -1, :]) + return h_class + + def calculate_loss(self, interaction): + video = interaction['input'] + target = interaction['target'] + + output = self.forward(video) + loss = self.loss_func(output, target) + return {'loss': loss} + + def predict(self, interaction): + video = interaction['input'] + with torch.no_grad(): + output = self.forward(video) + return output + + def generate_batch(self): + # 实现批量生成数据的方法 + raise NotImplementedError + + # 其他方法可以根据需要保持不变或进行修改 diff --git a/talkingface/properties/emogen.yaml b/talkingface/properties/emogen.yaml new file mode 100644 index 00000000..5f15477c --- /dev/null +++ b/talkingface/properties/emogen.yaml @@ -0,0 +1,70 @@ +# Enviroment Settings +gpu_id: '3' # The id of GPU device(s). +worker: 0 # The number of workers processing the data. +use_gpu: True # Whether or not to use GPU. +seed: 2023 # Random seed. +checkpoint_dir: 'saved' # The path to save checkpoint file. +show_progress: True # Whether or not to show the progress bar of every epoch. +log_wandb: False # Whether or not to use Weights & Biases(W&B). +shuffle: True # Whether or not to shuffle the training data before each epoch. +device: 'cuda' +reproducibility: True # Whether or not to make results reproducible. + +# Training Settings +epochs: 300 # The number of training epochs. 
+train_batch_size: 16 # The training batch size. +learner: adam # The name of used optimizer. +learning_rate: 0.0001 # Learning rate. +eval_step: 1 # The number of training epochs before an evaluation on the valid dataset. +stopping_step: 10 # The threshold for validation-based early stopping. +weight_decay: 0.0 # The weight decay value (L2 penalty) for optimizers. +saved: True +resume: True +train: True + +# Additional Model Settings +img_size: 96 +fps: 25 +num_workers: 16 +checkpoint_interval: 10000 +eval_interval: 3000 +save_optimizer_state: True +syncnet_wt: 0.0 +syncnet_batch_size: 128 +syncnet_lr: 0.0001 +syncnet_eval_interval: 500 +syncnet_checkpoint_interval: 1000 +emo_wt: 0.01 +pl_wt: 0.01 +disc_wt: 0.07 +disc_initial_learning_rate: 0.0001 +beta_1: 0.999 +beta_2: 0.999 + +# Audio Preprocessing +num_mels: 80 +rescale: True +rescaling_max: 0.9 +use_lws: False +n_fft: 800 +hop_size: 200 +win_size: 800 +sample_rate: 16000 +signal_normalization: True +allow_clipping_in_normalization: True +symmetric_mels: True +max_abs_value: 4.0 +preemphasize: True +preemphasis: 0.97 +min_level_db: -100 +ref_level_db: 20 +fmin: 55 +fmax: 7600 + +# Evaluation Settings +metrics: ["LSE", "SSIM"] +evaluate_batch_size: 50 # The evaluation batch size. 
+lse_checkpoint_path: 'checkpoints/LSE/syncnet_v2.model' +temp_dir: 'results/temp' +lse_reference_dir: 'lse' +valid_metric_bigger: False # Whether to take a bigger valid metric va From 0bbd9b719327c9529eb6952d2cdc18f7e8ebc64e Mon Sep 17 00:00:00 2001 From: Jiangzheng123 <95006276+Jiangzheng123@users.noreply.github.com> Date: Tue, 30 Jan 2024 11:07:38 +0800 Subject: [PATCH 02/28] Create conv.py --- talkingface/model/conv.py | 44 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 talkingface/model/conv.py diff --git a/talkingface/model/conv.py b/talkingface/model/conv.py new file mode 100644 index 00000000..ed83da00 --- /dev/null +++ b/talkingface/model/conv.py @@ -0,0 +1,44 @@ +import torch +from torch import nn +from torch.nn import functional as F + +class Conv2d(nn.Module): + def __init__(self, cin, cout, kernel_size, stride, padding, residual=False, *args, **kwargs): + super().__init__(*args, **kwargs) + self.conv_block = nn.Sequential( + nn.Conv2d(cin, cout, kernel_size, stride, padding), + nn.BatchNorm2d(cout) + ) + self.act = nn.ReLU() + self.residual = residual + + def forward(self, x): + out = self.conv_block(x) + if self.residual: + out += x + return self.act(out) + +class nonorm_Conv2d(nn.Module): + def __init__(self, cin, cout, kernel_size, stride, padding, residual=False, *args, **kwargs): + super().__init__(*args, **kwargs) + self.conv_block = nn.Sequential( + nn.Conv2d(cin, cout, kernel_size, stride, padding), + ) + self.act = nn.LeakyReLU(0.01, inplace=True) + + def forward(self, x): + out = self.conv_block(x) + return self.act(out) + +class Conv2dTranspose(nn.Module): + def __init__(self, cin, cout, kernel_size, stride, padding, output_padding=0, *args, **kwargs): + super().__init__(*args, **kwargs) + self.conv_block = nn.Sequential( + nn.ConvTranspose2d(cin, cout, kernel_size, stride, padding, output_padding), + nn.BatchNorm2d(cout) + ) + self.act = nn.ReLU() + + def forward(self, x): + out = 
self.conv_block(x) + return self.act(out) From 4617743852a1dadbb80fc12fac112a3b781f350b Mon Sep 17 00:00:00 2001 From: Jiangzheng123 <95006276+Jiangzheng123@users.noreply.github.com> Date: Tue, 30 Jan 2024 11:14:22 +0800 Subject: [PATCH 03/28] Create syncnet.py --- talkingface/model/syncnet.py | 66 ++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) create mode 100644 talkingface/model/syncnet.py diff --git a/talkingface/model/syncnet.py b/talkingface/model/syncnet.py new file mode 100644 index 00000000..e773cdca --- /dev/null +++ b/talkingface/model/syncnet.py @@ -0,0 +1,66 @@ +import torch +from torch import nn +from torch.nn import functional as F + +from .conv import Conv2d + +class SyncNet_color(nn.Module): + def __init__(self): + super(SyncNet_color, self).__init__() + + self.face_encoder = nn.Sequential( + Conv2d(15, 32, kernel_size=(7, 7), stride=1, padding=3), + + Conv2d(32, 64, kernel_size=5, stride=(1, 2), padding=1), + Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(64, 128, kernel_size=3, stride=2, padding=1), + Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(128, 256, kernel_size=3, stride=2, padding=1), + Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(256, 512, kernel_size=3, stride=2, padding=1), + Conv2d(512, 512, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(512, 512, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(512, 512, kernel_size=3, stride=2, padding=1), + Conv2d(512, 512, kernel_size=3, stride=1, padding=0), + Conv2d(512, 512, kernel_size=1, stride=1, padding=0),) + + self.audio_encoder = nn.Sequential( + Conv2d(1, 32, 
kernel_size=3, stride=1, padding=1), + Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(32, 64, kernel_size=3, stride=(3, 1), padding=1), + Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(64, 128, kernel_size=3, stride=3, padding=1), + Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(128, 256, kernel_size=3, stride=(3, 2), padding=1), + Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(256, 512, kernel_size=3, stride=1, padding=0), + Conv2d(512, 512, kernel_size=1, stride=1, padding=0),) + + def forward(self, audio_sequences, face_sequences): # audio_sequences := (B, dim, T) + face_embedding = self.face_encoder(face_sequences) + audio_embedding = self.audio_encoder(audio_sequences) + + audio_embedding = audio_embedding.view(audio_embedding.size(0), -1) + face_embedding = face_embedding.view(face_embedding.size(0), -1) + + audio_embedding = F.normalize(audio_embedding, p=2, dim=1) + face_embedding = F.normalize(face_embedding, p=2, dim=1) + + + return audio_embedding, face_embedding From 292f31c93fa6a60a441499180740716712241c65 Mon Sep 17 00:00:00 2001 From: Jiangzheng123 <95006276+Jiangzheng123@users.noreply.github.com> Date: Tue, 30 Jan 2024 11:17:07 +0800 Subject: [PATCH 04/28] Create wav2lip.py --- talkingface/model/wav2lip.py | 210 +++++++++++++++++++++++++++++++++++ 1 file changed, 210 insertions(+) create mode 100644 talkingface/model/wav2lip.py diff --git a/talkingface/model/wav2lip.py b/talkingface/model/wav2lip.py new file mode 100644 index 00000000..e8a126cc --- /dev/null +++ b/talkingface/model/wav2lip.py @@ -0,0 +1,210 @@ +import torch +from torch import nn 
+from torch.nn import functional as F +import math + +from .conv import Conv2dTranspose, Conv2d, nonorm_Conv2d + +class Wav2Lip(nn.Module): + def __init__(self): + super(Wav2Lip, self).__init__() + + self.face_encoder_blocks = nn.ModuleList([ + nn.Sequential(Conv2d(6, 16, kernel_size=7, stride=1, padding=3)), # 96,96 + # nn.Sequential(Conv2d(3, 16, kernel_size=7, stride=1, padding=3)), # if we remove fully masked input + + nn.Sequential(Conv2d(16, 32, kernel_size=3, stride=2, padding=1), # 48,48 + Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True)), + + nn.Sequential(Conv2d(32, 64, kernel_size=3, stride=2, padding=1), # 24,24 + Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True)), + + nn.Sequential(Conv2d(64, 128, kernel_size=3, stride=2, padding=1), # 12,12 + Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True)), + + nn.Sequential(Conv2d(128, 256, kernel_size=3, stride=2, padding=1), # 6,6 + Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True)), + + nn.Sequential(Conv2d(256, 512, kernel_size=3, stride=2, padding=1), # 3,3 + Conv2d(512, 512, kernel_size=3, stride=1, padding=1, residual=True),), + + nn.Sequential(Conv2d(512, 512, kernel_size=3, stride=1, padding=0), # 1, 1 + Conv2d(512, 512, kernel_size=1, stride=1, padding=0)),]) + + self.emotion_encoder = nn.Sequential( + nn.Linear(6, 512), + nn.LeakyReLU(0.2), + nn.Linear(512, 512), + nn.LeakyReLU(0.2) + ) + + # self.emotion_rnn = nn.LSTM(512, 512, 2, batch_first=True) + # self.emo_classifier = nn.Sequential( + # nn.Linear(512, 512), + # nn.LeakyReLU(0.2), + # nn.Linear(512, 6), + # ) + + self.audio_encoder = 
nn.Sequential( + Conv2d(1, 32, kernel_size=3, stride=1, padding=1), + Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(32, 64, kernel_size=3, stride=(3, 1), padding=1), + Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(64, 128, kernel_size=3, stride=3, padding=1), + Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(128, 256, kernel_size=3, stride=(3, 2), padding=1), + Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(256, 512, kernel_size=3, stride=1, padding=0), + Conv2d(512, 512, kernel_size=1, stride=1, padding=0),) + + self.face_decoder_blocks = nn.ModuleList([ + nn.Sequential(Conv2d(1024, 512, kernel_size=1, stride=1, padding=0),), + + nn.Sequential(Conv2dTranspose(1024, 512, kernel_size=3, stride=1, padding=0), # 3,3 + Conv2d(512, 512, kernel_size=3, stride=1, padding=1, residual=True),), + + nn.Sequential(Conv2dTranspose(1024, 512, kernel_size=3, stride=2, padding=1, output_padding=1), + Conv2d(512, 512, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(512, 512, kernel_size=3, stride=1, padding=1, residual=True),), # 6, 6 + + nn.Sequential(Conv2dTranspose(768, 384, kernel_size=3, stride=2, padding=1, output_padding=1), + Conv2d(384, 384, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(384, 384, kernel_size=3, stride=1, padding=1, residual=True),), # 12, 12 + + nn.Sequential(Conv2dTranspose(512, 256, kernel_size=3, stride=2, padding=1, output_padding=1), + Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True),), # 24, 24 + + nn.Sequential(Conv2dTranspose(320, 128, kernel_size=3, stride=2, padding=1, output_padding=1), + 
Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True),), # 48, 48 + + nn.Sequential(Conv2dTranspose(160, 64, kernel_size=3, stride=2, padding=1, output_padding=1), + Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True),),]) # 96,96 + + self.output_block = nn.Sequential(Conv2d(80, 32, kernel_size=3, stride=1, padding=1), # 80->81 + nn.Conv2d(32, 3, kernel_size=1, stride=1, padding=0), + nn.Sigmoid()) + + def forward(self, audio_sequences, face_sequences, emotion): + # audio_sequences = (B, T, 1, 80, 16) + B = audio_sequences.size(0) + + #emotion = (B, 6) + # repeating the same emotion for every frame + emotion = emotion.unsqueeze(1).repeat(1, 5, 1) #(B, T, 6) + + input_dim_size = len(face_sequences.size()) + if input_dim_size > 4: + audio_sequences = torch.cat([audio_sequences[:, i] for i in range(audio_sequences.size(1))], dim=0) + emotion = torch.cat([emotion[:, i] for i in range(emotion.size(1))], dim=0) #(B*T, 6) + face_sequences = torch.cat([face_sequences[:, :, i] for i in range(face_sequences.size(2))], dim=0) + + audio_embedding = self.audio_encoder(audio_sequences) # B*T, 512, 1, 1 + emotion_embedding = self.emotion_encoder(emotion) + # ee_needed = torch.mean(emotion_embedding,0).unsqueeze(0) + + emotion_embedding = emotion_embedding.view(-1,512,1,1) # B*T, 512, 1, 1 + + feats = [] + x = face_sequences + for f in self.face_encoder_blocks: + x = f(x) + feats.append(x) + + x = audio_embedding + x = torch.cat((x, emotion_embedding), dim=1) + for f in self.face_decoder_blocks: + x = f(x) + try: + x = torch.cat((x, feats[-1]), dim=1) + except Exception as e: + print(x.size()) + print(feats[-1].size()) + raise e + + feats.pop() + + x = self.output_block(x) #(B*T,80,96,96)->(B*5,3,96,96) + + if input_dim_size > 4: + x = torch.split(x, B, dim=0) # [(B, C, H, W)] + outputs = torch.stack(x, dim=2) # (B, 
C, T, H, W) + + else: + outputs = x + + return outputs + + +class Wav2Lip_disc_qual(nn.Module): + def __init__(self): + super(Wav2Lip_disc_qual, self).__init__() + + self.face_encoder_blocks = nn.ModuleList([ + nn.Sequential(nonorm_Conv2d(3, 32, kernel_size=7, stride=1, padding=3)), # 48,96 + + nn.Sequential(nonorm_Conv2d(32, 64, kernel_size=5, stride=(1, 2), padding=2), # 48,48 + nonorm_Conv2d(64, 64, kernel_size=5, stride=1, padding=2)), + + nn.Sequential(nonorm_Conv2d(64, 128, kernel_size=5, stride=2, padding=2), # 24,24 + nonorm_Conv2d(128, 128, kernel_size=5, stride=1, padding=2)), + + nn.Sequential(nonorm_Conv2d(128, 256, kernel_size=5, stride=2, padding=2), # 12,12 + nonorm_Conv2d(256, 256, kernel_size=5, stride=1, padding=2)), + + nn.Sequential(nonorm_Conv2d(256, 512, kernel_size=3, stride=2, padding=1), # 6,6 + nonorm_Conv2d(512, 512, kernel_size=3, stride=1, padding=1)), + + nn.Sequential(nonorm_Conv2d(512, 512, kernel_size=3, stride=2, padding=1), # 3,3 + nonorm_Conv2d(512, 512, kernel_size=3, stride=1, padding=1),), + + nn.Sequential(nonorm_Conv2d(512, 512, kernel_size=3, stride=1, padding=0), # 1, 1 + nonorm_Conv2d(512, 512, kernel_size=1, stride=1, padding=0)),]) + + self.binary_pred = nn.Sequential(nn.Conv2d(512, 1, kernel_size=1, stride=1, padding=0), nn.Sigmoid()) + self.label_noise = .0 + + def get_lower_half(self, face_sequences): + return face_sequences[:, :, face_sequences.size(2)//2:] + + def to_2d(self, face_sequences): + B = face_sequences.size(0) + face_sequences = torch.cat([face_sequences[:, :, i] for i in range(face_sequences.size(2))], dim=0) + return face_sequences + + def perceptual_forward(self, false_face_sequences): + false_face_sequences = self.to_2d(false_face_sequences) + false_face_sequences = self.get_lower_half(false_face_sequences) + + false_feats = false_face_sequences + for f in self.face_encoder_blocks: + false_feats = f(false_feats) + + false_pred_loss = 
F.binary_cross_entropy(self.binary_pred(false_feats).view(len(false_feats), -1), + torch.ones((len(false_feats), 1)).cuda()) + + return false_pred_loss + + def forward(self, face_sequences): + face_sequences = self.to_2d(face_sequences) + face_sequences = self.get_lower_half(face_sequences) + + x = face_sequences + for f in self.face_encoder_blocks: + x = f(x) + + return self.binary_pred(x).view(len(x), -1) From 810ca8cb365acd474442ccb761b553c21915b1b2 Mon Sep 17 00:00:00 2001 From: Jiangzheng123 <95006276+Jiangzheng123@users.noreply.github.com> Date: Tue, 30 Jan 2024 11:18:22 +0800 Subject: [PATCH 05/28] Update __init__.py --- talkingface/model/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/talkingface/model/__init__.py b/talkingface/model/__init__.py index e69de29b..786e914d 100644 --- a/talkingface/model/__init__.py +++ b/talkingface/model/__init__.py @@ -0,0 +1,2 @@ +from .wav2lip import Wav2Lip, Wav2Lip_disc_qual +from .syncnet import SyncNet_color From 34d453b9745e1f9d99add11e0aa086e023ee6b1c Mon Sep 17 00:00:00 2001 From: Jiangzheng123 <95006276+Jiangzheng123@users.noreply.github.com> Date: Tue, 30 Jan 2024 11:21:23 +0800 Subject: [PATCH 06/28] Update requirements.txt --- requirements.txt | 125 +++++------------------------------------------ 1 file changed, 11 insertions(+), 114 deletions(-) diff --git a/requirements.txt b/requirements.txt index 1605c1fe..80c342fa 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,114 +1,11 @@ -absl-py==2.0.0 -addict==2.4.0 -aiosignal==1.3.1 -appdirs==1.4.4 -attrs==23.1.0 -audioread==3.0.1 -basicsr==1.3.4.7 -cachetools==5.3.2 -certifi==2020.12.5 -cffi==1.16.0 -charset-normalizer==3.3.2 -click==8.1.7 -cloudpickle==3.0.0 -colorama==0.4.6 -colorlog==6.7.0 -contourpy==1.1.1 -cycler==0.12.1 -decorator==5.1.1 -dlib==19.22.1 -docker-pycreds==0.4.0 -face-alignment==1.3.5 -ffmpeg==1.4 -filelock==3.13.1 -fonttools==4.44.0 -frozenlist==1.4.0 -future==0.18.3 -gitdb==4.0.11 -GitPython==3.1.40 -glob2==0.7 
-google-auth==2.23.4 -google-auth-oauthlib==0.4.6 -grpcio==1.59.2 -hyperopt==0.2.5 -idna==3.4 -imageio==2.9.0 -imageio-ffmpeg==0.4.5 -importlib-metadata==6.8.0 -importlib-resources==6.1.0 -joblib==1.3.2 -jsonschema==4.19.2 -jsonschema-specifications==2023.7.1 -kiwisolver==1.4.5 -kornia==0.5.5 -lazy_loader==0.3 -librosa==0.10.1 -llvmlite==0.37.0 -lmdb==1.2.1 -lws==1.2.7 -Markdown==3.5.1 -MarkupSafe==2.1.3 -matplotlib==3.6.3 -msgpack==1.0.7 -networkx==3.1 -numba==0.54.1 -numpy==1.20.3 -oauthlib==3.2.2 -opencv-python==3.4.9.33 -packaging==23.2 -pandas==1.3.4 -pathtools==0.1.2 -Pillow==6.2.1 -pkgutil_resolve_name==1.3.10 -platformdirs==3.11.0 -plotly==5.18.0 -pooch==1.8.0 -protobuf==4.25.0 -psutil==5.9.6 -pyasn1==0.5.0 -pyasn1-modules==0.3.0 -pycparser==2.21 -pyparsing==3.1.1 -python-dateutil==2.8.2 -python-speech-features==0.6 -pytorch-fid==0.3.0 -pytz==2023.3.post1 -PyWavelets==1.4.1 -PyYAML==5.3.1 -ray==2.6.3 -referencing==0.30.2 -requests==2.31.0 -requests-oauthlib==1.3.1 -rpds-py==0.12.0 -rsa==4.9 -scikit-image==0.16.2 -scikit-learn==1.3.2 -scipy==1.5.0 -sentry-sdk==1.34.0 -setproctitle==1.3.3 -six==1.16.0 -smmap==5.0.1 -soundfile==0.12.1 -soxr==0.3.7 -tabulate==0.9.0 -tb-nightly==2.12.0a20230126 -tenacity==8.2.3 -tensorboard==2.7.0 -tensorboard-data-server==0.6.1 -tensorboard-plugin-wit==1.8.1 -texttable==1.7.0 -thop==0.1.1.post2209072238 -threadpoolctl==3.2.0 -tomli==2.0.1 -torch==1.13.1+cu116 -torchaudio==0.13.1+cu116 -torchvision==0.14.1+cu116 -tqdm==4.66.1 -trimesh==3.9.20 -typing_extensions==4.8.0 -tzdata==2023.3 -urllib3==2.0.7 -wandb==0.15.12 -Werkzeug==3.0.1 -yapf==0.40.2 -zipp==3.17.0 +librosa==0.9.1 +numba +numpy +opencv-python==4.1.1.26 +torch>=1.1.0 +torchvision>=0.3.0 +tqdm>=4.45.0 +dlib +scikit-image +matplotlib +h5py From cda9e3956731386797562ad2e2674178fda46331 Mon Sep 17 00:00:00 2001 From: Jiangzheng123 <95006276+Jiangzheng123@users.noreply.github.com> Date: Tue, 30 Jan 2024 11:25:23 +0800 Subject: [PATCH 07/28] Create color_syncnet_train.py --- 
talkingface/trainer/color_syncnet_train.py | 341 +++++++++++++++++++++ 1 file changed, 341 insertions(+) create mode 100644 talkingface/trainer/color_syncnet_train.py diff --git a/talkingface/trainer/color_syncnet_train.py b/talkingface/trainer/color_syncnet_train.py new file mode 100644 index 00000000..062a0176 --- /dev/null +++ b/talkingface/trainer/color_syncnet_train.py @@ -0,0 +1,341 @@ +from os.path import dirname, join, basename, isfile, isdir +from tqdm import tqdm + +from models import SyncNet_color as SyncNet +import audio + +import torch +from torch import nn +from torch import optim +from torch.utils.tensorboard import SummaryWriter +import torch.backends.cudnn as cudnn +from torch.utils import data as data_utils +import numpy as np + +from glob import glob + +import os, random, cv2, argparse +import albumentations as A +from hparams import hparams, get_image_list + +parser = argparse.ArgumentParser(description='Code to train the expert lip-sync discriminator') + +parser.add_argument("--data_root", help="Root folder of the preprocessed LRS2 dataset", required=True) + +parser.add_argument('--checkpoint_dir', help='Save checkpoints to this directory', required=True, type=str) +parser.add_argument('--checkpoint_path', help='Resumed from this checkpoint', default=None, type=str) + +args = parser.parse_args() + + +global_step = 0 +global_epoch = 0 +os.environ['CUDA_VISIBLE_DEVICES']='2' +use_cuda = torch.cuda.is_available() +print('use_cuda: {}'.format(use_cuda)) + +syncnet_T = 5 +emonet_T = 5 +syncnet_mel_step_size = 16 + +class Dataset(object): + def __init__(self, split): + #self.all_videos = get_image_list(args.data_root, split) + self.all_videos = [join(args.data_root, f) for f in os.listdir(args.data_root) if isdir(join(args.data_root, f))] + print('Num files: ', len(self.all_videos)) + + # to apply same augmentation for all the frames + target = {} + for i in range(1, emonet_T): + target['image' + str(i)] = 'image' + + self.augments = A.Compose([ + 
A.RandomBrightnessContrast(p=0.2), + A.RandomGamma(p=0.2), + A.CLAHE(p=0.2), + A.HueSaturationValue(hue_shift_limit=20, sat_shift_limit=50, val_shift_limit=50, p=0.2), + A.ChannelShuffle(p=0.2), + A.RGBShift(p=0.2), + A.RandomBrightness(p=0.2), + A.RandomContrast(p=0.2), + A.GaussNoise(var_limit=(10.0, 50.0), p=0.25), + ], additional_targets=target, p=0.8) + + def augmentVideo(self, video): + args = {} + args['image'] = video[0, :, :, :] + for i in range(1, emonet_T): + args['image' + str(i)] = video[i, :, :, :] + result = self.augments(**args) + video[0, :, :, :] = result['image'] + for i in range(1, emonet_T): + video[i, :, :, :] = result['image' + str(i)] + return video + + def get_frame_id(self, frame): + return int(basename(frame).split('.')[0]) + + def get_window(self, start_frame): + start_id = self.get_frame_id(start_frame) + vidname = dirname(start_frame) + + window_fnames = [] + for frame_id in range(start_id, start_id + syncnet_T): + frame = join(vidname, '{}.jpg'.format(frame_id)) + if not isfile(frame): + return None + window_fnames.append(frame) + return window_fnames + + def crop_audio_window(self, spec, start_frame): + # num_frames = (T x hop_size * fps) / sample_rate + start_frame_num = self.get_frame_id(start_frame) + start_idx = int(80. 
* (start_frame_num / float(hparams.fps))) + + end_idx = start_idx + syncnet_mel_step_size + + return spec[start_idx : end_idx, :] + + + def __len__(self): + return len(self.all_videos) + + def __getitem__(self, idx): + while 1: + idx = random.randint(0, len(self.all_videos) - 1) + vidname = self.all_videos[idx] + #print(vidname) + + img_names = list(glob(join(vidname, '*.jpg'))) + if len(img_names) <= 3 * syncnet_T: + continue + img_name = random.choice(img_names) + wrong_img_name = random.choice(img_names) + while wrong_img_name == img_name: + wrong_img_name = random.choice(img_names) + + if random.choice([True, False]): + y = torch.ones(1).float() + chosen = img_name + else: + y = torch.zeros(1).float() + chosen = wrong_img_name + + window_fnames = self.get_window(chosen) + if window_fnames is None: + continue + + window = [] + all_read = True + for fname in window_fnames: + img = cv2.imread(fname) + if img is None: + all_read = False + break + try: + img = cv2.resize(img, (hparams.img_size, hparams.img_size)) + except Exception as e: + all_read = False + break + + window.append(img) + + if not all_read: continue + + try: + wavpath = join(vidname, "audio.wav") + wav = audio.load_wav(wavpath, hparams.sample_rate) + + orig_mel = audio.melspectrogram(wav).T + except Exception as e: + continue + + mel = self.crop_audio_window(orig_mel.copy(), img_name) + + if (mel.shape[0] != syncnet_mel_step_size): + continue + + # H x W x 3 * T + window = np.asarray(window) + aug_results = self.augmentVideo(window) + window = np.split(aug_results, syncnet_T, axis=0) + + x = np.concatenate(window, axis=3) / 255. 
+ x = np.squeeze(x, axis=0).transpose(2, 0, 1) + # print(x.shape) + x = x[:, x.shape[1]//2:] + + x = torch.FloatTensor(x) + mel = torch.FloatTensor(mel.T).unsqueeze(0) + + return x, mel, y + +logloss = nn.BCELoss() +def cosine_loss(a, v, y): + d = nn.functional.cosine_similarity(a, v) + loss = logloss(d.unsqueeze(1), y) + + return loss + +def train(device, model, train_data_loader, test_data_loader, optimizer, + checkpoint_dir=None, checkpoint_interval=None, nepochs=None): + + #scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=6) + + global global_step, global_epoch + resumed_step = global_step + num_batches = len(train_data_loader) + + while global_epoch < nepochs: + print('Epoch: {}'.format(global_epoch)) + running_loss = 0. + prog_bar = tqdm(enumerate(train_data_loader)) + for step, (x, mel, y) in prog_bar: + model.train() + optimizer.zero_grad() + + # Transform data to CUDA device + x = x.to(device) + mel = mel.to(device) + + a, v = model(mel, x) + y = y.to(device) + + loss = cosine_loss(a, v, y) + loss.backward() + optimizer.step() + + global_step += 1 + cur_session_steps = global_step - resumed_step + running_loss += loss.item() + + # if global_step == 1 or global_step % checkpoint_interval == 0: + # save_checkpoint( + # model, optimizer, global_step, checkpoint_dir, global_epoch) + + # if global_step % hparams.syncnet_eval_interval == 0: + # with torch.no_grad(): + # eval_loss = eval_model(test_data_loader, global_step, device, model, checkpoint_dir) + + prog_bar.set_description('Loss: {}'.format(running_loss / (step + 1))) + + writer.add_scalar("Loss/train", running_loss/num_batches, global_epoch) + + with torch.no_grad(): + eval_loss = eval_model(test_data_loader, global_step, device, model, checkpoint_dir) + if(global_epoch % 50 == 0): + save_checkpoint(model, optimizer, global_step, checkpoint_dir, global_epoch) + + global_epoch += 1 + +def eval_model(test_data_loader, global_step, device, model, checkpoint_dir): + 
eval_steps = 1400 + print('Evaluating for {} steps'.format(eval_steps)) + losses = [] + while 1: + for step, (x, mel, y) in enumerate(test_data_loader): + + model.eval() + + # Transform data to CUDA device + x = x.to(device) + + mel = mel.to(device) + + a, v = model(mel, x) + y = y.to(device) + + loss = cosine_loss(a, v, y) + losses.append(loss.item()) + + if step > eval_steps: break + + averaged_loss = sum(losses) / len(losses) + print(averaged_loss) + writer.add_scalar("Loss/val", averaged_loss, global_step) + + return averaged_loss + +def save_checkpoint(model, optimizer, step, checkpoint_dir, epoch): + + checkpoint_path = join( + checkpoint_dir, "checkpoint_step{:09d}.pth".format(global_step)) + optimizer_state = optimizer.state_dict() if hparams.save_optimizer_state else None + torch.save({ + "state_dict": model.state_dict(), + "optimizer": optimizer_state, + "global_step": step, + "global_epoch": epoch, + }, checkpoint_path) + print("Saved checkpoint:", checkpoint_path) + +def _load(checkpoint_path): + if use_cuda: + checkpoint = torch.load(checkpoint_path) + else: + checkpoint = torch.load(checkpoint_path, + map_location=lambda storage, loc: storage) + return checkpoint + +def load_checkpoint(path, model, optimizer, reset_optimizer=False): + global global_step + global global_epoch + + print("Load checkpoint from: {}".format(path)) + checkpoint = _load(path) + model.load_state_dict(checkpoint["state_dict"]) + if not reset_optimizer: + optimizer_state = checkpoint["optimizer"] + if optimizer_state is not None: + print("Load optimizer state from {}".format(path)) + optimizer.load_state_dict(checkpoint["optimizer"]) + global_step = checkpoint["global_step"] + global_epoch = checkpoint["global_epoch"] + + return model + +if __name__ == "__main__": + checkpoint_dir = args.checkpoint_dir + checkpoint_path = args.checkpoint_path + + if not os.path.exists(checkpoint_dir): os.mkdir(checkpoint_dir) + + # Dataset and Dataloader setup + #train_dataset = Dataset('train') 
+ #test_dataset = Dataset('val') + + full_dataset = Dataset('train') + train_size = int(0.95 * len(full_dataset)) + test_size = len(full_dataset) - train_size + train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size], generator=torch.Generator().manual_seed(42)) + + train_data_loader = data_utils.DataLoader( + train_dataset, batch_size=hparams.syncnet_batch_size, shuffle=True, + num_workers=hparams.num_workers) + + test_data_loader = data_utils.DataLoader( + test_dataset, batch_size=hparams.syncnet_batch_size, + num_workers=8) + + device = torch.device("cuda" if use_cuda else "cpu") + + # Model + model = SyncNet().to(device) + #model = nn.DataParallel(SyncNet(), device_ids=[1,2]).to(device) + + print('total trainable params {}'.format(sum(p.numel() for p in model.parameters() if p.requires_grad))) + + optimizer = optim.Adam([p for p in model.parameters() if p.requires_grad], + lr=hparams.syncnet_lr,betas=(0.5,0.999)) + + if checkpoint_path is not None: + load_checkpoint(checkpoint_path, model, optimizer, reset_optimizer=False) + + writer = SummaryWriter('runs/crema-d_disc_exp2_data_aug') + + train(device, model, train_data_loader, test_data_loader, optimizer, + checkpoint_dir=checkpoint_dir, + checkpoint_interval=hparams.syncnet_checkpoint_interval, + nepochs=hparams.nepochs) + + writer.flush() From 60dc714f17aff8f65a13a522ae6e4a9d2040c281 Mon Sep 17 00:00:00 2001 From: Jiangzheng123 <95006276+Jiangzheng123@users.noreply.github.com> Date: Tue, 30 Jan 2024 11:26:26 +0800 Subject: [PATCH 08/28] Create emotion_disc_train.py --- talkingface/trainer/emotion_disc_train.py | 133 ++++++++++++++++++++++ 1 file changed, 133 insertions(+) create mode 100644 talkingface/trainer/emotion_disc_train.py diff --git a/talkingface/trainer/emotion_disc_train.py b/talkingface/trainer/emotion_disc_train.py new file mode 100644 index 00000000..fdf10ba9 --- /dev/null +++ b/talkingface/trainer/emotion_disc_train.py @@ -0,0 +1,133 @@ +import 
argparse +import json +import os +from tqdm import tqdm +import random as rn +import shutil + +import numpy as np +import torch +import torch.nn as nn +from sklearn.metrics import accuracy_score +from torch.utils.tensorboard import SummaryWriter + +from models import emo_disc +from datagen_aug import Dataset + +def initParams(): + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument("-i", "--in-path", type=str, help="Input folder containing train data", default=None, required=True) + # parser.add_argument("-v", "--val-path", type=str, help="Input folder containing validation data", default=None, required=True) + parser.add_argument("-o", "--out-path", type=str, help="output folder", default='../models/def', required=True) + + parser.add_argument('--num_epochs', type=int, default=10000) + parser.add_argument("--batch-size", type=int, default=64) + + parser.add_argument('--lr_emo', type=float, default=1e-06) + + parser.add_argument("--gpu-no", type=str, help="select gpu", default='1') + parser.add_argument('--seed', type=int, default=9) + + args = parser.parse_args() + + os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_no + + args.batch_size = args.batch_size * max(int(torch.cuda.device_count()), 1) + args.steplr = 200 + + args.filters = [64, 128, 256, 512, 512] + #-----------------------------------------# + # Reproducible results # + #-----------------------------------------# + os.environ['PYTHONHASHSEED'] = str(args.seed) + np.random.seed(args.seed) + rn.seed(args.seed) + torch.manual_seed(args.seed) + #-----------------------------------------# + + if not os.path.exists(args.out_path): + os.makedirs(args.out_path) + else: + shutil.rmtree(args.out_path) + os.mkdir(args.out_path) + + with open(os.path.join(args.out_path, 'args.txt'), 'w') as f: + json.dump(args.__dict__, f, indent=2) + + args.cuda = torch.cuda.is_available() + print('Cuda device available: ', args.cuda) + args.device = torch.device("cuda" if args.cuda else "cpu") + args.kwargs 
= {'num_workers': 0, 'pin_memory': True} if args.cuda else {} + + return args + +def init_weights(m): + if type(m) == nn.Linear or type(m) == nn.Conv2d or type(m) == nn.Conv1d: + torch.nn.init.xavier_uniform_(m.weight) + +def enableGrad(model, requires_grad): + for p in model.parameters(): + p.requires_grad_(requires_grad) + + +def train(): + args = initParams() + + trainDset = Dataset(args) + + train_loader = torch.utils.data.DataLoader(trainDset, + batch_size=args.batch_size, + shuffle=True, + drop_last=True, + **args.kwargs) + + device_ids = list(range(torch.cuda.device_count())) + + disc_emo = emo_disc.DISCEMO().to(args.device) + disc_emo.apply(init_weights) + #disc_emo = nn.DataParallel(disc_emo, device_ids) + + emo_loss_disc = nn.CrossEntropyLoss() + + num_batches = len(train_loader) + print(args.batch_size, num_batches) + + global_step = 0 + + for epoch in range(args.num_epochs): + print('Epoch: {}'.format(epoch)) + prog_bar = tqdm(enumerate(train_loader)) + running_loss = 0. + for step, (x, y) in prog_bar: + video, emotion = x.to(args.device), y.to(args.device) + + disc_emo.train() + + disc_emo.opt.zero_grad() # .module is because of nn.DataParallel + + class_real = disc_emo(video) + + loss = emo_loss_disc(class_real, torch.argmax(emotion, dim=1)) + + running_loss += loss.item() + + loss.backward() + disc_emo.opt.step() # .module is because of nn.DataParallel + + if global_step % 1000 == 0: + print('Saving the network') + torch.save(disc_emo.state_dict(), os.path.join(args.out_path, f'disc_emo_{global_step}.pth')) + print('Network has been saved') + + prog_bar.set_description('classification Loss: {}'.format(running_loss / (step + 1))) + + global_step += 1 + + writer.add_scalar("classification Loss", running_loss/num_batches, epoch) + + disc_emo.scheduler.step() # .module is because of nn.DataParallel + +if __name__ == "__main__": + + writer = SummaryWriter('runs/emo_disc_exp4') + train() From 3bfba5b9e3914118d6b700728dddd27c1e050305 Mon Sep 17 00:00:00 
2001 From: Zhuoyuan Zhang <103866519+zhangyuanyuan02@users.noreply.github.com> Date: Tue, 30 Jan 2024 11:28:27 +0800 Subject: [PATCH 09/28] Update README.md --- README.md | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/README.md b/README.md index 7cd82328..75938102 100644 --- a/README.md +++ b/README.md @@ -207,4 +207,34 @@ python run_talkingface.py --model=xxxx --dataset=xxxx (--other_parameters=xxxxxx - 每个组都要提交一个README文件,写明完成的功能、最终实现的训练、验证截图、所使用的依赖、成员分工等。 +##小组README文件 +###张卓远 +编写实现了模型的数据预处理和数据加载代码,编写实现了模型emo_disc部分模型。 +运行步骤: +进入talkingface/data/dataprocess/文件夹下运行控制台,输入命令 +python emogen_dataprocess.py --inputfolder --preprocessed_root +预处理会先将视频转化为25帧格式,此过程需要安装ffmpeg并添加为环境变量。 +并将转化好的视频存入./modified_videos文件夹下 +数据预处理第一步转换FPS演示结果 +转换视频FPS结果 +程序会自动运行数据预处理第二步: +数据预处理第二步演示 +由于本机内存空间不足转为使用其他云服务器测试,完成数据预处理的过程。 +预处理内存空间不足报错 +预处理完成演示 + +接下来训练情绪鉴别器: + +训练情绪鉴别器结果演示 + +由于训练过程过长,可以在中途输入Ctrl^C以停止训练 + + + + + + + + + From ed591f59f632545b61ef54a8c18e6391ec100063 Mon Sep 17 00:00:00 2001 From: Jiangzheng123 <95006276+Jiangzheng123@users.noreply.github.com> Date: Tue, 30 Jan 2024 11:29:12 +0800 Subject: [PATCH 10/28] Update utils.py --- talkingface/utils/utils.py | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/talkingface/utils/utils.py b/talkingface/utils/utils.py index d89cd80d..e8c7c802 100644 --- a/talkingface/utils/utils.py +++ b/talkingface/utils/utils.py @@ -7,6 +7,7 @@ import numpy as np import torch import torch.nn as nn +import torchvision from torch.utils.tensorboard import SummaryWriter from texttable import Texttable @@ -446,7 +447,31 @@ def create_dataset(config): return dataset_class(config, config['train_filelist']), dataset_class(config, config['val_filelist']) - +class perceptionLoss(): + def __init__(self, device): + vgg = torchvision.models.vgg19(pretrained=True) + vgg.eval() + self.features = vgg.features.to(device) + self.feature_layers = ['4', '9', '18', '27', '36'] + 
self.mse_loss = nn.MSELoss() + + def getfeatures(self, x): + feature_list = [] + for name, module in self.features._modules.items(): + x = module(x) + if name in self.feature_layers: + feature_list.append(x) + return feature_list + + def calculatePerceptionLoss(self, video_pd, video_gt): + features_pd = self.getfeatures(video_pd.view(video_pd.size(0)*video_pd.size(2), video_pd.size(1), video_pd.size(3), video_pd.size(4))) + features_gt = self.getfeatures(video_gt.view(video_gt.size(0)*video_gt.size(2), video_gt.size(1), video_gt.size(3), video_gt.size(4))) + + with torch.no_grad(): + features_gt = [x.detach() for x in features_gt] + + perceptual_loss = sum([self.mse_loss(features_pd[i], features_gt[i]) for i in range(len(features_gt))]) + return perceptual_loss From 47b2b38b6cf5887f33b6baa839ef04c4a1c53c72 Mon Sep 17 00:00:00 2001 From: Jiangzheng123 <95006276+Jiangzheng123@users.noreply.github.com> Date: Tue, 30 Jan 2024 11:30:12 +0800 Subject: [PATCH 11/28] Update requirements.txt --- requirements.txt | 113 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 113 insertions(+) diff --git a/requirements.txt b/requirements.txt index 80c342fa..ae82ae6e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,116 @@ +absl-py==2.0.0 +addict==2.4.0 +aiosignal==1.3.1 +appdirs==1.4.4 +attrs==23.1.0 +audioread==3.0.1 +basicsr==1.3.4.7 +cachetools==5.3.2 +certifi==2020.12.5 +cffi==1.16.0 +charset-normalizer==3.3.2 +click==8.1.7 +cloudpickle==3.0.0 +colorama==0.4.6 +colorlog==6.7.0 +contourpy==1.1.1 +cycler==0.12.1 +decorator==5.1.1 +dlib==19.22.1 +docker-pycreds==0.4.0 +face-alignment==1.3.5 +ffmpeg==1.4 +filelock==3.13.1 +fonttools==4.44.0 +frozenlist==1.4.0 +future==0.18.3 +gitdb==4.0.11 +GitPython==3.1.40 +glob2==0.7 +google-auth==2.23.4 +google-auth-oauthlib==0.4.6 +grpcio==1.59.2 +hyperopt==0.2.5 +idna==3.4 +imageio==2.9.0 +imageio-ffmpeg==0.4.5 +importlib-metadata==6.8.0 +importlib-resources==6.1.0 +joblib==1.3.2 +jsonschema==4.19.2 
+jsonschema-specifications==2023.7.1 +kiwisolver==1.4.5 +lazy_loader==0.3 +librosa==0.10.1 +llvmlite==0.37.0 +lmdb==1.2.1 +lws==1.2.7 +Markdown==3.5.1 +MarkupSafe==2.1.3 +matplotlib==3.6.3 +msgpack==1.0.7 +networkx==3.1 +numba==0.54.1 +numpy==1.20.3 +oauthlib==3.2.2 +opencv-python==3.4.9.33 +packaging==23.2 +pandas==1.3.4 +pathtools==0.1.2 +Pillow==6.2.1 +pkgutil_resolve_name==1.3.10 +platformdirs==3.11.0 +plotly==5.18.0 +pooch==1.8.0 +protobuf==4.25.0 +psutil==5.9.6 +pyasn1==0.5.0 +pyasn1-modules==0.3.0 +pycparser==2.21 +pyparsing==3.1.1 +python-dateutil==2.8.2 +python-speech-features==0.6 +pytorch-fid==0.3.0 +pytz==2023.3.post1 +PyWavelets==1.4.1 +PyYAML==5.3.1 +ray==2.6.3 +referencing==0.30.2 +requests==2.31.0 +requests-oauthlib==1.3.1 +rpds-py==0.12.0 +rsa==4.9 +scikit-image==0.16.2 +scikit-learn==1.3.2 +scipy==1.5.0 +sentry-sdk==1.34.0 +setproctitle==1.3.3 +six==1.16.0 +smmap==5.0.1 +soundfile==0.12.1 +soxr==0.3.7 +tabulate==0.9.0 +tb-nightly==2.12.0a20230126 +tenacity==8.2.3 +tensorboard==2.7.0 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +texttable==1.7.0 +thop==0.1.1.post2209072238 +threadpoolctl==3.2.0 +tomli==2.0.1 +torch==1.13.1+cu116 +torchaudio==0.13.1+cu116 +torchvision==0.14.1+cu116 +tqdm==4.66.1 +trimesh==3.9.20 +typing_extensions==4.8.0 +tzdata==2023.3 +urllib3==2.0.7 +wandb==0.15.12 +Werkzeug==3.0.1 +yapf==0.40.2 +zipp==3.17.0 librosa==0.9.1 numba numpy From b6d655dbb61fb1ac56b6961469b0b38869b2758b Mon Sep 17 00:00:00 2001 From: Zhuoyuan Zhang <103866519+zhangyuanyuan02@users.noreply.github.com> Date: Tue, 30 Jan 2024 11:53:21 +0800 Subject: [PATCH 12/28] Update README.md --- README.md | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 75938102..684b22a2 100644 --- a/README.md +++ b/README.md @@ -207,8 +207,13 @@ python run_talkingface.py --model=xxxx --dataset=xxxx (--other_parameters=xxxxxx - 每个组都要提交一个README文件,写明完成的功能、最终实现的训练、验证截图、所使用的依赖、成员分工等。 -##小组README文件 -###张卓远 
+## 小组README文件 + +### 环境要求(依赖) +依赖库详见 requirements.txt。要求安装ffmpeg, 安装albumentations库 + +### 张卓远 + 编写实现了模型的数据预处理和数据加载代码,编写实现了模型emo_disc部分模型。 运行步骤: 进入talkingface/data/dataprocess/文件夹下运行控制台,输入命令 @@ -230,6 +235,13 @@ python emogen_dataprocess.py --inputfolder --preprocessed_ro 由于训练过程过长,可以在中途输入Ctrl^C以停止训练 +### 蒋政 +完成训练部分代码编写,实现专家口型同步鉴别器模型 +修改requirements.txt +修改utils文件夹中的utils.py +在model文件夹中添加conv.py, syncnet.py, wav2lip.py,修改__init__.py +在trainer文件夹中添加color_syncnet_train.py, emotion_disc_train.py + From cbf1a2bba4306b6a27de3301b1c8cea574aade5e Mon Sep 17 00:00:00 2001 From: Zhuoyuan Zhang Date: Tue, 30 Jan 2024 12:03:49 +0800 Subject: [PATCH 13/28] update models path --- talkingface/model/{ => audio_driven_talkingface}/conv.py | 0 .../{syncnet.py => audio_driven_talkingface/emogen_syncnet.py} | 0 talkingface/model/wav2lip.py | 2 +- 3 files changed, 1 insertion(+), 1 deletion(-) rename talkingface/model/{ => audio_driven_talkingface}/conv.py (100%) rename talkingface/model/{syncnet.py => audio_driven_talkingface/emogen_syncnet.py} (100%) diff --git a/talkingface/model/conv.py b/talkingface/model/audio_driven_talkingface/conv.py similarity index 100% rename from talkingface/model/conv.py rename to talkingface/model/audio_driven_talkingface/conv.py diff --git a/talkingface/model/syncnet.py b/talkingface/model/audio_driven_talkingface/emogen_syncnet.py similarity index 100% rename from talkingface/model/syncnet.py rename to talkingface/model/audio_driven_talkingface/emogen_syncnet.py diff --git a/talkingface/model/wav2lip.py b/talkingface/model/wav2lip.py index e8a126cc..aaad5a65 100644 --- a/talkingface/model/wav2lip.py +++ b/talkingface/model/wav2lip.py @@ -3,7 +3,7 @@ from torch.nn import functional as F import math -from .conv import Conv2dTranspose, Conv2d, nonorm_Conv2d +from audio_driven_talkingface.conv import Conv2dTranspose, Conv2d, nonorm_Conv2d class Wav2Lip(nn.Module): def __init__(self): From a868c27243a92e723331658aa0f46b2c5b741a1a Mon Sep 17 00:00:00 2001 
From: Zhuoyuan Zhang Date: Tue, 30 Jan 2024 12:13:39 +0800 Subject: [PATCH 14/28] from path update --- talkingface/trainer/color_syncnet_train.py | 6 +++--- talkingface/trainer/emotion_disc_train.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/talkingface/trainer/color_syncnet_train.py b/talkingface/trainer/color_syncnet_train.py index 062a0176..2658d22b 100644 --- a/talkingface/trainer/color_syncnet_train.py +++ b/talkingface/trainer/color_syncnet_train.py @@ -1,8 +1,8 @@ from os.path import dirname, join, basename, isfile, isdir from tqdm import tqdm -from models import SyncNet_color as SyncNet -import audio +from model.audio_driven_talkingface.emogen_syncnet import SyncNet_color as SyncNet +import data.dataprocess.audio import torch from torch import nn @@ -16,7 +16,7 @@ import os, random, cv2, argparse import albumentations as A -from hparams import hparams, get_image_list +from data.dataset.hparams import hparams, get_image_list parser = argparse.ArgumentParser(description='Code to train the expert lip-sync discriminator') diff --git a/talkingface/trainer/emotion_disc_train.py b/talkingface/trainer/emotion_disc_train.py index fdf10ba9..3f963d35 100644 --- a/talkingface/trainer/emotion_disc_train.py +++ b/talkingface/trainer/emotion_disc_train.py @@ -11,8 +11,8 @@ from sklearn.metrics import accuracy_score from torch.utils.tensorboard import SummaryWriter -from models import emo_disc -from datagen_aug import Dataset +from model.audio_driven_talkingface.emogen_emo_disc import emo_disc +from data.dataset.dataset import Dataset def initParams(): parser = argparse.ArgumentParser(description=__doc__) From ef7bfddeaa21020b5bbbe11371d75ac0b3f34367 Mon Sep 17 00:00:00 2001 From: Zhuoyuan Zhang <103866519+zhangyuanyuan02@users.noreply.github.com> Date: Tue, 30 Jan 2024 12:18:50 +0800 Subject: [PATCH 15/28] Update README.md --- README.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 
684b22a2..543a3c0f 100644 --- a/README.md +++ b/README.md @@ -237,9 +237,15 @@ python emogen_dataprocess.py --inputfolder --preprocessed_ro ### 蒋政 完成训练部分代码编写,实现专家口型同步鉴别器模型 + 修改requirements.txt + 修改utils文件夹中的utils.py -在model文件夹中添加conv.py, syncnet.py, wav2lip.py,修改__init__.py + +在model文件夹中添加 wav2lip.py,修改__init__.py + +在model/audio_driven_talkingface下添加conv.py , emogen_syncnet.py + 在trainer文件夹中添加color_syncnet_train.py, emotion_disc_train.py From 52d0930d8277f1db9e9e2df1822f584893322d1a Mon Sep 17 00:00:00 2001 From: zhouyang <1543772252@qq.com> Date: Tue, 30 Jan 2024 15:58:31 +0800 Subject: [PATCH 16/28] image_driven --- .../image_driven_talkingface/__init__.py | 1 + .../model/image_driven_talkingface/conv.py | 44 +++ .../image_driven_talkingface/emo_disc.py | 80 ++++ .../image_driven_talkingface/emo_syncnet.py | 66 ++++ .../model/image_driven_talkingface/wav2lip.py | 358 ++++++++++++++++++ 5 files changed, 549 insertions(+) create mode 100644 talkingface/model/image_driven_talkingface/conv.py create mode 100644 talkingface/model/image_driven_talkingface/emo_disc.py create mode 100644 talkingface/model/image_driven_talkingface/emo_syncnet.py create mode 100644 talkingface/model/image_driven_talkingface/wav2lip.py diff --git a/talkingface/model/image_driven_talkingface/__init__.py b/talkingface/model/image_driven_talkingface/__init__.py index e69de29b..60cb0669 100644 --- a/talkingface/model/image_driven_talkingface/__init__.py +++ b/talkingface/model/image_driven_talkingface/__init__.py @@ -0,0 +1 @@ +from talkingface.model.image_driven_talkingface.wav2lip import Wav2Lip, SyncNet_color \ No newline at end of file diff --git a/talkingface/model/image_driven_talkingface/conv.py b/talkingface/model/image_driven_talkingface/conv.py new file mode 100644 index 00000000..92751dba --- /dev/null +++ b/talkingface/model/image_driven_talkingface/conv.py @@ -0,0 +1,44 @@ +import torch +from torch import nn +from torch.nn import functional as F + +class Conv2d(nn.Module): + def 
__init__(self, cin, cout, kernel_size, stride, padding, residual=False, *args, **kwargs): + super().__init__(*args, **kwargs) + self.conv_block = nn.Sequential( + nn.Conv2d(cin, cout, kernel_size, stride, padding), + nn.BatchNorm2d(cout) + ) + self.act = nn.ReLU() + self.residual = residual + + def forward(self, x): + out = self.conv_block(x) + if self.residual: + out += x + return self.act(out) + +class nonorm_Conv2d(nn.Module): + def __init__(self, cin, cout, kernel_size, stride, padding, residual=False, *args, **kwargs): + super().__init__(*args, **kwargs) + self.conv_block = nn.Sequential( + nn.Conv2d(cin, cout, kernel_size, stride, padding), + ) + self.act = nn.LeakyReLU(0.01, inplace=True) + + def forward(self, x): + out = self.conv_block(x) + return self.act(out) + +class Conv2dTranspose(nn.Module): + def __init__(self, cin, cout, kernel_size, stride, padding, output_padding=0, *args, **kwargs): + super().__init__(*args, **kwargs) + self.conv_block = nn.Sequential( + nn.ConvTranspose2d(cin, cout, kernel_size, stride, padding, output_padding), + nn.BatchNorm2d(cout) + ) + self.act = nn.ReLU() + + def forward(self, x): + out = self.conv_block(x) + return self.act(out) \ No newline at end of file diff --git a/talkingface/model/image_driven_talkingface/emo_disc.py b/talkingface/model/image_driven_talkingface/emo_disc.py new file mode 100644 index 00000000..0786666b --- /dev/null +++ b/talkingface/model/image_driven_talkingface/emo_disc.py @@ -0,0 +1,80 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +#继承抽象基类 +from abstract_talkingface import AbstractTalkingFace +class DISCEMO(nn.Module): + def __init__(self, debug=False): + super(DISCEMO, self).__init__() + # self.args = args + self.drp_rate = 0 + + self.filters = [(64, 3, 2), (128, 3, 2), (256, 3, 2), (512, 3, 2), (512, 3, 2)] + + prev_filters = 3 + for i, (num_filters, filter_size, stride) in enumerate(self.filters): + setattr(self, + 'conv_' + str(i + 1), + nn.Sequential( + 
nn.Conv2d(prev_filters, num_filters, kernel_size=filter_size, stride=stride, + padding=filter_size // 2), + nn.LeakyReLU(0.3) + ) + ) + prev_filters = num_filters + + self.projector = nn.Sequential( + nn.Linear(4608, 2048), + nn.LeakyReLU(0.3), + nn.Linear(2048, 512) + ) + + self.rnn_1 = nn.LSTM(512, 512, 1, bidirectional=False, batch_first=True) + + self.cls = nn.Sequential( + nn.Linear(512, 6) + ) + + # Optimizer + self.opt = torch.optim.Adam(list(self.parameters()), lr=1e-06, betas=(0.5, 0.999)) + # self.opt = optim.RMSprop(list(self.parameters()), lr = params['LR_DE']) + self.scheduler = torch.optim.lr_scheduler.StepLR(self.opt, 150, gamma=0.1, last_epoch=-1) + # self.scheduler = torch.optim.lr_scheduler.ExponentialLR(self.opt, gamma=0.95, last_epoch=-1) + + def forward(self, video): + x = video + n, c, t, w, h = x.size(0), x.size(1), x.size(2), x.size(3), x.size(4) + x = x.contiguous().view(t * n, c, w, h) + + for i in range(len(self.filters)): + x = getattr(self, 'conv_' + str(i + 1))(x) + h = x.view(n, t, -1) + h = self.projector(h) + + h, _ = self.rnn_1(h) + + h_class = self.cls(h[:, -1, :]) + + return h_class + + def calculate_loss(self, interaction): + video = interaction['input'] + target = interaction['target'] + + output = self.forward(video) + loss = self.loss_func(output, target) + return {'loss': loss} + + def predict(self, interaction): + video = interaction['input'] + with torch.no_grad(): + output = self.forward(video) + return output + + def generate_batch(): + + raise NotImplementedError + + def enableGrad(self, requires_grad): + for p in self.parameters(): + p.requires_grad_(requires_grad) \ No newline at end of file diff --git a/talkingface/model/image_driven_talkingface/emo_syncnet.py b/talkingface/model/image_driven_talkingface/emo_syncnet.py new file mode 100644 index 00000000..35dad6c0 --- /dev/null +++ b/talkingface/model/image_driven_talkingface/emo_syncnet.py @@ -0,0 +1,66 @@ +import torch +from torch import nn +from torch.nn import 
functional as F + +from .conv import Conv2d + +class SyncNet_color(nn.Module): + def __init__(self): + super(SyncNet_color, self).__init__() + + self.face_encoder = nn.Sequential( + Conv2d(15, 32, kernel_size=(7, 7), stride=1, padding=3), + + Conv2d(32, 64, kernel_size=5, stride=(1, 2), padding=1), + Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(64, 128, kernel_size=3, stride=2, padding=1), + Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(128, 256, kernel_size=3, stride=2, padding=1), + Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(256, 512, kernel_size=3, stride=2, padding=1), + Conv2d(512, 512, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(512, 512, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(512, 512, kernel_size=3, stride=2, padding=1), + Conv2d(512, 512, kernel_size=3, stride=1, padding=0), + Conv2d(512, 512, kernel_size=1, stride=1, padding=0),) + + self.audio_encoder = nn.Sequential( + Conv2d(1, 32, kernel_size=3, stride=1, padding=1), + Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(32, 64, kernel_size=3, stride=(3, 1), padding=1), + Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(64, 128, kernel_size=3, stride=3, padding=1), + Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(128, 256, kernel_size=3, stride=(3, 2), padding=1), + Conv2d(256, 256, 
kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(256, 512, kernel_size=3, stride=1, padding=0), + Conv2d(512, 512, kernel_size=1, stride=1, padding=0),) + + def forward(self, audio_sequences, face_sequences): # audio_sequences := (B, dim, T) + face_embedding = self.face_encoder(face_sequences) + audio_embedding = self.audio_encoder(audio_sequences) + + audio_embedding = audio_embedding.view(audio_embedding.size(0), -1) + face_embedding = face_embedding.view(face_embedding.size(0), -1) + + audio_embedding = F.normalize(audio_embedding, p=2, dim=1) + face_embedding = F.normalize(face_embedding, p=2, dim=1) + + + return audio_embedding, face_embedding \ No newline at end of file diff --git a/talkingface/model/image_driven_talkingface/wav2lip.py b/talkingface/model/image_driven_talkingface/wav2lip.py new file mode 100644 index 00000000..ff3dfb46 --- /dev/null +++ b/talkingface/model/image_driven_talkingface/wav2lip.py @@ -0,0 +1,358 @@ +import torch +import torch.nn as nn +from torch.nn import functional as F +from torch.nn.init import xavier_normal_, constant_ +from tqdm import tqdm +from os import listdir, path +import numpy as np +import os, subprocess +from glob import glob +import cv2 + +from talkingface.model.layers import Conv2d, Conv2dTranspose, nonorm_Conv2d +from talkingface.model.abstract_talkingface import AbstractTalkingFace +from talkingface.data.dataprocess.wav2lip_process import Wav2LipPreprocessForInference, Wav2LipAudio +from talkingface.utils import ensure_dir + +class SyncNet_color(nn.Module): + def __init__(self): + super(SyncNet_color, self).__init__() + + self.face_encoder = nn.Sequential( + Conv2d(15, 32, kernel_size=(7, 7), stride=1, padding=3), + + Conv2d(32, 64, kernel_size=5, stride=(1, 2), padding=1), + Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(64, 128, 
kernel_size=3, stride=2, padding=1), + Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(128, 256, kernel_size=3, stride=2, padding=1), + Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(256, 512, kernel_size=3, stride=2, padding=1), + Conv2d(512, 512, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(512, 512, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(512, 512, kernel_size=3, stride=2, padding=1), + Conv2d(512, 512, kernel_size=3, stride=1, padding=0), + Conv2d(512, 512, kernel_size=1, stride=1, padding=0),) + + self.audio_encoder = nn.Sequential( + Conv2d(1, 32, kernel_size=3, stride=1, padding=1), + Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(32, 64, kernel_size=3, stride=(3, 1), padding=1), + Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(64, 128, kernel_size=3, stride=3, padding=1), + Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(128, 256, kernel_size=3, stride=(3, 2), padding=1), + Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(256, 512, kernel_size=3, stride=1, padding=0), + Conv2d(512, 512, kernel_size=1, stride=1, padding=0),) + + def forward(self, audio_sequences, face_sequences): # audio_sequences := (B, dim, T) + face_embedding = self.face_encoder(face_sequences) + audio_embedding = self.audio_encoder(audio_sequences) + + audio_embedding = 
audio_embedding.view(audio_embedding.size(0), -1) + face_embedding = face_embedding.view(face_embedding.size(0), -1) + + audio_embedding = F.normalize(audio_embedding, p=2, dim=1) + face_embedding = F.normalize(face_embedding, p=2, dim=1) + + + return audio_embedding, face_embedding + + + + + + + +class Wav2Lip(AbstractTalkingFace): + """wav2lip is a GAN-based model that predict the final with audio and image""" + def __init__(self, config): + super(Wav2Lip, self).__init__() + + self.face_encoder_blocks = nn.ModuleList([ + nn.Sequential(Conv2d(6, 16, kernel_size=7, stride=1, padding=3)), # 96,96 + + nn.Sequential(Conv2d(16, 32, kernel_size=3, stride=2, padding=1), # 48,48 + Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True)), + + nn.Sequential(Conv2d(32, 64, kernel_size=3, stride=2, padding=1), # 24,24 + Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True)), + + nn.Sequential(Conv2d(64, 128, kernel_size=3, stride=2, padding=1), # 12,12 + Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True)), + + nn.Sequential(Conv2d(128, 256, kernel_size=3, stride=2, padding=1), # 6,6 + Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True)), + + nn.Sequential(Conv2d(256, 512, kernel_size=3, stride=2, padding=1), # 3,3 + Conv2d(512, 512, kernel_size=3, stride=1, padding=1, residual=True),), + + nn.Sequential(Conv2d(512, 512, kernel_size=3, stride=1, padding=0), # 1, 1 + Conv2d(512, 512, kernel_size=1, stride=1, padding=0)),]) + + self.audio_encoder = nn.Sequential( + Conv2d(1, 32, kernel_size=3, stride=1, padding=1), + Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True), 
+ Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(32, 64, kernel_size=3, stride=(3, 1), padding=1), + Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(64, 128, kernel_size=3, stride=3, padding=1), + Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(128, 256, kernel_size=3, stride=(3, 2), padding=1), + Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True), + + Conv2d(256, 512, kernel_size=3, stride=1, padding=0), + Conv2d(512, 512, kernel_size=1, stride=1, padding=0),) + + self.face_decoder_blocks = nn.ModuleList([ + nn.Sequential(Conv2d(512, 512, kernel_size=1, stride=1, padding=0),), + + nn.Sequential(Conv2dTranspose(1024, 512, kernel_size=3, stride=1, padding=0), # 3,3 + Conv2d(512, 512, kernel_size=3, stride=1, padding=1, residual=True),), + + nn.Sequential(Conv2dTranspose(1024, 512, kernel_size=3, stride=2, padding=1, output_padding=1), + Conv2d(512, 512, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(512, 512, kernel_size=3, stride=1, padding=1, residual=True),), # 6, 6 + + nn.Sequential(Conv2dTranspose(768, 384, kernel_size=3, stride=2, padding=1, output_padding=1), + Conv2d(384, 384, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(384, 384, kernel_size=3, stride=1, padding=1, residual=True),), # 12, 12 + + nn.Sequential(Conv2dTranspose(512, 256, kernel_size=3, stride=2, padding=1, output_padding=1), + Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True),), # 24, 24 + + nn.Sequential(Conv2dTranspose(320, 128, kernel_size=3, stride=2, padding=1, output_padding=1), + Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True),), # 48, 
48 + + nn.Sequential(Conv2dTranspose(160, 64, kernel_size=3, stride=2, padding=1, output_padding=1), + Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True), + Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True),),]) # 96,96 + + self.output_block = nn.Sequential(Conv2d(80, 32, kernel_size=3, stride=1, padding=1), + nn.Conv2d(32, 3, kernel_size=1, stride=1, padding=0), + nn.Sigmoid()) + self.config = config + self.l1loss = nn.L1Loss() + self.bceloss = nn.BCELoss() + def forward(self, audio_sequences, face_sequences): + # audio_sequences = (B, T, 1, 80, 16) + B = audio_sequences.size(0) + + input_dim_size = len(face_sequences.size()) + if input_dim_size > 4: + audio_sequences = torch.cat([audio_sequences[:, i] for i in range(audio_sequences.size(1))], dim=0) + face_sequences = torch.cat([face_sequences[:, :, i] for i in range(face_sequences.size(2))], dim=0) + + audio_embedding = self.audio_encoder(audio_sequences) # B, 512, 1, 1 + + feats = [] + x = face_sequences + for f in self.face_encoder_blocks: + x = f(x) + feats.append(x) + + x = audio_embedding + for f in self.face_decoder_blocks: + x = f(x) + try: + x = torch.cat((x, feats[-1]), dim=1) + except Exception as e: + print(x.size()) + print(feats[-1].size()) + raise e + + feats.pop() + + x = self.output_block(x) + + if input_dim_size > 4: + x = torch.split(x, B, dim=0) # [(B, C, H, W)] + outputs = torch.stack(x, dim=2) # (B, C, T, H, W) + + else: + outputs = x + + return outputs + + def predict(self, audio_sequences, face_sequences): + return self.forward(audio_sequences, face_sequences) + + def calculate_loss(self, interaction, valid=False): + r"""Calculate the training loss for a batch data. + + Args: + interaction (Interaction): Interaction class of the batch. 
+ + Returns: + torch.Tensor: Training loss, shape: [] + """ + indiv_mels = interaction['indiv_mels'].to(self.config['device']) + input_frames = interaction['input_frames'].to(self.config['device']) + mel = interaction['mels'].to(self.config['device']) + gt = interaction['gt'].to(self.config['device']) + g_frames = self.forward(indiv_mels, input_frames) + l1loss = self.l1loss(g_frames, gt) + if self.config['syncnet_wt'] > 0 or valid: + sync_loss = self.syncnet_loss(mel, g_frames) + else: + sync_loss = 0 + + loss = self.config['syncnet_wt'] * sync_loss + (1- self.config['syncnet_wt']) * l1loss + return {"loss":loss, "l1loss":l1loss, "sync_loss":sync_loss} + + def syncnet_loss(self, mel, g_frames): + syncnet = self.load_syncnet() + syncnet.eval() + g = g_frames[:, :, :, g_frames.size(3)//2:] + g = torch.cat([g[:, :, i] for i in range(self.config['syncnet_T'])], dim=1) + # B, 3 * T, H//2, W + a, v = syncnet(mel, g) + y = torch.ones(g.size(0), 1).float().to(self.config['device']) + return self.cosine_loss(a, v, y) + + def cosine_loss(self, a, v, y): + d = nn.functional.cosine_similarity(a, v) + loss = self.bceloss(d.unsqueeze(1), y) + + return loss + + def load_syncnet(self): + syncnet = SyncNet_color().to(self.config['device']) + for p in syncnet.parameters(): + p.requires_grad = False + checkpoint = torch.load(self.config["syncnet_checkpoint_path"]) + s = checkpoint["state_dict"] + new_s = {} + for k, v in s.items(): + new_s[k.replace('module.', '')] = v + syncnet.load_state_dict(new_s) + return syncnet + + def generate_batch(self): + audio_processor = Wav2LipAudio(self.config) + video_processor = Wav2LipPreprocessForInference(self.config) + + with open(self.config['test_filelist'], 'r') as filelist: + lines = filelist.readlines() + + file_dict = {'generated_video': [], 'real_video': []} + for idx, line in enumerate(tqdm(lines, desc='generate video')): + file_src = line.split()[0] + + audio_src = os.path.join(self.config['data_root'], file_src) + '.mp4' + video = 
os.path.join(self.config['data_root'], file_src) + '.mp4' + + + ensure_dir(os.path.join(self.config['temp_dir'])) + + command = 'ffmpeg -loglevel panic -y -i {} -strict -2 {}'.format(audio_src, os.path.join(self.config['temp_dir'], 'temp')+'.wav') + subprocess.call(command, shell=True) + + temp_audio = os.path.join(self.config['temp_dir'], 'temp')+'.wav' + wav = audio_processor.load_wav(temp_audio, 16000) + mel = audio_processor.melspectrogram(wav) + + if np.isnan(mel.reshape(-1)).sum() > 0: + continue + + mel_idx_multiplier = 80./self.config['fps'] + mel_chunks = [] + i = 0 + while 1: + start_idx = int(i * mel_idx_multiplier) + if start_idx + self.config['mel_step_size'] > len(mel[0]): + break + mel_chunks.append(mel[:, start_idx : start_idx + self.config['mel_step_size']]) + i += 1 + + video_stream = cv2.VideoCapture(video) + full_frames = [] + while 1: + still_reading, frame = video_stream.read() + if not still_reading or len(full_frames) > len(mel_chunks): + video_stream.release() + break + full_frames.append(frame) + + if len(full_frames) < len(mel_chunks): + continue + + full_frames = full_frames[:len(mel_chunks)] + + try: + face_det_results = video_processor.face_detect(full_frames.copy()) + except ValueError as e: + continue + + batch_size = self.config['wav2lip_batch_size'] + gen = video_processor.datagen(full_frames.copy(), face_det_results, mel_chunks) + + for i, (img_batch, mel_batch, frames, coords) in enumerate(gen): + if i == 0: + frame_h, frame_w = full_frames[0].shape[:-1] + output_video_path = os.path.join(self.config['temp_dir'], 'temp')+'.mp4' + fourcc = cv2.VideoWriter_fourcc(*'mp4v') # 或者尝试 'avc1' + out = cv2.VideoWriter(output_video_path, fourcc, 25, (frame_w, frame_h)) + + + img_batch = torch.FloatTensor(np.transpose(img_batch, (0, 3, 1, 2))).to(self.config['device']) + mel_batch = torch.FloatTensor(np.transpose(mel_batch, (0, 3, 1, 2))).to(self.config['device']) + + with torch.no_grad(): + pred = self.predict(mel_batch, img_batch) + + + 
pred = pred.cpu().numpy().transpose(0, 2, 3, 1) * 255. + + for pl, f, c in zip(pred, frames, coords): + y1, y2, x1, x2 = c + pl = cv2.resize(pl.astype(np.uint8), (x2 - x1, y2 - y1)) + f[y1:y2, x1:x2] = pl + out.write(f) + + out.release() + + vid = os.path.join(self.config['temp_dir'], file_src) + '.mp4' + vid_directory = os.path.dirname(vid) + if not os.path.exists(vid_directory): + os.makedirs(vid_directory) + + command = 'ffmpeg -loglevel panic -y -i {} -i {} -strict -2 -q:v 1 {}'.format(temp_audio, + os.path.join(self.config['temp_dir'], 'temp')+'.mp4', vid) + process_status = subprocess.call(command, shell=True) + if process_status == 0: + file_dict['generated_video'].append(vid) + file_dict['real_video'].append(video) + else: + continue + return file_dict + From a94c431a7c23111049d26a7279998c4f0708f8e2 Mon Sep 17 00:00:00 2001 From: Zhuoyuan Zhang Date: Tue, 30 Jan 2024 17:22:15 +0800 Subject: [PATCH 17/28] fix --- README.md | 4 ++-- .../dataprocess/{emogen_dataprocess.py => emogen_process.py} | 0 2 files changed, 2 insertions(+), 2 deletions(-) rename talkingface/data/dataprocess/{emogen_dataprocess.py => emogen_process.py} (100%) diff --git a/README.md b/README.md index 543a3c0f..7daa82fc 100644 --- a/README.md +++ b/README.md @@ -214,10 +214,10 @@ python run_talkingface.py --model=xxxx --dataset=xxxx (--other_parameters=xxxxxx ### 张卓远 -编写实现了模型的数据预处理和数据加载代码,编写实现了模型emo_disc部分模型。 +编写实现了模型的数据预处理和数据加载代码,编写实现了模型emo_disc情绪鉴别器模型,编写emogen.yaml。 运行步骤: 进入talkingface/data/dataprocess/文件夹下运行控制台,输入命令 -python emogen_dataprocess.py --inputfolder --preprocessed_root +python emogen_process.py --inputfolder --preprocessed_root 预处理会先将视频转化为25帧格式,此过程需要安装ffmpeg并添加为环境变量。 并将转化好的视频存入./modified_videos文件夹下 数据预处理第一步转换FPS演示结果 diff --git a/talkingface/data/dataprocess/emogen_dataprocess.py b/talkingface/data/dataprocess/emogen_process.py similarity index 100% rename from talkingface/data/dataprocess/emogen_dataprocess.py rename to talkingface/data/dataprocess/emogen_process.py From 
f0dea90a62974e654fef7fc42770ac6502c1f308 Mon Sep 17 00:00:00 2001 From: Zhuoyuan Zhang Date: Tue, 30 Jan 2024 17:43:35 +0800 Subject: [PATCH 18/28] Create evaluater.py --- talkingface/trainer/evaluater.py | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 talkingface/trainer/evaluater.py diff --git a/talkingface/trainer/evaluater.py b/talkingface/trainer/evaluater.py new file mode 100644 index 00000000..c27f4f4f --- /dev/null +++ b/talkingface/trainer/evaluater.py @@ -0,0 +1,11 @@ +# 导入 Trainer 类 +from trainer import Trainer + +# 创建 Trainer 类的实例 +trainer = Trainer() + +# 调用 evaluate 方法 + +trainer.evaluate(model_file="D:/visual studio/talking face/checkpoints") + + From 440fcd2c955db7836f80324571041cc598ce4b3d Mon Sep 17 00:00:00 2001 From: Zhuoyuan Zhang Date: Tue, 30 Jan 2024 17:53:34 +0800 Subject: [PATCH 19/28] Update evaluater.py --- talkingface/trainer/evaluater.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/talkingface/trainer/evaluater.py b/talkingface/trainer/evaluater.py index c27f4f4f..75ff7700 100644 --- a/talkingface/trainer/evaluater.py +++ b/talkingface/trainer/evaluater.py @@ -6,6 +6,6 @@ # 调用 evaluate 方法 -trainer.evaluate(model_file="D:/visual studio/talking face/checkpoints") +trainer.evaluate(model_file="../../checkpoints") From ffc2414d32b69327bcb9ad1ba22a3c046fd3db8f Mon Sep 17 00:00:00 2001 From: Zhuoyuan Zhang Date: Tue, 30 Jan 2024 17:54:00 +0800 Subject: [PATCH 20/28] Update README.md --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index 7daa82fc..d7db565c 100644 --- a/README.md +++ b/README.md @@ -215,10 +215,15 @@ python run_talkingface.py --model=xxxx --dataset=xxxx (--other_parameters=xxxxxx ### 张卓远 编写实现了模型的数据预处理和数据加载代码,编写实现了模型emo_disc情绪鉴别器模型,编写emogen.yaml。 + 运行步骤: + 进入talkingface/data/dataprocess/文件夹下运行控制台,输入命令 + python emogen_process.py --inputfolder --preprocessed_root + 预处理会先将视频转化为25帧格式,此过程需要安装ffmpeg并添加为环境变量。 + 并将转化好的视频存入./modified_videos文件夹下 
数据预处理第一步转换FPS演示结果 转换视频FPS结果 From a28b2fb87a822c5a18d7ef7caca6e16507b153c8 Mon Sep 17 00:00:00 2001 From: Zhuoyuan Zhang Date: Tue, 30 Jan 2024 17:58:34 +0800 Subject: [PATCH 21/28] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d7db565c..ef05c6a9 100644 --- a/README.md +++ b/README.md @@ -220,7 +220,7 @@ python run_talkingface.py --model=xxxx --dataset=xxxx (--other_parameters=xxxxxx 进入talkingface/data/dataprocess/文件夹下运行控制台,输入命令 -python emogen_process.py --inputfolder --preprocessed_root +python emogen_process.py --input_folder --preprocessed_root 预处理会先将视频转化为25帧格式,此过程需要安装ffmpeg并添加为环境变量。 From 06e358ffe3c2a37c6c723aed1c5ca9d27d45593e Mon Sep 17 00:00:00 2001 From: Zhuoyuan Zhang <103866519+zhangyuanyuan02@users.noreply.github.com> Date: Tue, 30 Jan 2024 19:39:48 +0800 Subject: [PATCH 22/28] Update README.md --- README.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/README.md b/README.md index ef05c6a9..0aa92024 100644 --- a/README.md +++ b/README.md @@ -208,10 +208,23 @@ python run_talkingface.py --model=xxxx --dataset=xxxx (--other_parameters=xxxxxx ## 小组README文件 +### 项目介绍-Emotionally Enhanced Talking Face Generation + +"情感增强的说话面部生成"这篇论文主要关注于通过加入广泛的情感范围来创建更加逼真和有说服力的说话面部视频。它解决了以往工作的局限性,这些工作通常无法创建逼真的视频,因为它们很少关注人物的表情和情感。本项目提出的框架旨在生成包含适当表情和情感的唇同步说话面部视频,使其更具说服力。 + +### 项目功能 +说话面部生成:该框架基于基础骨架架构,使用2D-CNN编解码器网络生成单独的帧。这涉及到一个面部编码器、一个音频编码器和一个解码器,强调视觉质量和准确的唇同步生成。 + +说话面部生成中的情感捕捉:这是关键部分,因为它涉及将情感信息包含在视频中。该方法将语音音频中表示的情感与视频生成的独立情感标签分开,提供了更多控制主题情感的方法。 + +数据预处理和增强:该框架使用完全遮盖的帧以及参考帧来加入情感,因为情感不仅通过面部的嘴唇区域来表达。 + +情感编码器:这将分类情感编码进视频生成过程。 ### 环境要求(依赖) 依赖库详见 requirements.txt。要求安装ffmpeg, 安装albumentations库 + ### 张卓远 编写实现了模型的数据预处理和数据加载代码,编写实现了模型emo_disc情绪鉴别器模型,编写emogen.yaml。 @@ -253,6 +266,10 @@ python emogen_process.py --input_folder --preprocessed_root 在trainer文件夹中添加color_syncnet_train.py, emotion_disc_train.py +### 周扬 +填充了image_driven所需的模型文件,包括conv.py,emo_disc.py,emo_syncnet.py和wav2lip.py + + 
From e932d1eed2c4a8be2ed293e1c951b48749ec48b1 Mon Sep 17 00:00:00 2001 From: Zhuoyuan Zhang <103866519+zhangyuanyuan02@users.noreply.github.com> Date: Tue, 30 Jan 2024 19:40:33 +0800 Subject: [PATCH 23/28] Create README_EMOGEN.md --- README_EMOGEN.md | 61 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 README_EMOGEN.md diff --git a/README_EMOGEN.md b/README_EMOGEN.md new file mode 100644 index 00000000..8cf225d9 --- /dev/null +++ b/README_EMOGEN.md @@ -0,0 +1,61 @@ +## 小组README文件 +### 项目介绍-Emotionally Enhanced Talking Face Generation + +"情感增强的说话面部生成"这篇论文主要关注于通过加入广泛的情感范围来创建更加逼真和有说服力的说话面部视频。它解决了以往工作的局限性,这些工作通常无法创建逼真的视频,因为它们很少关注人物的表情和情感。本项目提出的框架旨在生成包含适当表情和情感的唇同步说话面部视频,使其更具说服力。 + +### 项目功能 +说话面部生成:该框架基于基础骨架架构,使用2D-CNN编解码器网络生成单独的帧。这涉及到一个面部编码器、一个音频编码器和一个解码器,强调视觉质量和准确的唇同步生成。 + +说话面部生成中的情感捕捉:这是关键部分,因为它涉及将情感信息包含在视频中。该方法将语音音频中表示的情感与视频生成的独立情感标签分开,提供了更多控制主题情感的方法。 + +数据预处理和增强:该框架使用完全遮盖的帧以及参考帧来加入情感,因为情感不仅通过面部的嘴唇区域来表达。 + +情感编码器:这将分类情感编码进视频生成过程。 + +### 环境要求(依赖) +依赖库详见 requirements.txt。要求安装ffmpeg, 安装albumentations库 + + +### 张卓远 + +编写实现了模型的数据预处理和数据加载代码,编写实现了模型emo_disc情绪鉴别器模型,编写emogen.yaml。 + +运行步骤: + +进入talkingface/data/dataprocess/文件夹下运行控制台,输入命令 + +python emogen_process.py --input_folder --preprocessed_root + +预处理会先将视频转化为25帧格式,此过程需要安装ffmpeg并添加为环境变量。 + +并将转化好的视频存入./modified_videos文件夹下 +数据预处理第一步转换FPS演示结果 +转换视频FPS结果 +程序会自动运行数据预处理第二步: +数据预处理第二步演示 +由于本机内存空间不足转为使用其他云服务器测试,完成数据预处理的过程。 +预处理内存空间不足报错 +预处理完成演示 + +接下来训练情绪鉴别器: + +训练情绪鉴别器结果演示 + +由于训练过程过长,可以在中途输入Ctrl^C以停止训练 + + +### 蒋政 +完成训练部分代码编写,实现专家口型同步鉴别器模型 + +修改requirements.txt + +修改utils文件夹中的utils.py + +在model文件夹中添加 wav2lip.py,修改__init__.py + +在model/audio_driven_talkingface下添加conv.py , emogen_syncnet.py + +在trainer文件夹中添加color_syncnet_train.py, emotion_disc_train.py + +### 周扬 +填充了image_driven所需的模型文件,包括conv.py,emo_disc.py,emo_syncnet.py和wav2lip.py From 43ad1b436a57bac565e9a8634bb8a6689371d19a Mon Sep 17 00:00:00 2001 From: Zhuoyuan Zhang <103866519+zhangyuanyuan02@users.noreply.github.com> 
Date: Tue, 30 Jan 2024 19:41:31 +0800 Subject: [PATCH 24/28] Update README_EMOGEN.md --- README_EMOGEN.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/README_EMOGEN.md b/README_EMOGEN.md index 8cf225d9..226607c3 100644 --- a/README_EMOGEN.md +++ b/README_EMOGEN.md @@ -1,9 +1,9 @@ -## 小组README文件 -### 项目介绍-Emotionally Enhanced Talking Face Generation +# 小组README文件 +## 项目介绍-Emotionally Enhanced Talking Face Generation "情感增强的说话面部生成"这篇论文主要关注于通过加入广泛的情感范围来创建更加逼真和有说服力的说话面部视频。它解决了以往工作的局限性,这些工作通常无法创建逼真的视频,因为它们很少关注人物的表情和情感。本项目提出的框架旨在生成包含适当表情和情感的唇同步说话面部视频,使其更具说服力。 -### 项目功能 +## 项目功能 说话面部生成:该框架基于基础骨架架构,使用2D-CNN编解码器网络生成单独的帧。这涉及到一个面部编码器、一个音频编码器和一个解码器,强调视觉质量和准确的唇同步生成。 说话面部生成中的情感捕捉:这是关键部分,因为它涉及将情感信息包含在视频中。该方法将语音音频中表示的情感与视频生成的独立情感标签分开,提供了更多控制主题情感的方法。 @@ -12,9 +12,10 @@ 情感编码器:这将分类情感编码进视频生成过程。 -### 环境要求(依赖) +## 环境要求(依赖) 依赖库详见 requirements.txt。要求安装ffmpeg, 安装albumentations库 +## 实现及演示 ### 张卓远 From f9009900d6881ea8e7345770eed194dc700a15be Mon Sep 17 00:00:00 2001 From: Zhuoyuan Zhang <103866519+zhangyuanyuan02@users.noreply.github.com> Date: Tue, 30 Jan 2024 19:42:04 +0800 Subject: [PATCH 25/28] Update README.md --- README.md | 62 ------------------------------------------------------- 1 file changed, 62 deletions(-) diff --git a/README.md b/README.md index 0aa92024..a299cd15 100644 --- a/README.md +++ b/README.md @@ -207,68 +207,6 @@ python run_talkingface.py --model=xxxx --dataset=xxxx (--other_parameters=xxxxxx - 每个组都要提交一个README文件,写明完成的功能、最终实现的训练、验证截图、所使用的依赖、成员分工等。 -## 小组README文件 -### 项目介绍-Emotionally Enhanced Talking Face Generation - -"情感增强的说话面部生成"这篇论文主要关注于通过加入广泛的情感范围来创建更加逼真和有说服力的说话面部视频。它解决了以往工作的局限性,这些工作通常无法创建逼真的视频,因为它们很少关注人物的表情和情感。本项目提出的框架旨在生成包含适当表情和情感的唇同步说话面部视频,使其更具说服力。 - -### 项目功能 -说话面部生成:该框架基于基础骨架架构,使用2D-CNN编解码器网络生成单独的帧。这涉及到一个面部编码器、一个音频编码器和一个解码器,强调视觉质量和准确的唇同步生成。 - -说话面部生成中的情感捕捉:这是关键部分,因为它涉及将情感信息包含在视频中。该方法将语音音频中表示的情感与视频生成的独立情感标签分开,提供了更多控制主题情感的方法。 - -数据预处理和增强:该框架使用完全遮盖的帧以及参考帧来加入情感,因为情感不仅通过面部的嘴唇区域来表达。 - -情感编码器:这将分类情感编码进视频生成过程。 - -### 环境要求(依赖) -依赖库详见 
requirements.txt。要求安装ffmpeg, 安装albumentations库 - - -### 张卓远 - -编写实现了模型的数据预处理和数据加载代码,编写实现了模型emo_disc情绪鉴别器模型,编写emogen.yaml。 - -运行步骤: - -进入talkingface/data/dataprocess/文件夹下运行控制台,输入命令 - -python emogen_process.py --input_folder --preprocessed_root - -预处理会先将视频转化为25帧格式,此过程需要安装ffmpeg并添加为环境变量。 - -并将转化好的视频存入./modified_videos文件夹下 -数据预处理第一步转换FPS演示结果 -转换视频FPS结果 -程序会自动运行数据预处理第二步: -数据预处理第二步演示 -由于本机内存空间不足转为使用其他云服务器测试,完成数据预处理的过程。 -预处理内存空间不足报错 -预处理完成演示 - -接下来训练情绪鉴别器: - -训练情绪鉴别器结果演示 - -由于训练过程过长,可以在中途输入Ctrl^C以停止训练 - - -### 蒋政 -完成训练部分代码编写,实现专家口型同步鉴别器模型 - -修改requirements.txt - -修改utils文件夹中的utils.py - -在model文件夹中添加 wav2lip.py,修改__init__.py - -在model/audio_driven_talkingface下添加conv.py , emogen_syncnet.py - -在trainer文件夹中添加color_syncnet_train.py, emotion_disc_train.py - -### 周扬 -填充了image_driven所需的模型文件,包括conv.py,emo_disc.py,emo_syncnet.py和wav2lip.py - From 414445ec010c42f1b4b424606af4bb14ba563934 Mon Sep 17 00:00:00 2001 From: Zhuoyuan Zhang <103866519+zhangyuanyuan02@users.noreply.github.com> Date: Tue, 30 Jan 2024 19:59:07 +0800 Subject: [PATCH 26/28] saved --- saved/README.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 saved/README.md diff --git a/saved/README.md b/saved/README.md new file mode 100644 index 00000000..7305d0f8 --- /dev/null +++ b/saved/README.md @@ -0,0 +1 @@ +保存训练完成的权重 From f95ac1ee36cf76815697ee91d65200eec5cc9fbe Mon Sep 17 00:00:00 2001 From: Zhuoyuan Zhang Date: Tue, 30 Jan 2024 20:10:15 +0800 Subject: [PATCH 27/28] commitInfo --- .gitattributes | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .gitattributes diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 00000000..8893e766 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,2 @@ +“.pth” filter=lfs diff=lfs merge=lfs -text +.pth filter=lfs diff=lfs merge=lfs -text From 48e852e3faf63516f952f17c6a556d35f1aad118 Mon Sep 17 00:00:00 2001 From: Zhuoyuan Zhang <103866519+zhangyuanyuan02@users.noreply.github.com> Date: Tue, 30 Jan 2024 20:43:59 +0800 Subject: [PATCH 28/28] Update and 
rename README.md to README-EMOGEN.md --- saved/README-EMOGEN.md | 4 ++++ saved/README.md | 1 - 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 saved/README-EMOGEN.md delete mode 100644 saved/README.md diff --git a/saved/README-EMOGEN.md b/saved/README-EMOGEN.md new file mode 100644 index 00000000..8f1d1da9 --- /dev/null +++ b/saved/README-EMOGEN.md @@ -0,0 +1,4 @@ +保存训练完成的权重文件 +由于github只允许上传不超过100mb文件,请从百度网盘下载 +链接:https://pan.baidu.com/s/1Rdtpv7P38HYzPckk3pqCEA?pwd=emog +提取码:emog diff --git a/saved/README.md b/saved/README.md deleted file mode 100644 index 7305d0f8..00000000 --- a/saved/README.md +++ /dev/null @@ -1 +0,0 @@ -保存训练完成的权重