diff --git a/MuseTalk_project/3DMM/README.md b/MuseTalk_project/3DMM/README.md new file mode 100644 index 00000000..ab229059 --- /dev/null +++ b/MuseTalk_project/3DMM/README.md @@ -0,0 +1,35 @@ +# 基于3DMM点云的TFG评价指标 + +在talking face generation领域,正确地评估生成的视频中人物动作、神态等身份信息对于提高生成质量有重要帮助,更有助于对于不同方法间的公平比较。传统的图像评价指标如psnr、ssim等并不能直接准确表示人物信息,可能受非人物的因素影响(如图像背景等)。对此,我们提出了一种全新的评价指标,通过抽取视频中的人脸点云信息,将生成的视频与Grand Truth进行对比,完成生成质量的评估。 + +### 评价方法 + +Deep3DFaceRecon 是一种使用CNN对人脸进行3D重建的方法,在重建速度、精确度、鲁棒性方面有了很大提升。它不仅能够完成对人脸的识别和提取关键点,还能够获取人物表情、动作等特征,用特征向量加以表示。![image-20241222024034296](C:\Users\14879\AppData\Roaming\Typora\typora-user-images\image-20241222024034296.png) + +使用Deep3DFaceRecon方法抽取视频中人脸的关键点信息,得到 lm68 点云集。将生成视频与GT的关键点按照以下方式进行比较,进行标准化后根据MSE计算评价指标: +$$ +\log(\sum_{i=1}^{68}\lambda_i[(x_i-\hat{x_i})^2+(y_i-\hat{y_i})^2]+m_i) +$$ +其中,$(x,y)$ 代表点云的坐标,$\lambda_i$ 表示权重系数,可以据此对目标人物脸部的不同部位(如唇部、眼睛等)进行加权评估,以突出不同部位的渲染效果。$m_i$ 为偏移量。在下面的评测中,唇部位置 $\lambda _i=1.2$ 其余位置 $\lambda _i =1.0$ 。$m_i = 5$。 + +### 如何使用 + +1. 通过 ``pip install -r requirements`` 安装对应环境 +2. 将需要对比的视频放置根目录,更改eval.py中的默认路径 +3. 
运行eval.py + +### 评测结果 + +按照以上指标对测试集视频进行评测,结果如下: + +| 视频 | MuseTalk(30s) | MuseTalk(half) | +| ------- | ------------- | -------------- | +| Jae-in | 3.5845 | 3.5606 | +| Lieu | 4.4873 | 4.3448 | +| Macron | 5.0920 | 5.0923 | +| May | 4.4909 | 4.5529 | +| Obama | 4.3296 | 4.7277 | +| Obama1 | 4.8002 | 4.9230 | +| Obama2 | 4.5330 | 4.6687 | +| Shaheen | 3.8962 | 4.1104 | + diff --git a/MuseTalk_project/3DMM/deep_3drecon/__init__.py b/MuseTalk_project/3DMM/deep_3drecon/__init__.py new file mode 100644 index 00000000..6866fab1 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/__init__.py @@ -0,0 +1 @@ +from .reconstructor import * diff --git a/MuseTalk_project/3DMM/deep_3drecon/__pycache__/__init__.cpython-39.pyc b/MuseTalk_project/3DMM/deep_3drecon/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 00000000..e2a4c892 Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/__pycache__/__init__.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/__pycache__/reconstructor.cpython-39.pyc b/MuseTalk_project/3DMM/deep_3drecon/__pycache__/reconstructor.cpython-39.pyc new file mode 100644 index 00000000..5568ead9 Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/__pycache__/reconstructor.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/data/__init__.py b/MuseTalk_project/3DMM/deep_3drecon/data/__init__.py new file mode 100644 index 00000000..56fe2126 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/data/__init__.py @@ -0,0 +1,116 @@ +"""This package includes all the modules related to data loading and preprocessing + + To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset. + You need to implement four functions: + -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt). + -- <__len__>: return the size of dataset. + -- <__getitem__>: get a data point from data loader. 
+ -- : (optionally) add dataset-specific options and set default options. + +Now you can use the dataset class by specifying flag '--dataset_mode dummy'. +See our template dataset class 'template_dataset.py' for more details. +""" +import numpy as np +import importlib +import torch.utils.data +from data.base_dataset import BaseDataset + + +def find_dataset_using_name(dataset_name): + """Import the module "data/[dataset_name]_dataset.py". + + In the file, the class called DatasetNameDataset() will + be instantiated. It has to be a subclass of BaseDataset, + and it is case-insensitive. + """ + dataset_filename = "data." + dataset_name + "_dataset" + datasetlib = importlib.import_module(dataset_filename) + + dataset = None + target_dataset_name = dataset_name.replace('_', '') + 'dataset' + for name, cls in datasetlib.__dict__.items(): + if name.lower() == target_dataset_name.lower() \ + and issubclass(cls, BaseDataset): + dataset = cls + + if dataset is None: + raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name)) + + return dataset + + +def get_option_setter(dataset_name): + """Return the static method of the dataset class.""" + dataset_class = find_dataset_using_name(dataset_name) + return dataset_class.modify_commandline_options + + +def create_dataset(opt, rank=0): + """Create a dataset given the option. + + This function wraps the class CustomDatasetDataLoader. 
+ This is the main interface between this package and 'train.py'/'test.py' + + Example: + >>> from data import create_dataset + >>> dataset = create_dataset(opt) + """ + data_loader = CustomDatasetDataLoader(opt, rank=rank) + dataset = data_loader.load_data() + return dataset + +class CustomDatasetDataLoader(): + """Wrapper class of Dataset class that performs multi-threaded data loading""" + + def __init__(self, opt, rank=0): + """Initialize this class + + Step 1: create a dataset instance given the name [dataset_mode] + Step 2: create a multi-threaded data loader. + """ + self.opt = opt + dataset_class = find_dataset_using_name(opt.dataset_mode) + self.dataset = dataset_class(opt) + self.sampler = None + print("rank %d %s dataset [%s] was created" % (rank, self.dataset.name, type(self.dataset).__name__)) + if opt.use_ddp and opt.isTrain: + world_size = opt.world_size + self.sampler = torch.utils.data.distributed.DistributedSampler( + self.dataset, + num_replicas=world_size, + rank=rank, + shuffle=not opt.serial_batches + ) + self.dataloader = torch.utils.data.DataLoader( + self.dataset, + sampler=self.sampler, + num_workers=int(opt.num_threads / world_size), + batch_size=int(opt.batch_size / world_size), + drop_last=True) + else: + self.dataloader = torch.utils.data.DataLoader( + self.dataset, + batch_size=opt.batch_size, + shuffle=(not opt.serial_batches) and opt.isTrain, + num_workers=int(opt.num_threads), + drop_last=True + ) + + def set_epoch(self, epoch): + self.dataset.current_epoch = epoch + if self.sampler is not None: + self.sampler.set_epoch(epoch) + + def load_data(self): + return self + + def __len__(self): + """Return the number of data in the dataset""" + return min(len(self.dataset), self.opt.max_dataset_size) + + def __iter__(self): + """Return a batch of data""" + for i, data in enumerate(self.dataloader): + if i * self.opt.batch_size >= self.opt.max_dataset_size: + break + yield data diff --git 
a/MuseTalk_project/3DMM/deep_3drecon/data/__pycache__/__init__.cpython-39.pyc b/MuseTalk_project/3DMM/deep_3drecon/data/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 00000000..dbbe24e3 Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/data/__pycache__/__init__.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/data/__pycache__/base_dataset.cpython-39.pyc b/MuseTalk_project/3DMM/deep_3drecon/data/__pycache__/base_dataset.cpython-39.pyc new file mode 100644 index 00000000..024b1602 Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/data/__pycache__/base_dataset.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/data/base_dataset.py b/MuseTalk_project/3DMM/deep_3drecon/data/base_dataset.py new file mode 100644 index 00000000..1bd57d08 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/data/base_dataset.py @@ -0,0 +1,125 @@ +"""This module implements an abstract base class (ABC) 'BaseDataset' for datasets. + +It also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses. +""" +import random +import numpy as np +import torch.utils.data as data +from PIL import Image +import torchvision.transforms as transforms +from abc import ABC, abstractmethod + + +class BaseDataset(data.Dataset, ABC): + """This class is an abstract base class (ABC) for datasets. + + To create a subclass, you need to implement the following four functions: + -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt). + -- <__len__>: return the size of dataset. + -- <__getitem__>: get a data point. + -- : (optionally) add dataset-specific options and set default options. 
+ """ + + def __init__(self, opt): + """Initialize the class; save the options in the class + + Parameters: + opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions + """ + self.opt = opt + # self.root = opt.dataroot + self.current_epoch = 0 + + @staticmethod + def modify_commandline_options(parser, is_train): + """Add new dataset-specific options, and rewrite default values for existing options. + + Parameters: + parser -- original option parser + is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. + + Returns: + the modified parser. + """ + return parser + + @abstractmethod + def __len__(self): + """Return the total number of images in the dataset.""" + return 0 + + @abstractmethod + def __getitem__(self, index): + """Return a data point and its metadata information. + + Parameters: + index - - a random integer for data indexing + + Returns: + a dictionary of data with their names. It ususally contains the data itself and its metadata information. 
+ """ + pass + + +def get_transform(grayscale=False): + transform_list = [] + if grayscale: + transform_list.append(transforms.Grayscale(1)) + transform_list += [transforms.ToTensor()] + return transforms.Compose(transform_list) + +def get_affine_mat(opt, size): + shift_x, shift_y, scale, rot_angle, flip = 0., 0., 1., 0., False + w, h = size + + if 'shift' in opt.preprocess: + shift_pixs = int(opt.shift_pixs) + shift_x = random.randint(-shift_pixs, shift_pixs) + shift_y = random.randint(-shift_pixs, shift_pixs) + if 'scale' in opt.preprocess: + scale = 1 + opt.scale_delta * (2 * random.random() - 1) + if 'rot' in opt.preprocess: + rot_angle = opt.rot_angle * (2 * random.random() - 1) + rot_rad = -rot_angle * np.pi/180 + if 'flip' in opt.preprocess: + flip = random.random() > 0.5 + + shift_to_origin = np.array([1, 0, -w//2, 0, 1, -h//2, 0, 0, 1]).reshape([3, 3]) + flip_mat = np.array([-1 if flip else 1, 0, 0, 0, 1, 0, 0, 0, 1]).reshape([3, 3]) + shift_mat = np.array([1, 0, shift_x, 0, 1, shift_y, 0, 0, 1]).reshape([3, 3]) + rot_mat = np.array([np.cos(rot_rad), np.sin(rot_rad), 0, -np.sin(rot_rad), np.cos(rot_rad), 0, 0, 0, 1]).reshape([3, 3]) + scale_mat = np.array([scale, 0, 0, 0, scale, 0, 0, 0, 1]).reshape([3, 3]) + shift_to_center = np.array([1, 0, w//2, 0, 1, h//2, 0, 0, 1]).reshape([3, 3]) + + affine = shift_to_center @ scale_mat @ rot_mat @ shift_mat @ flip_mat @ shift_to_origin + affine_inv = np.linalg.inv(affine) + return affine, affine_inv, flip + +def apply_img_affine(img, affine_inv, method=Image.BICUBIC): + return img.transform(img.size, Image.AFFINE, data=affine_inv.flatten()[:6], resample=Image.BICUBIC) + +def apply_lm_affine(landmark, affine, flip, size): + _, h = size + lm = landmark.copy() + lm[:, 1] = h - 1 - lm[:, 1] + lm = np.concatenate((lm, np.ones([lm.shape[0], 1])), -1) + lm = lm @ np.transpose(affine) + lm[:, :2] = lm[:, :2] / lm[:, 2:] + lm = lm[:, :2] + lm[:, 1] = h - 1 - lm[:, 1] + if flip: + lm_ = lm.copy() + lm_[:17] = lm[16::-1] + 
lm_[17:22] = lm[26:21:-1] + lm_[22:27] = lm[21:16:-1] + lm_[31:36] = lm[35:30:-1] + lm_[36:40] = lm[45:41:-1] + lm_[40:42] = lm[47:45:-1] + lm_[42:46] = lm[39:35:-1] + lm_[46:48] = lm[41:39:-1] + lm_[48:55] = lm[54:47:-1] + lm_[55:60] = lm[59:54:-1] + lm_[60:65] = lm[64:59:-1] + lm_[65:68] = lm[67:64:-1] + lm = lm_ + return lm diff --git a/MuseTalk_project/3DMM/deep_3drecon/data/flist_dataset.py b/MuseTalk_project/3DMM/deep_3drecon/data/flist_dataset.py new file mode 100644 index 00000000..c0b6945c --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/data/flist_dataset.py @@ -0,0 +1,125 @@ +"""This script defines the custom dataset for Deep3DFaceRecon_pytorch +""" + +import os.path +from data.base_dataset import BaseDataset, get_transform, get_affine_mat, apply_img_affine, apply_lm_affine +from data.image_folder import make_dataset +from PIL import Image +import random +import util.util as util +import numpy as np +import json +import torch +from scipy.io import loadmat, savemat +import pickle +from util.preprocess import align_img, estimate_norm +from util.load_mats import load_lm3d + + +def default_flist_reader(flist): + """ + flist format: impath label\nimpath label\n ...(same to caffe's filelist) + """ + imlist = [] + with open(flist, 'r') as rf: + for line in rf.readlines(): + impath = line.strip() + imlist.append(impath) + + return imlist + +def jason_flist_reader(flist): + with open(flist, 'r') as fp: + info = json.load(fp) + return info + +def parse_label(label): + return torch.tensor(np.array(label).astype(np.float32)) + + +class FlistDataset(BaseDataset): + """ + It requires one directories to host training images '/path/to/data/train' + You can train the model with the dataset flag '--dataroot /path/to/data'. + """ + + def __init__(self, opt): + """Initialize this dataset class. 
+ + Parameters: + opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions + """ + BaseDataset.__init__(self, opt) + + self.lm3d_std = load_lm3d(opt.bfm_folder) + + msk_names = default_flist_reader(opt.flist) + self.msk_paths = [os.path.join(opt.data_root, i) for i in msk_names] + + self.size = len(self.msk_paths) + self.opt = opt + + self.name = 'train' if opt.isTrain else 'val' + if '_' in opt.flist: + self.name += '_' + opt.flist.split(os.sep)[-1].split('_')[0] + + + def __getitem__(self, index): + """Return a data point and its metadata information. + + Parameters: + index (int) -- a random integer for data indexing + + Returns a dictionary that contains A, B, A_paths and B_paths + img (tensor) -- an image in the input domain + msk (tensor) -- its corresponding attention mask + lm (tensor) -- its corresponding 3d landmarks + im_paths (str) -- image paths + aug_flag (bool) -- a flag used to tell whether its raw or augmented + """ + msk_path = self.msk_paths[index % self.size] # make sure index is within then range + img_path = msk_path.replace('mask/', '') + lm_path = '.'.join(msk_path.replace('mask', 'landmarks').split('.')[:-1]) + '.txt' + + raw_img = Image.open(img_path).convert('RGB') + raw_msk = Image.open(msk_path).convert('RGB') + raw_lm = np.loadtxt(lm_path).astype(np.float32) + + _, img, lm, msk = align_img(raw_img, raw_lm, self.lm3d_std, raw_msk) + + aug_flag = self.opt.use_aug and self.opt.isTrain + if aug_flag: + img, lm, msk = self._augmentation(img, lm, self.opt, msk) + + _, H = img.size + M = estimate_norm(lm, H) + transform = get_transform() + img_tensor = transform(img) + msk_tensor = transform(msk)[:1, ...] 
+ lm_tensor = parse_label(lm) + M_tensor = parse_label(M) + + + return {'imgs': img_tensor, + 'lms': lm_tensor, + 'msks': msk_tensor, + 'M': M_tensor, + 'im_paths': img_path, + 'aug_flag': aug_flag, + 'dataset': self.name} + + def _augmentation(self, img, lm, opt, msk=None): + affine, affine_inv, flip = get_affine_mat(opt, img.size) + img = apply_img_affine(img, affine_inv) + lm = apply_lm_affine(lm, affine, flip, img.size) + if msk is not None: + msk = apply_img_affine(msk, affine_inv, method=Image.BILINEAR) + return img, lm, msk + + + + + def __len__(self): + """Return the total number of images in the dataset. + """ + return self.size diff --git a/MuseTalk_project/3DMM/deep_3drecon/data/image_folder.py b/MuseTalk_project/3DMM/deep_3drecon/data/image_folder.py new file mode 100644 index 00000000..efadc2ec --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/data/image_folder.py @@ -0,0 +1,66 @@ +"""A modified image folder class + +We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py) +so that this class can load images from both current directory and its subdirectories. 
+""" +import numpy as np +import torch.utils.data as data + +from PIL import Image +import os +import os.path + +IMG_EXTENSIONS = [ + '.jpg', '.JPG', '.jpeg', '.JPEG', + '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', + '.tif', '.TIF', '.tiff', '.TIFF', +] + + +def is_image_file(filename): + return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) + + +def make_dataset(dir, max_dataset_size=float("inf")): + images = [] + assert os.path.isdir(dir) or os.path.islink(dir), '%s is not a valid directory' % dir + + for root, _, fnames in sorted(os.walk(dir, followlinks=True)): + for fname in fnames: + if is_image_file(fname): + path = os.path.join(root, fname) + images.append(path) + return images[:min(max_dataset_size, len(images))] + + +def default_loader(path): + return Image.open(path).convert('RGB') + + +class ImageFolder(data.Dataset): + + def __init__(self, root, transform=None, return_paths=False, + loader=default_loader): + imgs = make_dataset(root) + if len(imgs) == 0: + raise(RuntimeError("Found 0 images in: " + root + "\n" + "Supported image extensions are: " + ",".join(IMG_EXTENSIONS))) + + self.root = root + self.imgs = imgs + self.transform = transform + self.return_paths = return_paths + self.loader = loader + + def __getitem__(self, index): + path = self.imgs[index] + img = self.loader(path) + if self.transform is not None: + img = self.transform(img) + if self.return_paths: + return img, path + else: + return img + + def __len__(self): + return len(self.imgs) diff --git a/MuseTalk_project/3DMM/deep_3drecon/data/template_dataset.py b/MuseTalk_project/3DMM/deep_3drecon/data/template_dataset.py new file mode 100644 index 00000000..bfdf16be --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/data/template_dataset.py @@ -0,0 +1,75 @@ +"""Dataset class template + +This module provides a template for users to implement custom datasets. +You can specify '--dataset_mode template' to use this dataset. 
+The class name should be consistent with both the filename and its dataset_mode option. +The filename should be _dataset.py +The class name should be Dataset.py +You need to implement the following functions: + -- : Add dataset-specific options and rewrite default values for existing options. + -- <__init__>: Initialize this dataset class. + -- <__getitem__>: Return a data point and its metadata information. + -- <__len__>: Return the number of images. +""" +from data.base_dataset import BaseDataset, get_transform +# from data.image_folder import make_dataset +# from PIL import Image + + +class TemplateDataset(BaseDataset): + """A template dataset class for you to implement custom datasets.""" + @staticmethod + def modify_commandline_options(parser, is_train): + """Add new dataset-specific options, and rewrite default values for existing options. + + Parameters: + parser -- original option parser + is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. + + Returns: + the modified parser. + """ + parser.add_argument('--new_dataset_option', type=float, default=1.0, help='new dataset option') + parser.set_defaults(max_dataset_size=10, new_dataset_option=2.0) # specify dataset-specific default values + return parser + + def __init__(self, opt): + """Initialize this dataset class. + + Parameters: + opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions + + A few things can be done here. + - save the options (have been done in BaseDataset) + - get image paths and meta information of the dataset. + - define the image transformation. + """ + # save the option and dataset root + BaseDataset.__init__(self, opt) + # get the image paths of your dataset; + self.image_paths = [] # You can call sorted(make_dataset(self.root, opt.max_dataset_size)) to get all the image paths under the directory self.root + # define the default transform function. 
You can use ; You can also define your custom transform function + self.transform = get_transform(opt) + + def __getitem__(self, index): + """Return a data point and its metadata information. + + Parameters: + index -- a random integer for data indexing + + Returns: + a dictionary of data with their names. It usually contains the data itself and its metadata information. + + Step 1: get a random image path: e.g., path = self.image_paths[index] + Step 2: load your data from the disk: e.g., image = Image.open(path).convert('RGB'). + Step 3: convert your data to a PyTorch tensor. You can use helpder functions such as self.transform. e.g., data = self.transform(image) + Step 4: return a data point as a dictionary. + """ + path = 'temp' # needs to be a string + data_A = None # needs to be a tensor + data_B = None # needs to be a tensor + return {'data_A': data_A, 'data_B': data_B, 'path': path} + + def __len__(self): + """Return the total number of images.""" + return len(self.image_paths) diff --git a/MuseTalk_project/3DMM/deep_3drecon/data_preparation.py b/MuseTalk_project/3DMM/deep_3drecon/data_preparation.py new file mode 100644 index 00000000..6ffc79d3 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/data_preparation.py @@ -0,0 +1,45 @@ +"""This script is the data preparation script for Deep3DFaceRecon_pytorch +""" + +import os +import numpy as np +import argparse +from util.detect_lm68 import detect_68p,load_lm_graph +from util.skin_mask import get_skin_mask +from util.generate_list import check_list, write_list +import warnings +warnings.filterwarnings("ignore") + +parser = argparse.ArgumentParser() +parser.add_argument('--data_root', type=str, default='datasets', help='root directory for training data') +parser.add_argument('--img_folder', nargs="+", required=True, help='folders of training images') +parser.add_argument('--mode', type=str, default='train', help='train or val') +opt = parser.parse_args() + +os.environ['CUDA_VISIBLE_DEVICES'] = '0' + +def 
data_prepare(folder_list,mode): + + lm_sess,input_op,output_op = load_lm_graph('./checkpoints/lm_model/68lm_detector.pb') # load a tensorflow version 68-landmark detector + + for img_folder in folder_list: + detect_68p(img_folder,lm_sess,input_op,output_op) # detect landmarks for images + get_skin_mask(img_folder) # generate skin attention mask for images + + # create files that record path to all training data + msks_list = [] + for img_folder in folder_list: + path = os.path.join(img_folder, 'mask') + msks_list += ['/'.join([img_folder, 'mask', i]) for i in sorted(os.listdir(path)) if 'jpg' in i or + 'png' in i or 'jpeg' in i or 'PNG' in i] + + imgs_list = [i.replace('mask/', '') for i in msks_list] + lms_list = [i.replace('mask', 'landmarks') for i in msks_list] + lms_list = ['.'.join(i.split('.')[:-1]) + '.txt' for i in lms_list] + + lms_list_final, imgs_list_final, msks_list_final = check_list(lms_list, imgs_list, msks_list) # check if the path is valid + write_list(lms_list_final, imgs_list_final, msks_list_final, mode=mode) # save files + +if __name__ == '__main__': + print('Datasets:',opt.img_folder) + data_prepare([os.path.join(opt.data_root,folder) for folder in opt.img_folder],opt.mode) diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000002.jpg b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000002.jpg new file mode 100644 index 00000000..dc7ebcbf Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000002.jpg differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000006.jpg b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000006.jpg new file mode 100644 index 00000000..725e86c8 Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000006.jpg differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000007.jpg b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000007.jpg new file mode 100644 index 00000000..443c8068 Binary files 
/dev/null and b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000007.jpg differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000031.jpg b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000031.jpg new file mode 100644 index 00000000..46bdce9c Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000031.jpg differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000033.jpg b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000033.jpg new file mode 100644 index 00000000..c105797a Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000033.jpg differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000037.jpg b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000037.jpg new file mode 100644 index 00000000..6cba8201 Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000037.jpg differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000050.jpg b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000050.jpg new file mode 100644 index 00000000..8513d730 Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000050.jpg differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000055.jpg b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000055.jpg new file mode 100644 index 00000000..1ea7e0c1 Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000055.jpg differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000114.jpg b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000114.jpg new file mode 100644 index 00000000..abf24cab Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000114.jpg differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000125.jpg b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000125.jpg new file mode 100644 index 
00000000..272d4a4f Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000125.jpg differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000126.jpg b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000126.jpg new file mode 100644 index 00000000..e7a9a907 Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/000126.jpg differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/015259.jpg b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/015259.jpg new file mode 100644 index 00000000..a421abf7 Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/015259.jpg differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/015270.jpg b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/015270.jpg new file mode 100644 index 00000000..d4cd516e Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/015270.jpg differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/015309.jpg b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/015309.jpg new file mode 100644 index 00000000..6331a72b Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/015309.jpg differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/015310.jpg b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/015310.jpg new file mode 100644 index 00000000..71bf911d Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/015310.jpg differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/015316.jpg b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/015316.jpg new file mode 100644 index 00000000..3a7ca67e Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/015316.jpg differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/015384.jpg b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/015384.jpg new 
file mode 100644 index 00000000..9dfaceff Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/015384.jpg differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000002.txt b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000002.txt new file mode 100644 index 00000000..0c0abbda --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000002.txt @@ -0,0 +1,5 @@ +142.84 207.18 +222.02 203.9 +159.24 253.57 +146.59 290.93 +227.52 284.74 diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000006.txt b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000006.txt new file mode 100644 index 00000000..28d4d3d2 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000006.txt @@ -0,0 +1,5 @@ +199.93 158.28 +255.34 166.54 +236.08 198.92 +198.83 229.24 +245.23 234.52 diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000007.txt b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000007.txt new file mode 100644 index 00000000..be564ec4 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000007.txt @@ -0,0 +1,5 @@ +129.36 198.28 +204.47 191.47 +164.42 240.51 +140.74 277.77 +205.4 270.9 diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000031.txt b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000031.txt new file mode 100644 index 00000000..10467f13 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000031.txt @@ -0,0 +1,5 @@ +151.23 240.71 +274.05 235.52 +217.37 305.99 +158.03 346.06 +272.17 341.09 diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000033.txt b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000033.txt new file mode 100644 index 00000000..e226473b --- /dev/null +++ 
b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000033.txt @@ -0,0 +1,5 @@ +119.09 94.291 +158.31 96.472 +136.76 121.4 +119.33 134.49 +154.66 136.68 diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000037.txt b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000037.txt new file mode 100644 index 00000000..ebdc113d --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000037.txt @@ -0,0 +1,5 @@ +147.37 159.39 +196.94 163.26 +190.68 194.36 +153.72 228.44 +193.94 229.7 diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000050.txt b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000050.txt new file mode 100644 index 00000000..67eed576 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000050.txt @@ -0,0 +1,5 @@ +150.4 94.799 +205.14 102.07 +179.54 131.16 +144.45 147.42 +193.39 154.14 diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000055.txt b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000055.txt new file mode 100644 index 00000000..4eec3411 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000055.txt @@ -0,0 +1,5 @@ +114.26 193.42 +205.8 190.27 +154.15 244.02 +124.69 295.22 +200.88 292.69 diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000114.txt b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000114.txt new file mode 100644 index 00000000..f7c78193 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000114.txt @@ -0,0 +1,5 @@ +217.52 152.95 +281.48 147.14 +253.02 196.03 +225.79 221.6 +288.25 214.44 diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000125.txt b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000125.txt new file mode 100644 index 00000000..c6c705d8 --- /dev/null +++ 
b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000125.txt @@ -0,0 +1,5 @@ +90.928 99.858 +146.87 100.33 +114.22 130.36 +91.579 153.32 +143.63 153.56 diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000126.txt b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000126.txt new file mode 100644 index 00000000..e34d1bd0 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/000126.txt @@ -0,0 +1,5 @@ +307.56 166.54 +387.06 159.62 +335.52 222.26 +319.3 248.85 +397.71 239.14 diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/015259.txt b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/015259.txt new file mode 100644 index 00000000..9c2ab86f --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/015259.txt @@ -0,0 +1,5 @@ +226.38 193.65 +319.12 208.97 +279.99 245.88 +213.79 290.55 +303.03 302.1 diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/015270.txt b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/015270.txt new file mode 100644 index 00000000..335dcd6c --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/015270.txt @@ -0,0 +1,5 @@ +208.4 410.08 +364.41 388.68 +291.6 503.57 +244.82 572.86 +383.18 553.49 diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/015309.txt b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/015309.txt new file mode 100644 index 00000000..309a6331 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/015309.txt @@ -0,0 +1,5 @@ +284.61 496.57 +562.77 550.78 +395.85 712.84 +238.92 786.8 +495.61 827.22 diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/015310.txt b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/015310.txt new file mode 100644 index 00000000..7ce6a510 --- /dev/null +++ 
b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/015310.txt @@ -0,0 +1,5 @@ +153.95 153.43 +211.13 161.54 +197.28 190.26 +150.82 215.98 +202.32 223.12 diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/015316.txt b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/015316.txt new file mode 100644 index 00000000..0743b137 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/015316.txt @@ -0,0 +1,5 @@ +481.31 396.88 +667.75 392.43 +557.81 440.55 +490.44 586.28 +640.56 583.2 diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/015384.txt b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/015384.txt new file mode 100644 index 00000000..b49f9e98 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/015384.txt @@ -0,0 +1,5 @@ +191.79 143.97 +271.86 151.23 +191.25 210.29 +187.82 257.12 +258.82 261.96 diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/vd006.txt b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/vd006.txt new file mode 100644 index 00000000..5fc0f2df --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/vd006.txt @@ -0,0 +1,5 @@ +123.12 117.58 +176.59 122.09 +126.99 144.68 +117.61 183.43 +163.94 186.41 diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/vd025.txt b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/vd025.txt new file mode 100644 index 00000000..0c5bf97b --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/vd025.txt @@ -0,0 +1,5 @@ +180.12 116.13 +263.18 98.397 +230.48 154.72 +201.37 199.01 +279.18 182.56 diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/vd026.txt b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/vd026.txt new file mode 100644 index 00000000..f4cedc32 --- /dev/null +++ 
b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/vd026.txt @@ -0,0 +1,5 @@ +171.27 263.54 +286.58 263.88 +203.35 333.02 +170.6 389.42 +281.73 386.84 diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/vd034.txt b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/vd034.txt new file mode 100644 index 00000000..f799cc11 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/vd034.txt @@ -0,0 +1,5 @@ +136.01 167.83 +195.25 151.71 +152.89 191.45 +149.85 235.5 +201.16 222.8 diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/vd051.txt b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/vd051.txt new file mode 100644 index 00000000..38576331 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/vd051.txt @@ -0,0 +1,5 @@ +161.92 292.04 +254.21 283.81 +212.75 342.06 +170.78 387.28 +254.6 379.82 diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/vd070.txt b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/vd070.txt new file mode 100644 index 00000000..8f02c191 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/vd070.txt @@ -0,0 +1,5 @@ +276.53 290.35 +383.38 294.75 +314.48 354.66 +275.08 407.72 +364.94 411.48 diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/vd092.txt b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/vd092.txt new file mode 100644 index 00000000..679b2891 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/vd092.txt @@ -0,0 +1,5 @@ +108.59 149.07 +157.35 143.85 +134.4 173.2 +117.88 200.79 +159.56 196.36 diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/vd102.txt b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/vd102.txt new file mode 100644 index 00000000..6fa2643b --- /dev/null +++ 
b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/detections/vd102.txt @@ -0,0 +1,5 @@ +121.62 225.96 +186.73 223.07 +162.99 269.82 +132.12 302.62 +186.42 299.21 diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/vd006.png b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/vd006.png new file mode 100644 index 00000000..681e3847 Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/vd006.png differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/vd025.png b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/vd025.png new file mode 100644 index 00000000..a12e8d57 Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/vd025.png differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/vd026.png b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/vd026.png new file mode 100644 index 00000000..96a06a7a Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/vd026.png differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/vd034.png b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/vd034.png new file mode 100644 index 00000000..2c0000f3 Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/vd034.png differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/vd051.png b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/vd051.png new file mode 100644 index 00000000..9e841e50 Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/vd051.png differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/vd070.png b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/vd070.png new file mode 100644 index 00000000..e084e840 Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/vd070.png differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/vd092.png 
b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/vd092.png new file mode 100644 index 00000000..49570eeb Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/vd092.png differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/vd102.png b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/vd102.png new file mode 100644 index 00000000..7864178a Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/datasets/examples/vd102.png differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/__init__.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/__init__.py new file mode 100644 index 00000000..a09ede59 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/__init__.py @@ -0,0 +1,67 @@ +"""This package contains modules related to objective functions, optimizations, and network architectures. + +To add a custom model class called 'dummy', you need to add a file called 'dummy_model.py' and define a subclass DummyModel inherited from BaseModel. +You need to implement the following five functions: + -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt). + -- : unpack data from dataset and apply preprocessing. + -- : produce intermediate results. + -- : calculate loss, gradients, and update network weights. + -- : (optionally) add model-specific options and set default options. + +In the function <__init__>, you need to define four lists: + -- self.loss_names (str list): specify the training losses that you want to plot and save. + -- self.model_names (str list): define networks used in our training. + -- self.visual_names (str list): specify the images that you want to display and save. + -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an usage. 
+ +Now you can use the model class by specifying flag '--model dummy'. +See our template model class 'template_model.py' for more details. +""" + +import importlib +from .base_model import BaseModel + + +def find_model_using_name(model_name): + """Import the module "models/[model_name]_model.py". + + In the file, the class called DatasetNameModel() will + be instantiated. It has to be a subclass of BaseModel, + and it is case-insensitive. + """ + model_filename = "deep_3drecon_models." + model_name + "_model" + modellib = importlib.import_module(model_filename) + model = None + target_model_name = model_name.replace('_', '') + 'model' + for name, cls in modellib.__dict__.items(): + if name.lower() == target_model_name.lower() \ + and issubclass(cls, BaseModel): + model = cls + + if model is None: + print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name)) + exit(0) + + return model + + +def get_option_setter(model_name): + """Return the static method of the model class.""" + model_class = find_model_using_name(model_name) + return model_class.modify_commandline_options + + +def create_model(opt): + """Create a model given the option. + + This function warps the class CustomDatasetDataLoader. 
+ This is the main interface between this package and 'train.py'/'test.py' + + Example: + >>> from models import create_model + >>> model = create_model(opt) + """ + model = find_model_using_name(opt.model) + instance = model(opt) + print("model [%s] was created" % type(instance).__name__) + return instance diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/__pycache__/__init__.cpython-39.pyc b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 00000000..92edb66d Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/__pycache__/__init__.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/__pycache__/base_model.cpython-39.pyc b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/__pycache__/base_model.cpython-39.pyc new file mode 100644 index 00000000..ac35b2c3 Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/__pycache__/base_model.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/__pycache__/bfm.cpython-39.pyc b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/__pycache__/bfm.cpython-39.pyc new file mode 100644 index 00000000..3408d979 Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/__pycache__/bfm.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/__pycache__/facerecon_model.cpython-39.pyc b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/__pycache__/facerecon_model.cpython-39.pyc new file mode 100644 index 00000000..0890bd7a Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/__pycache__/facerecon_model.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/__pycache__/losses.cpython-39.pyc b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/__pycache__/losses.cpython-39.pyc new 
file mode 100644 index 00000000..c0f46069 Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/__pycache__/losses.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/__pycache__/networks.cpython-39.pyc b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/__pycache__/networks.cpython-39.pyc new file mode 100644 index 00000000..20c77c0b Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/__pycache__/networks.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/README.md b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/README.md new file mode 100644 index 00000000..8d391f63 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/README.md @@ -0,0 +1,218 @@ +# Distributed Arcface Training in Pytorch + +The "arcface_torch" repository is the official implementation of the ArcFace algorithm. It supports distributed and sparse training with multiple distributed training examples, including several memory-saving techniques such as mixed precision training and gradient checkpointing. It also supports training for ViT models and datasets including WebFace42M and Glint360K, two of the largest open-source datasets. Additionally, the repository comes with a built-in tool for converting to ONNX format, making it easy to submit to MFR evaluation systems. 
+ +[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/killing-two-birds-with-one-stone-efficient/face-verification-on-ijb-c)](https://paperswithcode.com/sota/face-verification-on-ijb-c?p=killing-two-birds-with-one-stone-efficient) +[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/killing-two-birds-with-one-stone-efficient/face-verification-on-ijb-b)](https://paperswithcode.com/sota/face-verification-on-ijb-b?p=killing-two-birds-with-one-stone-efficient) +[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/killing-two-birds-with-one-stone-efficient/face-verification-on-agedb-30)](https://paperswithcode.com/sota/face-verification-on-agedb-30?p=killing-two-birds-with-one-stone-efficient) +[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/killing-two-birds-with-one-stone-efficient/face-verification-on-cfp-fp)](https://paperswithcode.com/sota/face-verification-on-cfp-fp?p=killing-two-birds-with-one-stone-efficient) + +## Requirements + +To avail the latest features of PyTorch, we have upgraded to version 1.12.0. + +- Install [PyTorch](https://pytorch.org/get-started/previous-versions/) (torch>=1.12.0). +- (Optional) Install [DALI](https://docs.nvidia.com/deeplearning/dali/user-guide/docs/), our doc for [install_dali.md](docs/install_dali.md). +- `pip install -r requirement.txt`. + +## How to Training + +To train a model, execute the `train.py` script with the path to the configuration files. The sample commands provided below demonstrate the process of conducting distributed training. + +### 1. To run on one GPU: + +```shell +python train_v2.py configs/ms1mv3_r50_onegpu +``` + +Note: +It is not recommended to use a single GPU for training, as this may result in longer training times and suboptimal performance. For best results, we suggest using multiple GPUs or a GPU cluster. + + +### 2. 
To run on a machine with 8 GPUs: + +```shell +torchrun --nproc_per_node=8 train.py configs/ms1mv3_r50 +``` + +### 3. To run on 2 machines with 8 GPUs each: + +Node 0: + +```shell +torchrun --nproc_per_node=8 --nnodes=2 --node_rank=0 --master_addr="ip1" --master_port=12581 train.py configs/wf42m_pfc02_16gpus_r100 +``` + +Node 1: + +```shell +torchrun --nproc_per_node=8 --nnodes=2 --node_rank=1 --master_addr="ip1" --master_port=12581 train.py configs/wf42m_pfc02_16gpus_r100 +``` + +### 4. Run ViT-B on a machine with 24k batchsize: + +```shell +torchrun --nproc_per_node=8 train_v2.py configs/wf42m_pfc03_40epoch_8gpu_vit_b +``` + + +## Download Datasets or Prepare Datasets +- [MS1MV2](https://github.com/deepinsight/insightface/tree/master/recognition/_datasets_#ms1m-arcface-85k-ids58m-images-57) (87k IDs, 5.8M images) +- [MS1MV3](https://github.com/deepinsight/insightface/tree/master/recognition/_datasets_#ms1m-retinaface) (93k IDs, 5.2M images) +- [Glint360K](https://github.com/deepinsight/insightface/tree/master/recognition/partial_fc#4-download) (360k IDs, 17.1M images) +- [WebFace42M](docs/prepare_webface42m.md) (2M IDs, 42.5M images) +- [Your Dataset, Click Here!](docs/prepare_custom_dataset.md) + +Note: +If you want to use DALI for data reading, please use the script 'scripts/shuffle_rec.py' to shuffle the InsightFace style rec before using it. +Example: + +`python scripts/shuffle_rec.py ms1m-retinaface-t1` + +You will get the "shuffled_ms1m-retinaface-t1" folder, where the samples in the "train.rec" file are shuffled. + + +## Model Zoo + +- The models are available for non-commercial research purposes only. +- All models can be found in here. 
+- [Baidu Yun Pan](https://pan.baidu.com/s/1CL-l4zWqsI1oDuEEYVhj-g): e8pw +- [OneDrive](https://1drv.ms/u/s!AswpsDO2toNKq0lWY69vN58GR6mw?e=p9Ov5d) + +### Performance on IJB-C and [**ICCV2021-MFR**](https://github.com/deepinsight/insightface/blob/master/challenges/mfr/README.md) + +ICCV2021-MFR testset consists of non-celebrities so we can ensure that it has very few overlap with public available face +recognition training set, such as MS1M and CASIA as they mostly collected from online celebrities. +As the result, we can evaluate the FAIR performance for different algorithms. + +For **ICCV2021-MFR-ALL** set, TAR is measured on all-to-all 1:1 protocal, with FAR less than 0.000001(e-6). The +globalised multi-racial testset contains 242,143 identities and 1,624,305 images. + + +#### 1. Training on Single-Host GPU + +| Datasets | Backbone | **MFR-ALL** | IJB-C(1E-4) | IJB-C(1E-5) | log | +|:---------------|:--------------------|:------------|:------------|:------------|:------------------------------------------------------------------------------------------------------------------------------------| +| MS1MV2 | mobilefacenet-0.45G | 62.07 | 93.61 | 90.28 | [click me](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/ms1mv2_mbf/training.log) | +| MS1MV2 | r50 | 75.13 | 95.97 | 94.07 | [click me](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/ms1mv2_r50/training.log) | +| MS1MV2 | r100 | 78.12 | 96.37 | 94.27 | [click me](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/ms1mv2_r100/training.log) | +| MS1MV3 | mobilefacenet-0.45G | 63.78 | 94.23 | 91.33 | [click me](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/ms1mv3_mbf/training.log) | +| MS1MV3 | r50 | 79.14 | 96.37 | 94.47 | [click me](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/ms1mv3_r50/training.log) | +| MS1MV3 | r100 | 81.97 | 96.85 | 95.02 | [click 
me](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/ms1mv3_r100/training.log) | +| Glint360K | mobilefacenet-0.45G | 70.18 | 95.04 | 92.62 | [click me](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/glint360k_mbf/training.log) | +| Glint360K | r50 | 86.34 | 97.16 | 95.81 | [click me](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/glint360k_r50/training.log) | +| Glint360k | r100 | 89.52 | 97.55 | 96.38 | [click me](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/glint360k_r100/training.log) | +| WF4M | r100 | 89.87 | 97.19 | 95.48 | [click me](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/wf4m_r100/training.log) | +| WF12M-PFC-0.2 | r100 | 94.75 | 97.60 | 95.90 | [click me](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/wf12m_pfc02_r100/training.log) | +| WF12M-PFC-0.3 | r100 | 94.71 | 97.64 | 96.01 | [click me](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/wf12m_pfc03_r100/training.log) | +| WF12M | r100 | 94.69 | 97.59 | 95.97 | [click me](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/wf12m_r100/training.log) | +| WF42M-PFC-0.2 | r100 | 96.27 | 97.70 | 96.31 | [click me](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/wf42m_pfc02_r100/training.log) | +| WF42M-PFC-0.2 | ViT-T-1.5G | 92.04 | 97.27 | 95.68 | [click me](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/wf42m_pfc02_40epoch_8gpu_vit_t/training.log) | +| WF42M-PFC-0.3 | ViT-B-11G | 97.16 | 97.91 | 97.05 | [click me](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/pfc03_wf42m_vit_b_8gpu/training.log) | + +#### 2. 
Training on Multi-Host GPU + +| Datasets | Backbone(bs*gpus) | **MFR-ALL** | IJB-C(1E-4) | IJB-C(1E-5) | Throughout | log | +|:-----------------|:------------------|:------------|:------------|:------------|:-----------|:-------------------------------------------------------------------------------------------------------------------------------------------| +| WF42M-PFC-0.2 | r50(512*8) | 93.83 | 97.53 | 96.16 | ~5900 | [click me](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/webface42m_r50_bs4k_pfc02/training.log) | +| WF42M-PFC-0.2 | r50(512*16) | 93.96 | 97.46 | 96.12 | ~11000 | [click me](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/webface42m_r50_lr01_pfc02_bs8k_16gpus/training.log) | +| WF42M-PFC-0.2 | r50(128*32) | 94.04 | 97.48 | 95.94 | ~17000 | click me | +| WF42M-PFC-0.2 | r100(128*16) | 96.28 | 97.80 | 96.57 | ~5200 | click me | +| WF42M-PFC-0.2 | r100(256*16) | 96.69 | 97.85 | 96.63 | ~5200 | [click me](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/webface42m_r100_bs4k_pfc02/training.log) | +| WF42M-PFC-0.0018 | r100(512*32) | 93.08 | 97.51 | 95.88 | ~10000 | click me | +| WF42M-PFC-0.2 | r100(128*32) | 96.57 | 97.83 | 96.50 | ~9800 | click me | + +`r100(128*32)` means backbone is r100, batchsize per gpu is 128, the number of gpus is 32. + + + +#### 3. 
ViT For Face Recognition + +| Datasets | Backbone(bs) | FLOPs | **MFR-ALL** | IJB-C(1E-4) | IJB-C(1E-5) | Throughout | log | +|:--------------|:--------------|:------|:------------|:------------|:------------|:-----------|:-----------------------------------------------------------------------------------------------------------------------------| +| WF42M-PFC-0.3 | r18(128*32) | 2.6 | 79.13 | 95.77 | 93.36 | - | click me | +| WF42M-PFC-0.3 | r50(128*32) | 6.3 | 94.03 | 97.48 | 95.94 | - | click me | +| WF42M-PFC-0.3 | r100(128*32) | 12.1 | 96.69 | 97.82 | 96.45 | - | click me | +| WF42M-PFC-0.3 | r200(128*32) | 23.5 | 97.70 | 97.97 | 96.93 | - | click me | +| WF42M-PFC-0.3 | VIT-T(384*64) | 1.5 | 92.24 | 97.31 | 95.97 | ~35000 | click me | +| WF42M-PFC-0.3 | VIT-S(384*64) | 5.7 | 95.87 | 97.73 | 96.57 | ~25000 | [click me](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/pfc03_wf42m_vit_s_64gpu/training.log) | +| WF42M-PFC-0.3 | VIT-B(384*64) | 11.4 | 97.42 | 97.90 | 97.04 | ~13800 | [click me](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/pfc03_wf42m_vit_b_64gpu/training.log) | +| WF42M-PFC-0.3 | VIT-L(384*64) | 25.3 | 97.85 | 98.00 | 97.23 | ~9406 | [click me](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/pfc03_wf42m_vit_l_64gpu/training.log) | + +`WF42M` means WebFace42M, `PFC-0.3` means negivate class centers sample rate is 0.3. + +#### 4. Noisy Datasets + +| Datasets | Backbone | **MFR-ALL** | IJB-C(1E-4) | IJB-C(1E-5) | log | +|:-------------------------|:---------|:------------|:------------|:------------|:---------| +| WF12M-Flip(40%) | r50 | 43.87 | 88.35 | 80.78 | click me | +| WF12M-Flip(40%)-PFC-0.1* | r50 | 80.20 | 96.11 | 93.79 | click me | +| WF12M-Conflict | r50 | 79.93 | 95.30 | 91.56 | click me | +| WF12M-Conflict-PFC-0.3* | r50 | 91.68 | 97.28 | 95.75 | click me | + +`WF12M` means WebFace12M, `+PFC-0.1*` denotes additional abnormal inter-class filtering. 
+ + + +## Speed Benchmark +
+ + +**Arcface-Torch** is an efficient tool for training large-scale face recognition training sets. When the number of classes in the training sets exceeds one million, the partial FC sampling strategy maintains the same accuracy while providing several times faster training performance and lower GPU memory utilization. The partial FC is a sparse variant of the model parallel architecture for large-scale face recognition, utilizing a sparse softmax that dynamically samples a subset of class centers for each training batch. During each iteration, only a sparse portion of the parameters are updated, leading to a significant reduction in GPU memory requirements and computational demands. With the partial FC approach, it is possible to train sets with up to 29 million identities, the largest to date. Furthermore, the partial FC method supports multi-machine distributed training and mixed precision training. + + + +More details see +[speed_benchmark.md](docs/speed_benchmark.md) in docs. + +> 1. Training Speed of Various Parallel Techniques (Samples per Second) on a Tesla V100 32GB x 8 System (Higher is Optimal) + +`-` means training failed because of gpu memory limitations. + +| Number of Identities in Dataset | Data Parallel | Model Parallel | Partial FC 0.1 | +|:--------------------------------|:--------------|:---------------|:---------------| +| 125000 | 4681 | 4824 | 5004 | +| 1400000 | **1672** | 3043 | 4738 | +| 5500000 | **-** | **1389** | 3975 | +| 8000000 | **-** | **-** | 3565 | +| 16000000 | **-** | **-** | 2679 | +| 29000000 | **-** | **-** | **1855** | + +> 2. 
GPU Memory Utilization of Various Parallel Techniques (MB per GPU) on a Tesla V100 32GB x 8 System (Lower is Optimal) + +| Number of Identities in Dataset | Data Parallel | Model Parallel | Partial FC 0.1 | +|:--------------------------------|:--------------|:---------------|:---------------| +| 125000 | 7358 | 5306 | 4868 | +| 1400000 | 32252 | 11178 | 6056 | +| 5500000 | **-** | 32188 | 9854 | +| 8000000 | **-** | **-** | 12310 | +| 16000000 | **-** | **-** | 19950 | +| 29000000 | **-** | **-** | 32324 | + + +## Citations + +``` +@inproceedings{deng2019arcface, + title={Arcface: Additive angular margin loss for deep face recognition}, + author={Deng, Jiankang and Guo, Jia and Xue, Niannan and Zafeiriou, Stefanos}, + booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, + pages={4690--4699}, + year={2019} +} +@inproceedings{An_2022_CVPR, + author={An, Xiang and Deng, Jiankang and Guo, Jia and Feng, Ziyong and Zhu, XuHan and Yang, Jing and Liu, Tongliang}, + title={Killing Two Birds With One Stone: Efficient and Robust Training of Face Recognition CNNs by Partial FC}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + month={June}, + year={2022}, + pages={4042-4051} +} +@inproceedings{zhu2021webface260m, + title={Webface260m: A benchmark unveiling the power of million-scale deep face recognition}, + author={Zhu, Zheng and Huang, Guan and Deng, Jiankang and Ye, Yun and Huang, Junjie and Chen, Xinze and Zhu, Jiagang and Yang, Tian and Lu, Jiwen and Du, Dalong and Zhou, Jie}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={10492--10502}, + year={2021} +} +``` diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/backbones/__init__.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/backbones/__init__.py new file mode 100644 index 00000000..6cea70df --- /dev/null +++ 
b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/backbones/__init__.py @@ -0,0 +1,85 @@ +from .iresnet import iresnet18, iresnet34, iresnet50, iresnet100, iresnet200 +from .mobilefacenet import get_mbf + + +def get_model(name, **kwargs): + # resnet + if name == "r18": + return iresnet18(False, **kwargs) + elif name == "r34": + return iresnet34(False, **kwargs) + elif name == "r50": + return iresnet50(False, **kwargs) + elif name == "r100": + return iresnet100(False, **kwargs) + elif name == "r200": + return iresnet200(False, **kwargs) + elif name == "r2060": + from .iresnet2060 import iresnet2060 + return iresnet2060(False, **kwargs) + + elif name == "mbf": + fp16 = kwargs.get("fp16", False) + num_features = kwargs.get("num_features", 512) + return get_mbf(fp16=fp16, num_features=num_features) + + elif name == "mbf_large": + from .mobilefacenet import get_mbf_large + fp16 = kwargs.get("fp16", False) + num_features = kwargs.get("num_features", 512) + return get_mbf_large(fp16=fp16, num_features=num_features) + + elif name == "vit_t": + num_features = kwargs.get("num_features", 512) + from .vit import VisionTransformer + return VisionTransformer( + img_size=112, patch_size=9, num_classes=num_features, embed_dim=256, depth=12, + num_heads=8, drop_path_rate=0.1, norm_layer="ln", mask_ratio=0.1) + + elif name == "vit_t_dp005_mask0": # For WebFace42M + num_features = kwargs.get("num_features", 512) + from .vit import VisionTransformer + return VisionTransformer( + img_size=112, patch_size=9, num_classes=num_features, embed_dim=256, depth=12, + num_heads=8, drop_path_rate=0.05, norm_layer="ln", mask_ratio=0.0) + + elif name == "vit_s": + num_features = kwargs.get("num_features", 512) + from .vit import VisionTransformer + return VisionTransformer( + img_size=112, patch_size=9, num_classes=num_features, embed_dim=512, depth=12, + num_heads=8, drop_path_rate=0.1, norm_layer="ln", mask_ratio=0.1) + + elif name == "vit_s_dp005_mask_0": # For WebFace42M + 
num_features = kwargs.get("num_features", 512) + from .vit import VisionTransformer + return VisionTransformer( + img_size=112, patch_size=9, num_classes=num_features, embed_dim=512, depth=12, + num_heads=8, drop_path_rate=0.05, norm_layer="ln", mask_ratio=0.0) + + elif name == "vit_b": + # this is a feature + num_features = kwargs.get("num_features", 512) + from .vit import VisionTransformer + return VisionTransformer( + img_size=112, patch_size=9, num_classes=num_features, embed_dim=512, depth=24, + num_heads=8, drop_path_rate=0.1, norm_layer="ln", mask_ratio=0.1, using_checkpoint=True) + + elif name == "vit_b_dp005_mask_005": # For WebFace42M + # this is a feature + num_features = kwargs.get("num_features", 512) + from .vit import VisionTransformer + return VisionTransformer( + img_size=112, patch_size=9, num_classes=num_features, embed_dim=512, depth=24, + num_heads=8, drop_path_rate=0.05, norm_layer="ln", mask_ratio=0.05, using_checkpoint=True) + + elif name == "vit_l_dp005_mask_005": # For WebFace42M + # this is a feature + num_features = kwargs.get("num_features", 512) + from .vit import VisionTransformer + return VisionTransformer( + img_size=112, patch_size=9, num_classes=num_features, embed_dim=768, depth=24, + num_heads=8, drop_path_rate=0.05, norm_layer="ln", mask_ratio=0.05, using_checkpoint=True) + + else: + raise ValueError() diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/backbones/__pycache__/__init__.cpython-39.pyc b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/backbones/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 00000000..6005b34c Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/backbones/__pycache__/__init__.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/backbones/__pycache__/iresnet.cpython-39.pyc 
b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/backbones/__pycache__/iresnet.cpython-39.pyc new file mode 100644 index 00000000..a5a95b53 Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/backbones/__pycache__/iresnet.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/backbones/__pycache__/mobilefacenet.cpython-39.pyc b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/backbones/__pycache__/mobilefacenet.cpython-39.pyc new file mode 100644 index 00000000..b994f4dc Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/backbones/__pycache__/mobilefacenet.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/backbones/iresnet.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/backbones/iresnet.py new file mode 100644 index 00000000..6f2347c9 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/backbones/iresnet.py @@ -0,0 +1,194 @@ +import torch +from torch import nn +from torch.utils.checkpoint import checkpoint + +__all__ = ['iresnet18', 'iresnet34', 'iresnet50', 'iresnet100', 'iresnet200'] +using_ckpt = False + +def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, + out_planes, + kernel_size=3, + stride=stride, + padding=dilation, + groups=groups, + bias=False, + dilation=dilation) + + +def conv1x1(in_planes, out_planes, stride=1): + """1x1 convolution""" + return nn.Conv2d(in_planes, + out_planes, + kernel_size=1, + stride=stride, + bias=False) + + +class IBasicBlock(nn.Module): + expansion = 1 + def __init__(self, inplanes, planes, stride=1, downsample=None, + groups=1, base_width=64, dilation=1): + super(IBasicBlock, self).__init__() + if groups != 1 or base_width != 64: + raise 
ValueError('BasicBlock only supports groups=1 and base_width=64') + if dilation > 1: + raise NotImplementedError("Dilation > 1 not supported in BasicBlock") + self.bn1 = nn.BatchNorm2d(inplanes, eps=1e-05,) + self.conv1 = conv3x3(inplanes, planes) + self.bn2 = nn.BatchNorm2d(planes, eps=1e-05,) + self.prelu = nn.PReLU(planes) + self.conv2 = conv3x3(planes, planes, stride) + self.bn3 = nn.BatchNorm2d(planes, eps=1e-05,) + self.downsample = downsample + self.stride = stride + + def forward_impl(self, x): + identity = x + out = self.bn1(x) + out = self.conv1(out) + out = self.bn2(out) + out = self.prelu(out) + out = self.conv2(out) + out = self.bn3(out) + if self.downsample is not None: + identity = self.downsample(x) + out += identity + return out + + def forward(self, x): + if self.training and using_ckpt: + return checkpoint(self.forward_impl, x) + else: + return self.forward_impl(x) + + +class IResNet(nn.Module): + fc_scale = 7 * 7 + def __init__(self, + block, layers, dropout=0, num_features=512, zero_init_residual=False, + groups=1, width_per_group=64, replace_stride_with_dilation=None, fp16=False): + super(IResNet, self).__init__() + self.extra_gflops = 0.0 + self.fp16 = fp16 + self.inplanes = 64 + self.dilation = 1 + if replace_stride_with_dilation is None: + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + self.groups = groups + self.base_width = width_per_group + self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(self.inplanes, eps=1e-05) + self.prelu = nn.PReLU(self.inplanes) + self.layer1 = self._make_layer(block, 64, layers[0], stride=2) + self.layer2 = self._make_layer(block, + 128, + layers[1], + stride=2, + dilate=replace_stride_with_dilation[0]) + self.layer3 = self._make_layer(block, + 256, + 
layers[2], + stride=2, + dilate=replace_stride_with_dilation[1]) + self.layer4 = self._make_layer(block, + 512, + layers[3], + stride=2, + dilate=replace_stride_with_dilation[2]) + self.bn2 = nn.BatchNorm2d(512 * block.expansion, eps=1e-05,) + self.dropout = nn.Dropout(p=dropout, inplace=True) + self.fc = nn.Linear(512 * block.expansion * self.fc_scale, num_features) + self.features = nn.BatchNorm1d(num_features, eps=1e-05) + nn.init.constant_(self.features.weight, 1.0) + self.features.weight.requires_grad = False + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.normal_(m.weight, 0, 0.1) + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + if zero_init_residual: + for m in self.modules(): + if isinstance(m, IBasicBlock): + nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, stride=1, dilate=False): + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + nn.BatchNorm2d(planes * block.expansion, eps=1e-05, ), + ) + layers = [] + layers.append( + block(self.inplanes, planes, stride, downsample, self.groups, + self.base_width, previous_dilation)) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append( + block(self.inplanes, + planes, + groups=self.groups, + base_width=self.base_width, + dilation=self.dilation)) + + return nn.Sequential(*layers) + + def forward(self, x): + with torch.cuda.amp.autocast(self.fp16): + x = self.conv1(x) + x = self.bn1(x) + x = self.prelu(x) + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + x = self.bn2(x) + x = torch.flatten(x, 1) + x = self.dropout(x) + x = self.fc(x.float() if self.fp16 else x) + x = self.features(x) + return x + + +def 
_iresnet(arch, block, layers, pretrained, progress, **kwargs): + model = IResNet(block, layers, **kwargs) + if pretrained: + raise ValueError() + return model + + +def iresnet18(pretrained=False, progress=True, **kwargs): + return _iresnet('iresnet18', IBasicBlock, [2, 2, 2, 2], pretrained, + progress, **kwargs) + + +def iresnet34(pretrained=False, progress=True, **kwargs): + return _iresnet('iresnet34', IBasicBlock, [3, 4, 6, 3], pretrained, + progress, **kwargs) + + +def iresnet50(pretrained=False, progress=True, **kwargs): + return _iresnet('iresnet50', IBasicBlock, [3, 4, 14, 3], pretrained, + progress, **kwargs) + + +def iresnet100(pretrained=False, progress=True, **kwargs): + return _iresnet('iresnet100', IBasicBlock, [3, 13, 30, 3], pretrained, + progress, **kwargs) + + +def iresnet200(pretrained=False, progress=True, **kwargs): + return _iresnet('iresnet200', IBasicBlock, [6, 26, 60, 6], pretrained, + progress, **kwargs) diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/backbones/iresnet2060.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/backbones/iresnet2060.py new file mode 100644 index 00000000..21d11221 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/backbones/iresnet2060.py @@ -0,0 +1,176 @@ +import torch +from torch import nn + +assert torch.__version__ >= "1.8.1" +from torch.utils.checkpoint import checkpoint_sequential + +__all__ = ['iresnet2060'] + + +def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, + out_planes, + kernel_size=3, + stride=stride, + padding=dilation, + groups=groups, + bias=False, + dilation=dilation) + + +def conv1x1(in_planes, out_planes, stride=1): + """1x1 convolution""" + return nn.Conv2d(in_planes, + out_planes, + kernel_size=1, + stride=stride, + bias=False) + + +class IBasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, 
planes, stride=1, downsample=None, + groups=1, base_width=64, dilation=1): + super(IBasicBlock, self).__init__() + if groups != 1 or base_width != 64: + raise ValueError('BasicBlock only supports groups=1 and base_width=64') + if dilation > 1: + raise NotImplementedError("Dilation > 1 not supported in BasicBlock") + self.bn1 = nn.BatchNorm2d(inplanes, eps=1e-05, ) + self.conv1 = conv3x3(inplanes, planes) + self.bn2 = nn.BatchNorm2d(planes, eps=1e-05, ) + self.prelu = nn.PReLU(planes) + self.conv2 = conv3x3(planes, planes, stride) + self.bn3 = nn.BatchNorm2d(planes, eps=1e-05, ) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + out = self.bn1(x) + out = self.conv1(out) + out = self.bn2(out) + out = self.prelu(out) + out = self.conv2(out) + out = self.bn3(out) + if self.downsample is not None: + identity = self.downsample(x) + out += identity + return out + + +class IResNet(nn.Module): + fc_scale = 7 * 7 + + def __init__(self, + block, layers, dropout=0, num_features=512, zero_init_residual=False, + groups=1, width_per_group=64, replace_stride_with_dilation=None, fp16=False): + super(IResNet, self).__init__() + self.fp16 = fp16 + self.inplanes = 64 + self.dilation = 1 + if replace_stride_with_dilation is None: + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + self.groups = groups + self.base_width = width_per_group + self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(self.inplanes, eps=1e-05) + self.prelu = nn.PReLU(self.inplanes) + self.layer1 = self._make_layer(block, 64, layers[0], stride=2) + self.layer2 = self._make_layer(block, + 128, + layers[1], + stride=2, + dilate=replace_stride_with_dilation[0]) + self.layer3 = self._make_layer(block, + 256, + layers[2], + 
stride=2, + dilate=replace_stride_with_dilation[1]) + self.layer4 = self._make_layer(block, + 512, + layers[3], + stride=2, + dilate=replace_stride_with_dilation[2]) + self.bn2 = nn.BatchNorm2d(512 * block.expansion, eps=1e-05, ) + self.dropout = nn.Dropout(p=dropout, inplace=True) + self.fc = nn.Linear(512 * block.expansion * self.fc_scale, num_features) + self.features = nn.BatchNorm1d(num_features, eps=1e-05) + nn.init.constant_(self.features.weight, 1.0) + self.features.weight.requires_grad = False + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.normal_(m.weight, 0, 0.1) + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + if zero_init_residual: + for m in self.modules(): + if isinstance(m, IBasicBlock): + nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, stride=1, dilate=False): + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + nn.BatchNorm2d(planes * block.expansion, eps=1e-05, ), + ) + layers = [] + layers.append( + block(self.inplanes, planes, stride, downsample, self.groups, + self.base_width, previous_dilation)) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append( + block(self.inplanes, + planes, + groups=self.groups, + base_width=self.base_width, + dilation=self.dilation)) + + return nn.Sequential(*layers) + + def checkpoint(self, func, num_seg, x): + if self.training: + return checkpoint_sequential(func, num_seg, x) + else: + return func(x) + + def forward(self, x): + with torch.cuda.amp.autocast(self.fp16): + x = self.conv1(x) + x = self.bn1(x) + x = self.prelu(x) + x = self.layer1(x) + x = self.checkpoint(self.layer2, 20, x) + x = self.checkpoint(self.layer3, 100, x) + x = 
self.layer4(x) + x = self.bn2(x) + x = torch.flatten(x, 1) + x = self.dropout(x) + x = self.fc(x.float() if self.fp16 else x) + x = self.features(x) + return x + + +def _iresnet(arch, block, layers, pretrained, progress, **kwargs): + model = IResNet(block, layers, **kwargs) + if pretrained: + raise ValueError() + return model + + +def iresnet2060(pretrained=False, progress=True, **kwargs): + return _iresnet('iresnet2060', IBasicBlock, [3, 128, 1024 - 128, 3], pretrained, progress, **kwargs) diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/backbones/mobilefacenet.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/backbones/mobilefacenet.py new file mode 100644 index 00000000..007d136a --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/backbones/mobilefacenet.py @@ -0,0 +1,147 @@ +''' +Adapted from https://github.com/cavalleria/cavaface.pytorch/blob/master/backbone/mobilefacenet.py +Original author cavalleria +''' + +import torch.nn as nn +from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, Sequential, Module +import torch + + +class Flatten(Module): + def forward(self, x): + return x.view(x.size(0), -1) + + +class ConvBlock(Module): + def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1): + super(ConvBlock, self).__init__() + self.layers = nn.Sequential( + Conv2d(in_c, out_c, kernel, groups=groups, stride=stride, padding=padding, bias=False), + BatchNorm2d(num_features=out_c), + PReLU(num_parameters=out_c) + ) + + def forward(self, x): + return self.layers(x) + + +class LinearBlock(Module): + def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1): + super(LinearBlock, self).__init__() + self.layers = nn.Sequential( + Conv2d(in_c, out_c, kernel, stride, padding, groups=groups, bias=False), + BatchNorm2d(num_features=out_c) + ) + + def forward(self, x): + return self.layers(x) + + +class 
DepthWise(Module): + def __init__(self, in_c, out_c, residual=False, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=1): + super(DepthWise, self).__init__() + self.residual = residual + self.layers = nn.Sequential( + ConvBlock(in_c, out_c=groups, kernel=(1, 1), padding=(0, 0), stride=(1, 1)), + ConvBlock(groups, groups, groups=groups, kernel=kernel, padding=padding, stride=stride), + LinearBlock(groups, out_c, kernel=(1, 1), padding=(0, 0), stride=(1, 1)) + ) + + def forward(self, x): + short_cut = None + if self.residual: + short_cut = x + x = self.layers(x) + if self.residual: + output = short_cut + x + else: + output = x + return output + + +class Residual(Module): + def __init__(self, c, num_block, groups, kernel=(3, 3), stride=(1, 1), padding=(1, 1)): + super(Residual, self).__init__() + modules = [] + for _ in range(num_block): + modules.append(DepthWise(c, c, True, kernel, stride, padding, groups)) + self.layers = Sequential(*modules) + + def forward(self, x): + return self.layers(x) + + +class GDC(Module): + def __init__(self, embedding_size): + super(GDC, self).__init__() + self.layers = nn.Sequential( + LinearBlock(512, 512, groups=512, kernel=(7, 7), stride=(1, 1), padding=(0, 0)), + Flatten(), + Linear(512, embedding_size, bias=False), + BatchNorm1d(embedding_size)) + + def forward(self, x): + return self.layers(x) + + +class MobileFaceNet(Module): + def __init__(self, fp16=False, num_features=512, blocks=(1, 4, 6, 2), scale=2): + super(MobileFaceNet, self).__init__() + self.scale = scale + self.fp16 = fp16 + self.layers = nn.ModuleList() + self.layers.append( + ConvBlock(3, 64 * self.scale, kernel=(3, 3), stride=(2, 2), padding=(1, 1)) + ) + if blocks[0] == 1: + self.layers.append( + ConvBlock(64 * self.scale, 64 * self.scale, kernel=(3, 3), stride=(1, 1), padding=(1, 1), groups=64) + ) + else: + self.layers.append( + Residual(64 * self.scale, num_block=blocks[0], groups=128, kernel=(3, 3), stride=(1, 1), padding=(1, 1)), + ) + + 
self.layers.extend( + [ + DepthWise(64 * self.scale, 64 * self.scale, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=128), + Residual(64 * self.scale, num_block=blocks[1], groups=128, kernel=(3, 3), stride=(1, 1), padding=(1, 1)), + DepthWise(64 * self.scale, 128 * self.scale, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=256), + Residual(128 * self.scale, num_block=blocks[2], groups=256, kernel=(3, 3), stride=(1, 1), padding=(1, 1)), + DepthWise(128 * self.scale, 128 * self.scale, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=512), + Residual(128 * self.scale, num_block=blocks[3], groups=256, kernel=(3, 3), stride=(1, 1), padding=(1, 1)), + ]) + + self.conv_sep = ConvBlock(128 * self.scale, 512, kernel=(1, 1), stride=(1, 1), padding=(0, 0)) + self.features = GDC(num_features) + self._initialize_weights() + + def _initialize_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + elif isinstance(m, nn.Linear): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + if m.bias is not None: + m.bias.data.zero_() + + def forward(self, x): + with torch.cuda.amp.autocast(self.fp16): + for func in self.layers: + x = func(x) + x = self.conv_sep(x.float() if self.fp16 else x) + x = self.features(x) + return x + + +def get_mbf(fp16, num_features, blocks=(1, 4, 6, 2), scale=2): + return MobileFaceNet(fp16, num_features, blocks, scale=scale) + +def get_mbf_large(fp16, num_features, blocks=(2, 8, 12, 4), scale=4): + return MobileFaceNet(fp16, num_features, blocks, scale=scale) diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/backbones/vit.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/backbones/vit.py new file mode 100644 index 00000000..23977d2e --- /dev/null 
+++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/backbones/vit.py @@ -0,0 +1,280 @@ +import torch +import torch.nn as nn +from timm.models.layers import DropPath, to_2tuple, trunc_normal_ +from typing import Optional, Callable + +class Mlp(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU6, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class VITBatchNorm(nn.Module): + def __init__(self, num_features): + super().__init__() + self.num_features = num_features + self.bn = nn.BatchNorm1d(num_features=num_features) + + def forward(self, x): + return self.bn(x) + + +class Attention(nn.Module): + def __init__(self, + dim: int, + num_heads: int = 8, + qkv_bias: bool = False, + qk_scale: Optional[None] = None, + attn_drop: float = 0., + proj_drop: float = 0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights + self.scale = qk_scale or head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + + with torch.cuda.amp.autocast(True): + batch_size, num_token, embed_dim = x.shape + #qkv is [3,batch_size,num_heads,num_token, embed_dim//num_heads] + qkv = self.qkv(x).reshape( + batch_size, num_token, 3, self.num_heads, embed_dim // self.num_heads).permute(2, 0, 3, 1, 4) + with torch.cuda.amp.autocast(False): + q, k, v = qkv[0].float(), 
qkv[1].float(), qkv[2].float() + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x = (attn @ v).transpose(1, 2).reshape(batch_size, num_token, embed_dim) + with torch.cuda.amp.autocast(True): + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__(self, + dim: int, + num_heads: int, + num_patches: int, + mlp_ratio: float = 4., + qkv_bias: bool = False, + qk_scale: Optional[None] = None, + drop: float = 0., + attn_drop: float = 0., + drop_path: float = 0., + act_layer: Callable = nn.ReLU6, + norm_layer: str = "ln", + patch_n: int = 144): + super().__init__() + + if norm_layer == "bn": + self.norm1 = VITBatchNorm(num_features=num_patches) + self.norm2 = VITBatchNorm(num_features=num_patches) + elif norm_layer == "ln": + self.norm1 = nn.LayerNorm(dim) + self.norm2 = nn.LayerNorm(dim) + + self.attn = Attention( + dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath( + drop_path) if drop_path > 0. 
else nn.Identity() + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, + act_layer=act_layer, drop=drop) + self.extra_gflops = (num_heads * patch_n * (dim//num_heads)*patch_n * 2) / (1000**3) + + def forward(self, x): + x = x + self.drop_path(self.attn(self.norm1(x))) + with torch.cuda.amp.autocast(True): + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class PatchEmbed(nn.Module): + def __init__(self, img_size=108, patch_size=9, in_channels=3, embed_dim=768): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + num_patches = (img_size[1] // patch_size[1]) * \ + (img_size[0] // patch_size[0]) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + self.proj = nn.Conv2d(in_channels, embed_dim, + kernel_size=patch_size, stride=patch_size) + + def forward(self, x): + batch_size, channels, height, width = x.shape + assert height == self.img_size[0] and width == self.img_size[1], \ + f"Input image size ({height}*{width}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." 
+ x = self.proj(x).flatten(2).transpose(1, 2) + return x + + +class VisionTransformer(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + + def __init__(self, + img_size: int = 112, + patch_size: int = 16, + in_channels: int = 3, + num_classes: int = 1000, + embed_dim: int = 768, + depth: int = 12, + num_heads: int = 12, + mlp_ratio: float = 4., + qkv_bias: bool = False, + qk_scale: Optional[None] = None, + drop_rate: float = 0., + attn_drop_rate: float = 0., + drop_path_rate: float = 0., + hybrid_backbone: Optional[None] = None, + norm_layer: str = "ln", + mask_ratio = 0.1, + using_checkpoint = False, + ): + super().__init__() + self.num_classes = num_classes + # num_features for consistency with other models + self.num_features = self.embed_dim = embed_dim + + if hybrid_backbone is not None: + raise ValueError + else: + self.patch_embed = PatchEmbed(img_size=img_size, patch_size=patch_size, in_channels=in_channels, embed_dim=embed_dim) + self.mask_ratio = mask_ratio + self.using_checkpoint = using_checkpoint + num_patches = self.patch_embed.num_patches + self.num_patches = num_patches + + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) + self.pos_drop = nn.Dropout(p=drop_rate) + + # stochastic depth decay rule + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] + patch_n = (img_size//patch_size)**2 + self.blocks = nn.ModuleList( + [ + Block(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + num_patches=num_patches, patch_n=patch_n) + for i in range(depth)] + ) + self.extra_gflops = 0.0 + for _block in self.blocks: + self.extra_gflops += _block.extra_gflops + + if norm_layer == "ln": + self.norm = nn.LayerNorm(embed_dim) + elif norm_layer == "bn": + self.norm = VITBatchNorm(self.num_patches) + + # features head + self.feature = nn.Sequential( + 
nn.Linear(in_features=embed_dim * num_patches, out_features=embed_dim, bias=False), + nn.BatchNorm1d(num_features=embed_dim, eps=2e-5), + nn.Linear(in_features=embed_dim, out_features=num_classes, bias=False), + nn.BatchNorm1d(num_features=num_classes, eps=2e-5) + ) + + self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + torch.nn.init.normal_(self.mask_token, std=.02) + trunc_normal_(self.pos_embed, std=.02) + # trunc_normal_(self.cls_token, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def get_classifier(self): + return self.head + + def random_masking(self, x, mask_ratio=0.1): + """ + Perform per-sample random masking by per-sample shuffling. + Per-sample shuffling is done by argsort random noise. 
+ x: [N, L, D], sequence + """ + N, L, D = x.size() # batch, length, dim + len_keep = int(L * (1 - mask_ratio)) + + noise = torch.rand(N, L, device=x.device) # noise in [0, 1] + + # sort noise for each sample + # ascend: small is keep, large is remove + ids_shuffle = torch.argsort(noise, dim=1) + ids_restore = torch.argsort(ids_shuffle, dim=1) + + # keep the first subset + ids_keep = ids_shuffle[:, :len_keep] + x_masked = torch.gather( + x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, D)) + + # generate the binary mask: 0 is keep, 1 is remove + mask = torch.ones([N, L], device=x.device) + mask[:, :len_keep] = 0 + # unshuffle to get the binary mask + mask = torch.gather(mask, dim=1, index=ids_restore) + + return x_masked, mask, ids_restore + + def forward_features(self, x): + B = x.shape[0] + x = self.patch_embed(x) + x = x + self.pos_embed + x = self.pos_drop(x) + + if self.training and self.mask_ratio > 0: + x, _, ids_restore = self.random_masking(x) + + for func in self.blocks: + if self.using_checkpoint and self.training: + from torch.utils.checkpoint import checkpoint + x = checkpoint(func, x) + else: + x = func(x) + x = self.norm(x.float()) + + if self.training and self.mask_ratio > 0: + mask_tokens = self.mask_token.repeat(x.shape[0], ids_restore.shape[1] - x.shape[1], 1) + x_ = torch.cat([x[:, :, :], mask_tokens], dim=1) # no cls token + x_ = torch.gather(x_, dim=1, index=ids_restore.unsqueeze(-1).repeat(1, 1, x.shape[2])) # unshuffle + x = x_ + return torch.reshape(x, (B, self.num_patches * self.embed_dim)) + + def forward(self, x): + x = self.forward_features(x) + x = self.feature(x) + return x diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/3millions.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/3millions.py new file mode 100644 index 00000000..6bb660bd --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/3millions.py @@ -0,0 +1,23 
@@ +from easydict import EasyDict as edict + +# configs for test speed + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "mbf" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 0.1 +config.fp16 = True +config.momentum = 0.9 +config.weight_decay = 5e-4 +config.batch_size = 512 # total_batch_size = batch_size * num_gpus +config.lr = 0.1 # batch size is 512 + +config.rec = "synthetic" +config.num_classes = 30 * 10000 +config.num_image = 100000 +config.num_epoch = 30 +config.warmup_epoch = -1 +config.val_targets = [] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/__init__.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/base.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/base.py new file mode 100644 index 00000000..c64c943e --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/base.py @@ -0,0 +1,59 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() + +# Margin Base Softmax +config.margin_list = (1.0, 0.5, 0.0) +config.network = "r50" +config.resume = False +config.save_all_states = False +config.output = "ms1mv3_arcface_r50" + +config.embedding_size = 512 + +# Partial FC +config.sample_rate = 1 +config.interclass_filtering_threshold = 0 + +config.fp16 = False +config.batch_size = 128 + +# For SGD +config.optimizer = "sgd" +config.lr = 0.1 +config.momentum = 0.9 +config.weight_decay = 5e-4 + +# For AdamW +# config.optimizer = "adamw" +# config.lr = 0.001 +# config.weight_decay = 0.1 + +config.verbose = 2000 +config.frequent = 10 + +# For Large Sacle Dataset, such as WebFace42M +config.dali = False + 
+# Gradient ACC +config.gradient_acc = 1 + +# setup seed +config.seed = 2048 + +# dataload numworkers +config.num_workers = 2 + +# WandB Logger +config.wandb_key = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" +config.suffix_run_name = None +config.using_wandb = False +config.wandb_entity = "entity" +config.wandb_project = "project" +config.wandb_log_all = True +config.save_artifacts = False +config.wandb_resume = False # resume wandb run: Only if the you wand t resume the last run that it was interrupted \ No newline at end of file diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/glint360k_mbf.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/glint360k_mbf.py new file mode 100644 index 00000000..b32f0016 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/glint360k_mbf.py @@ -0,0 +1,27 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "mbf" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 1.0 +config.fp16 = True +config.momentum = 0.9 +config.weight_decay = 1e-4 +config.batch_size = 128 +config.lr = 0.1 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/glint360k" +config.num_classes = 360232 +config.num_image = 17091657 +config.num_epoch = 20 +config.warmup_epoch = 0 +config.val_targets = ['lfw', 'cfp_fp', "agedb_30"] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/glint360k_r100.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/glint360k_r100.py new file mode 100644 index 00000000..3b8bbb78 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/glint360k_r100.py @@ -0,0 +1,27 @@ +from easydict import EasyDict 
as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "r100" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 1.0 +config.fp16 = True +config.momentum = 0.9 +config.weight_decay = 1e-4 +config.batch_size = 128 +config.lr = 0.1 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/glint360k" +config.num_classes = 360232 +config.num_image = 17091657 +config.num_epoch = 20 +config.warmup_epoch = 0 +config.val_targets = ['lfw', 'cfp_fp', "agedb_30"] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/glint360k_r50.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/glint360k_r50.py new file mode 100644 index 00000000..4eeb28f8 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/glint360k_r50.py @@ -0,0 +1,27 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "r50" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 1.0 +config.fp16 = True +config.momentum = 0.9 +config.weight_decay = 1e-4 +config.batch_size = 128 +config.lr = 0.1 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/glint360k" +config.num_classes = 360232 +config.num_image = 17091657 +config.num_epoch = 20 +config.warmup_epoch = 0 +config.val_targets = ['lfw', 'cfp_fp', "agedb_30"] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/ms1mv2_mbf.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/ms1mv2_mbf.py new file mode 100644 index 00000000..255a51ad --- /dev/null +++ 
b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/ms1mv2_mbf.py @@ -0,0 +1,27 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.5, 0.0) +config.network = "mbf" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 1.0 +config.fp16 = True +config.momentum = 0.9 +config.weight_decay = 1e-4 +config.batch_size = 128 +config.lr = 0.1 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/faces_emore" +config.num_classes = 85742 +config.num_image = 5822653 +config.num_epoch = 40 +config.warmup_epoch = 0 +config.val_targets = ['lfw', 'cfp_fp', "agedb_30"] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/ms1mv2_r100.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/ms1mv2_r100.py new file mode 100644 index 00000000..36773489 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/ms1mv2_r100.py @@ -0,0 +1,27 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.5, 0.0) +config.network = "r100" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 1.0 +config.fp16 = True +config.momentum = 0.9 +config.weight_decay = 5e-4 +config.batch_size = 128 +config.lr = 0.1 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/faces_emore" +config.num_classes = 85742 +config.num_image = 5822653 +config.num_epoch = 20 +config.warmup_epoch = 0 +config.val_targets = ['lfw', 'cfp_fp', "agedb_30"] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/ms1mv2_r50.py 
b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/ms1mv2_r50.py new file mode 100644 index 00000000..2dab4d35 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/ms1mv2_r50.py @@ -0,0 +1,27 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.5, 0.0) +config.network = "r50" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 1.0 +config.fp16 = True +config.momentum = 0.9 +config.weight_decay = 5e-4 +config.batch_size = 128 +config.lr = 0.1 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/faces_emore" +config.num_classes = 85742 +config.num_image = 5822653 +config.num_epoch = 20 +config.warmup_epoch = 0 +config.val_targets = ['lfw', 'cfp_fp', "agedb_30"] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/ms1mv3_mbf.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/ms1mv3_mbf.py new file mode 100644 index 00000000..731b4a26 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/ms1mv3_mbf.py @@ -0,0 +1,27 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.5, 0.0) +config.network = "mbf" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 1.0 +config.fp16 = True +config.momentum = 0.9 +config.weight_decay = 1e-4 +config.batch_size = 128 +config.lr = 0.1 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/ms1m-retinaface-t1" +config.num_classes = 93431 +config.num_image = 5179510 +config.num_epoch = 40 +config.warmup_epoch = 0 +config.val_targets = ['lfw', 'cfp_fp', "agedb_30"] diff --git 
a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/ms1mv3_r100.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/ms1mv3_r100.py new file mode 100644 index 00000000..e7af3cef --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/ms1mv3_r100.py @@ -0,0 +1,27 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.5, 0.0) +config.network = "r100" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 1.0 +config.fp16 = True +config.momentum = 0.9 +config.weight_decay = 5e-4 +config.batch_size = 128 +config.lr = 0.1 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/ms1m-retinaface-t1" +config.num_classes = 93431 +config.num_image = 5179510 +config.num_epoch = 20 +config.warmup_epoch = 0 +config.val_targets = ['lfw', 'cfp_fp', "agedb_30"] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/ms1mv3_r50.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/ms1mv3_r50.py new file mode 100644 index 00000000..f1467f0a --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/ms1mv3_r50.py @@ -0,0 +1,27 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.5, 0.0) +config.network = "r50" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 1.0 +config.fp16 = True +config.momentum = 0.9 +config.weight_decay = 5e-4 +config.batch_size = 128 +config.lr = 0.1 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/ms1m-retinaface-t1" +config.num_classes = 93431 +config.num_image = 5179510 
+config.num_epoch = 20 +config.warmup_epoch = 0 +config.val_targets = ['lfw', 'cfp_fp', "agedb_30"] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/ms1mv3_r50_onegpu.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/ms1mv3_r50_onegpu.py new file mode 100644 index 00000000..1ce7e140 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/ms1mv3_r50_onegpu.py @@ -0,0 +1,27 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.5, 0.0) +config.network = "r50" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 1.0 +config.fp16 = True +config.momentum = 0.9 +config.weight_decay = 5e-4 +config.batch_size = 128 +config.lr = 0.02 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/ms1m-retinaface-t1" +config.num_classes = 93431 +config.num_image = 5179510 +config.num_epoch = 20 +config.warmup_epoch = 0 +config.val_targets = ['lfw', 'cfp_fp', "agedb_30"] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf12m_conflict_r50.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf12m_conflict_r50.py new file mode 100644 index 00000000..de94fcb3 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf12m_conflict_r50.py @@ -0,0 +1,28 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "r50" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 1.0 +config.interclass_filtering_threshold = 0 +config.fp16 = True +config.weight_decay = 5e-4 +config.batch_size = 128 
+config.optimizer = "sgd" +config.lr = 0.1 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/WebFace12M_Conflict" +config.num_classes = 1017970 +config.num_image = 12720066 +config.num_epoch = 20 +config.warmup_epoch = config.num_epoch // 10 +config.val_targets = [] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf12m_conflict_r50_pfc03_filter04.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf12m_conflict_r50_pfc03_filter04.py new file mode 100644 index 00000000..a766f415 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf12m_conflict_r50_pfc03_filter04.py @@ -0,0 +1,28 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "r50" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 0.3 +config.interclass_filtering_threshold = 0.4 +config.fp16 = True +config.weight_decay = 5e-4 +config.batch_size = 128 +config.optimizer = "sgd" +config.lr = 0.1 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/WebFace12M_Conflict" +config.num_classes = 1017970 +config.num_image = 12720066 +config.num_epoch = 20 +config.warmup_epoch = config.num_epoch // 10 +config.val_targets = [] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf12m_flip_pfc01_filter04_r50.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf12m_flip_pfc01_filter04_r50.py new file mode 100644 index 00000000..2c1018b7 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf12m_flip_pfc01_filter04_r50.py @@ -0,0 +1,28 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G 
tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "r50" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 0.1 +config.interclass_filtering_threshold = 0.4 +config.fp16 = True +config.weight_decay = 5e-4 +config.batch_size = 128 +config.optimizer = "sgd" +config.lr = 0.1 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/WebFace12M_FLIP40" +config.num_classes = 617970 +config.num_image = 12720066 +config.num_epoch = 20 +config.warmup_epoch = config.num_epoch // 10 +config.val_targets = [] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf12m_flip_r50.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf12m_flip_r50.py new file mode 100644 index 00000000..fde56fed --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf12m_flip_r50.py @@ -0,0 +1,28 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "r50" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 1.0 +config.interclass_filtering_threshold = 0 +config.fp16 = True +config.weight_decay = 5e-4 +config.batch_size = 128 +config.optimizer = "sgd" +config.lr = 0.1 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/WebFace12M_FLIP40" +config.num_classes = 617970 +config.num_image = 12720066 +config.num_epoch = 20 +config.warmup_epoch = config.num_epoch // 10 +config.val_targets = [] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf12m_mbf.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf12m_mbf.py new file mode 100644 index 00000000..d1cb93b2 --- /dev/null +++ 
b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf12m_mbf.py @@ -0,0 +1,28 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "mbf" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 1.0 +config.interclass_filtering_threshold = 0 +config.fp16 = True +config.weight_decay = 1e-4 +config.batch_size = 128 +config.optimizer = "sgd" +config.lr = 0.1 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/WebFace12M" +config.num_classes = 617970 +config.num_image = 12720066 +config.num_epoch = 20 +config.warmup_epoch = 0 +config.val_targets = [] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf12m_pfc02_r100.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf12m_pfc02_r100.py new file mode 100644 index 00000000..1062b876 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf12m_pfc02_r100.py @@ -0,0 +1,29 @@ + +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "r100" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 0.2 +config.interclass_filtering_threshold = 0 +config.fp16 = True +config.weight_decay = 5e-4 +config.batch_size = 128 +config.optimizer = "sgd" +config.lr = 0.1 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/WebFace12M" +config.num_classes = 617970 +config.num_image = 12720066 +config.num_epoch = 20 +config.warmup_epoch = 0 +config.val_targets = [] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf12m_r100.py 
b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf12m_r100.py new file mode 100644 index 00000000..65bfa1be --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf12m_r100.py @@ -0,0 +1,29 @@ + +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "r100" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 1.0 +config.interclass_filtering_threshold = 0 +config.fp16 = True +config.weight_decay = 5e-4 +config.batch_size = 128 +config.optimizer = "sgd" +config.lr = 0.1 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/WebFace12M" +config.num_classes = 617970 +config.num_image = 12720066 +config.num_epoch = 20 +config.warmup_epoch = 0 +config.val_targets = [] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf12m_r50.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf12m_r50.py new file mode 100644 index 00000000..2a728466 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf12m_r50.py @@ -0,0 +1,28 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "r50" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 1.0 +config.interclass_filtering_threshold = 0 +config.fp16 = True +config.weight_decay = 5e-4 +config.batch_size = 128 +config.optimizer = "sgd" +config.lr = 0.1 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/WebFace12M" +config.num_classes = 617970 +config.num_image = 12720066 +config.num_epoch = 20 +config.warmup_epoch = 0 
+config.val_targets = [] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc0008_32gpu_r100.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc0008_32gpu_r100.py new file mode 100644 index 00000000..2885816c --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc0008_32gpu_r100.py @@ -0,0 +1,27 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "r100" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 0 +config.fp16 = True +config.momentum = 0.9 +config.weight_decay = 5e-4 +config.batch_size = 512 +config.lr = 0.4 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/WebFace42M" +config.num_classes = 2059906 +config.num_image = 42474557 +config.num_epoch = 20 +config.warmup_epoch = config.num_epoch // 10 +config.val_targets = ["lfw", "cfp_fp", "agedb_30"] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc02_16gpus_mbf_bs8k.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc02_16gpus_mbf_bs8k.py new file mode 100644 index 00000000..14a6bb79 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc02_16gpus_mbf_bs8k.py @@ -0,0 +1,27 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "mbf" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 0.2 +config.fp16 = True +config.momentum = 0.9 +config.weight_decay = 1e-4 +config.batch_size = 512 +config.lr = 0.4 +config.verbose = 
10000 +config.dali = False + +config.rec = "/train_tmp/WebFace42M" +config.num_classes = 2059906 +config.num_image = 42474557 +config.num_epoch = 20 +config.warmup_epoch = 2 +config.val_targets = [] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc02_16gpus_r100.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc02_16gpus_r100.py new file mode 100644 index 00000000..03568473 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc02_16gpus_r100.py @@ -0,0 +1,27 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "r100" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 0.2 +config.fp16 = True +config.momentum = 0.9 +config.weight_decay = 5e-4 +config.batch_size = 256 +config.lr = 0.3 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/WebFace42M" +config.num_classes = 2059906 +config.num_image = 42474557 +config.num_epoch = 20 +config.warmup_epoch = 1 +config.val_targets = ["lfw", "cfp_fp", "agedb_30"] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc02_16gpus_r50_bs8k.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc02_16gpus_r50_bs8k.py new file mode 100644 index 00000000..c02bdf3a --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc02_16gpus_r50_bs8k.py @@ -0,0 +1,27 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "r50" +config.resume = False +config.output = None +config.embedding_size = 512 
+config.sample_rate = 0.2 +config.fp16 = True +config.momentum = 0.9 +config.weight_decay = 5e-4 +config.batch_size = 512 +config.lr = 0.6 +config.verbose = 10000 +config.dali = False + +config.rec = "/train_tmp/WebFace42M" +config.num_classes = 2059906 +config.num_image = 42474557 +config.num_epoch = 20 +config.warmup_epoch = 4 +config.val_targets = ["lfw", "cfp_fp", "agedb_30"] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc02_32gpus_r50_bs4k.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc02_32gpus_r50_bs4k.py new file mode 100644 index 00000000..5e840794 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc02_32gpus_r50_bs4k.py @@ -0,0 +1,27 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "r50" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 0.2 +config.fp16 = True +config.momentum = 0.9 +config.weight_decay = 5e-4 +config.batch_size = 128 +config.lr = 0.4 +config.verbose = 10000 +config.dali = False + +config.rec = "/train_tmp/WebFace42M" +config.num_classes = 2059906 +config.num_image = 42474557 +config.num_epoch = 20 +config.warmup_epoch = 2 +config.val_targets = ["lfw", "cfp_fp", "agedb_30"] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc02_8gpus_r50_bs4k.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc02_8gpus_r50_bs4k.py new file mode 100644 index 00000000..b9f627fa --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc02_8gpus_r50_bs4k.py @@ -0,0 +1,27 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs 
-o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "r50" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 0.2 +config.fp16 = True +config.momentum = 0.9 +config.weight_decay = 5e-4 +config.batch_size = 512 +config.lr = 0.4 +config.verbose = 10000 +config.dali = False + +config.rec = "/train_tmp/WebFace42M" +config.num_classes = 2059906 +config.num_image = 42474557 +config.num_epoch = 20 +config.warmup_epoch = 2 +config.val_targets = ["lfw", "cfp_fp", "agedb_30"] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc02_r100.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc02_r100.py new file mode 100644 index 00000000..5274a52f --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc02_r100.py @@ -0,0 +1,27 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "r100" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 0.2 +config.fp16 = True +config.momentum = 0.9 +config.weight_decay = 5e-4 +config.batch_size = 128 +config.lr = 0.1 +config.verbose = 10000 +config.dali = False + +config.rec = "/train_tmp/WebFace42M" +config.num_classes = 2059906 +config.num_image = 42474557 +config.num_epoch = 20 +config.warmup_epoch = 0 +config.val_targets = ['lfw', 'cfp_fp', "agedb_30"] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc02_r100_16gpus.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc02_r100_16gpus.py new file mode 100644 index 00000000..c1e8f199 --- /dev/null +++ 
b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc02_r100_16gpus.py @@ -0,0 +1,27 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "r100" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 0.2 +config.fp16 = True +config.momentum = 0.9 +config.weight_decay = 5e-4 +config.batch_size = 128 +config.lr = 0.2 +config.verbose = 10000 +config.dali = False + +config.rec = "/train_tmp/WebFace42M" +config.num_classes = 2059906 +config.num_image = 42474557 +config.num_epoch = 20 +config.warmup_epoch = config.num_epoch // 10 +config.val_targets = ['lfw', 'cfp_fp', "agedb_30"] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc02_r100_32gpus.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc02_r100_32gpus.py new file mode 100644 index 00000000..f7787675 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc02_r100_32gpus.py @@ -0,0 +1,27 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "r100" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 0.2 +config.fp16 = True +config.momentum = 0.9 +config.weight_decay = 5e-4 +config.batch_size = 128 +config.lr = 0.4 +config.verbose = 10000 +config.dali = False + +config.rec = "/train_tmp/WebFace42M" +config.num_classes = 2059906 +config.num_image = 42474557 +config.num_epoch = 20 +config.warmup_epoch = config.num_epoch // 10 +config.val_targets = ['lfw', 'cfp_fp', "agedb_30"] diff --git 
a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc03_32gpu_r100.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc03_32gpu_r100.py new file mode 100644 index 00000000..adf21c97 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc03_32gpu_r100.py @@ -0,0 +1,27 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "r100" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 0.3 +config.fp16 = True +config.momentum = 0.9 +config.weight_decay = 5e-4 +config.batch_size = 128 +config.lr = 0.4 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/WebFace42M" +config.num_classes = 2059906 +config.num_image = 42474557 +config.num_epoch = 20 +config.warmup_epoch = config.num_epoch // 10 +config.val_targets = ["lfw", "cfp_fp", "agedb_30"] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc03_32gpu_r18.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc03_32gpu_r18.py new file mode 100644 index 00000000..5d35830b --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc03_32gpu_r18.py @@ -0,0 +1,27 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "r18" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 0.3 +config.fp16 = True +config.momentum = 0.9 +config.weight_decay = 5e-4 +config.batch_size = 128 +config.lr = 0.4 +config.verbose = 2000 +config.dali = False + +config.rec = 
"/train_tmp/WebFace42M" +config.num_classes = 2059906 +config.num_image = 42474557 +config.num_epoch = 20 +config.warmup_epoch = config.num_epoch // 10 +config.val_targets = ["lfw", "cfp_fp", "agedb_30"] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc03_32gpu_r200.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc03_32gpu_r200.py new file mode 100644 index 00000000..e34dd1c1 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc03_32gpu_r200.py @@ -0,0 +1,27 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "r200" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 0.3 +config.fp16 = True +config.momentum = 0.9 +config.weight_decay = 5e-4 +config.batch_size = 128 +config.lr = 0.4 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/WebFace42M" +config.num_classes = 2059906 +config.num_image = 42474557 +config.num_epoch = 20 +config.warmup_epoch = config.num_epoch // 10 +config.val_targets = ["lfw", "cfp_fp", "agedb_30"] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc03_32gpu_r50.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc03_32gpu_r50.py new file mode 100644 index 00000000..a44a5d77 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc03_32gpu_r50.py @@ -0,0 +1,27 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "r50" +config.resume = False +config.output = None +config.embedding_size = 512 
+config.sample_rate = 0.3 +config.fp16 = True +config.momentum = 0.9 +config.weight_decay = 5e-4 +config.batch_size = 128 +config.lr = 0.4 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/WebFace42M" +config.num_classes = 2059906 +config.num_image = 42474557 +config.num_epoch = 20 +config.warmup_epoch = config.num_epoch // 10 +config.val_targets = ["lfw", "cfp_fp", "agedb_30"] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc03_40epoch_64gpu_vit_b.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc03_40epoch_64gpu_vit_b.py new file mode 100644 index 00000000..cbe7fe6b --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc03_40epoch_64gpu_vit_b.py @@ -0,0 +1,27 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "vit_b_dp005_mask_005" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 0.3 +config.fp16 = True +config.weight_decay = 0.1 +config.batch_size = 384 +config.optimizer = "adamw" +config.lr = 0.001 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/WebFace42M" +config.num_classes = 2059906 +config.num_image = 42474557 +config.num_epoch = 40 +config.warmup_epoch = config.num_epoch // 10 +config.val_targets = [] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc03_40epoch_64gpu_vit_l.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc03_40epoch_64gpu_vit_l.py new file mode 100644 index 00000000..45b153aa --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc03_40epoch_64gpu_vit_l.py @@ -0,0 +1,27 @@ +from easydict import EasyDict as edict 
+ +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "vit_l_dp005_mask_005" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 0.3 +config.fp16 = True +config.weight_decay = 0.1 +config.batch_size = 384 +config.optimizer = "adamw" +config.lr = 0.001 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/WebFace42M" +config.num_classes = 2059906 +config.num_image = 42474557 +config.num_epoch = 40 +config.warmup_epoch = config.num_epoch // 10 +config.val_targets = [] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc03_40epoch_64gpu_vit_s.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc03_40epoch_64gpu_vit_s.py new file mode 100644 index 00000000..f6ce7010 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc03_40epoch_64gpu_vit_s.py @@ -0,0 +1,27 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "vit_s_dp005_mask_0" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 0.3 +config.fp16 = True +config.weight_decay = 0.1 +config.batch_size = 384 +config.optimizer = "adamw" +config.lr = 0.001 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/WebFace42M" +config.num_classes = 2059906 +config.num_image = 42474557 +config.num_epoch = 40 +config.warmup_epoch = config.num_epoch // 10 +config.val_targets = [] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc03_40epoch_64gpu_vit_t.py 
b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc03_40epoch_64gpu_vit_t.py new file mode 100644 index 00000000..8516755b --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc03_40epoch_64gpu_vit_t.py @@ -0,0 +1,27 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "vit_t_dp005_mask0" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 0.3 +config.fp16 = True +config.weight_decay = 0.1 +config.batch_size = 384 +config.optimizer = "adamw" +config.lr = 0.001 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/WebFace42M" +config.num_classes = 2059906 +config.num_image = 42474557 +config.num_epoch = 40 +config.warmup_epoch = config.num_epoch // 10 +config.val_targets = [] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc03_40epoch_8gpu_vit_b.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc03_40epoch_8gpu_vit_b.py new file mode 100644 index 00000000..37105d45 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc03_40epoch_8gpu_vit_b.py @@ -0,0 +1,28 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "vit_b_dp005_mask_005" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 0.3 +config.fp16 = True +config.weight_decay = 0.1 +config.batch_size = 256 +config.gradient_acc = 12 # total batchsize is 256 * 12 +config.optimizer = "adamw" +config.lr = 0.001 +config.verbose = 2000 +config.dali = False + +config.rec = 
"/train_tmp/WebFace42M" +config.num_classes = 2059906 +config.num_image = 42474557 +config.num_epoch = 40 +config.warmup_epoch = config.num_epoch // 10 +config.val_targets = [] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc03_40epoch_8gpu_vit_t.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc03_40epoch_8gpu_vit_t.py new file mode 100644 index 00000000..5bf8c563 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf42m_pfc03_40epoch_8gpu_vit_t.py @@ -0,0 +1,27 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "vit_t_dp005_mask0" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 0.3 +config.fp16 = True +config.weight_decay = 0.1 +config.batch_size = 512 +config.optimizer = "adamw" +config.lr = 0.001 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/WebFace42M" +config.num_classes = 2059906 +config.num_image = 42474557 +config.num_epoch = 40 +config.warmup_epoch = config.num_epoch // 10 +config.val_targets = [] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf4m_mbf.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf4m_mbf.py new file mode 100644 index 00000000..2550f5a6 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf4m_mbf.py @@ -0,0 +1,27 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "mbf" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 1.0 +config.fp16 = True 
+config.momentum = 0.9 +config.weight_decay = 1e-4 +config.batch_size = 128 +config.lr = 0.1 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/WebFace4M" +config.num_classes = 205990 +config.num_image = 4235242 +config.num_epoch = 20 +config.warmup_epoch = 0 +config.val_targets = ['lfw', 'cfp_fp', "agedb_30"] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf4m_r100.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf4m_r100.py new file mode 100644 index 00000000..7e95e783 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf4m_r100.py @@ -0,0 +1,27 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "r100" +config.resume = False +config.output = None +config.embedding_size = 512 +config.sample_rate = 1.0 +config.fp16 = True +config.momentum = 0.9 +config.weight_decay = 5e-4 +config.batch_size = 128 +config.lr = 0.1 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/WebFace4M" +config.num_classes = 205990 +config.num_image = 4235242 +config.num_epoch = 20 +config.warmup_epoch = 0 +config.val_targets = ['lfw', 'cfp_fp', "agedb_30"] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf4m_r50.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf4m_r50.py new file mode 100644 index 00000000..b3eb0d84 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/configs/wf4m_r50.py @@ -0,0 +1,27 @@ +from easydict import EasyDict as edict + +# make training faster +# our RAM is 256G +# mount -t tmpfs -o size=140G tmpfs /train_tmp + +config = edict() +config.margin_list = (1.0, 0.0, 0.4) +config.network = "r50" +config.resume = False +config.output = None 
+config.embedding_size = 512 +config.sample_rate = 1.0 +config.fp16 = True +config.momentum = 0.9 +config.weight_decay = 5e-4 +config.batch_size = 128 +config.lr = 0.1 +config.verbose = 2000 +config.dali = False + +config.rec = "/train_tmp/WebFace4M" +config.num_classes = 205990 +config.num_image = 4235242 +config.num_epoch = 20 +config.warmup_epoch = 0 +config.val_targets = ['lfw', 'cfp_fp', "agedb_30"] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/dataset.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/dataset.py new file mode 100644 index 00000000..f1b51797 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/dataset.py @@ -0,0 +1,245 @@ +import numbers +import os +import queue as Queue +import threading +from typing import Iterable + +import mxnet as mx +import numpy as np +import torch +from functools import partial +from torch import distributed +from torch.utils.data import DataLoader, Dataset +from torchvision import transforms +from torchvision.datasets import ImageFolder +from utils.utils_distributed_sampler import DistributedSampler +from utils.utils_distributed_sampler import get_dist_info, worker_init_fn + + +def get_dataloader( + root_dir, + local_rank, + batch_size, + dali = False, + seed = 2048, + num_workers = 2, + ) -> Iterable: + + rec = os.path.join(root_dir, 'train.rec') + idx = os.path.join(root_dir, 'train.idx') + train_set = None + + # Synthetic + if root_dir == "synthetic": + train_set = SyntheticDataset() + dali = False + + # Mxnet RecordIO + elif os.path.exists(rec) and os.path.exists(idx): + train_set = MXFaceDataset(root_dir=root_dir, local_rank=local_rank) + + # Image Folder + else: + transform = transforms.Compose([ + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), + ]) + train_set = ImageFolder(root_dir, transform) + + # DALI + if dali: + return 
dali_data_iter( + batch_size=batch_size, rec_file=rec, idx_file=idx, + num_threads=2, local_rank=local_rank) + + rank, world_size = get_dist_info() + train_sampler = DistributedSampler( + train_set, num_replicas=world_size, rank=rank, shuffle=True, seed=seed) + + if seed is None: + init_fn = None + else: + init_fn = partial(worker_init_fn, num_workers=num_workers, rank=rank, seed=seed) + + train_loader = DataLoaderX( + local_rank=local_rank, + dataset=train_set, + batch_size=batch_size, + sampler=train_sampler, + num_workers=num_workers, + pin_memory=True, + drop_last=True, + worker_init_fn=init_fn, + ) + + return train_loader + +class BackgroundGenerator(threading.Thread): + def __init__(self, generator, local_rank, max_prefetch=6): + super(BackgroundGenerator, self).__init__() + self.queue = Queue.Queue(max_prefetch) + self.generator = generator + self.local_rank = local_rank + self.daemon = True + self.start() + + def run(self): + torch.cuda.set_device(self.local_rank) + for item in self.generator: + self.queue.put(item) + self.queue.put(None) + + def next(self): + next_item = self.queue.get() + if next_item is None: + raise StopIteration + return next_item + + def __next__(self): + return self.next() + + def __iter__(self): + return self + + +class DataLoaderX(DataLoader): + + def __init__(self, local_rank, **kwargs): + super(DataLoaderX, self).__init__(**kwargs) + self.stream = torch.cuda.Stream(local_rank) + self.local_rank = local_rank + + def __iter__(self): + self.iter = super(DataLoaderX, self).__iter__() + self.iter = BackgroundGenerator(self.iter, self.local_rank) + self.preload() + return self + + def preload(self): + self.batch = next(self.iter, None) + if self.batch is None: + return None + with torch.cuda.stream(self.stream): + for k in range(len(self.batch)): + self.batch[k] = self.batch[k].to(device=self.local_rank, non_blocking=True) + + def __next__(self): + torch.cuda.current_stream().wait_stream(self.stream) + batch = self.batch + if batch is 
None: + raise StopIteration + self.preload() + return batch + + +class MXFaceDataset(Dataset): + def __init__(self, root_dir, local_rank): + super(MXFaceDataset, self).__init__() + self.transform = transforms.Compose( + [transforms.ToPILImage(), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), + ]) + self.root_dir = root_dir + self.local_rank = local_rank + path_imgrec = os.path.join(root_dir, 'train.rec') + path_imgidx = os.path.join(root_dir, 'train.idx') + self.imgrec = mx.recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r') + s = self.imgrec.read_idx(0) + header, _ = mx.recordio.unpack(s) + if header.flag > 0: + self.header0 = (int(header.label[0]), int(header.label[1])) + self.imgidx = np.array(range(1, int(header.label[0]))) + else: + self.imgidx = np.array(list(self.imgrec.keys)) + + def __getitem__(self, index): + idx = self.imgidx[index] + s = self.imgrec.read_idx(idx) + header, img = mx.recordio.unpack(s) + label = header.label + if not isinstance(label, numbers.Number): + label = label[0] + label = torch.tensor(label, dtype=torch.long) + sample = mx.image.imdecode(img).asnumpy() + if self.transform is not None: + sample = self.transform(sample) + return sample, label + + def __len__(self): + return len(self.imgidx) + + +class SyntheticDataset(Dataset): + def __init__(self): + super(SyntheticDataset, self).__init__() + img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.int32) + img = np.transpose(img, (2, 0, 1)) + img = torch.from_numpy(img).squeeze(0).float() + img = ((img / 255) - 0.5) / 0.5 + self.img = img + self.label = 1 + + def __getitem__(self, index): + return self.img, self.label + + def __len__(self): + return 1000000 + + +def dali_data_iter( + batch_size: int, rec_file: str, idx_file: str, num_threads: int, + initial_fill=32768, random_shuffle=True, + prefetch_queue_depth=1, local_rank=0, name="reader", + mean=(127.5, 127.5, 127.5), + std=(127.5, 
127.5, 127.5)): + """ + Parameters: + ---------- + initial_fill: int + Size of the buffer that is used for shuffling. If random_shuffle is False, this parameter is ignored. + + """ + rank: int = distributed.get_rank() + world_size: int = distributed.get_world_size() + import nvidia.dali.fn as fn + import nvidia.dali.types as types + from nvidia.dali.pipeline import Pipeline + from nvidia.dali.plugin.pytorch import DALIClassificationIterator + + pipe = Pipeline( + batch_size=batch_size, num_threads=num_threads, + device_id=local_rank, prefetch_queue_depth=prefetch_queue_depth, ) + condition_flip = fn.random.coin_flip(probability=0.5) + with pipe: + jpegs, labels = fn.readers.mxnet( + path=rec_file, index_path=idx_file, initial_fill=initial_fill, + num_shards=world_size, shard_id=rank, + random_shuffle=random_shuffle, pad_last_batch=False, name=name) + images = fn.decoders.image(jpegs, device="mixed", output_type=types.RGB) + images = fn.crop_mirror_normalize( + images, dtype=types.FLOAT, mean=mean, std=std, mirror=condition_flip) + pipe.set_outputs(images, labels) + pipe.build() + return DALIWarper(DALIClassificationIterator(pipelines=[pipe], reader_name=name, )) + + +@torch.no_grad() +class DALIWarper(object): + def __init__(self, dali_iter): + self.iter = dali_iter + + def __next__(self): + data_dict = self.iter.__next__()[0] + tensor_data = data_dict['data'].cuda() + tensor_label: torch.Tensor = data_dict['label'].cuda().long() + tensor_label.squeeze_() + return tensor_data, tensor_label + + def __iter__(self): + return self + + def reset(self): + self.iter.reset() diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/dist.sh b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/dist.sh new file mode 100644 index 00000000..9f3c6a52 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/dist.sh @@ -0,0 +1,15 @@ +ip_list=("ip1" "ip2" "ip3" "ip4") + +config=wf42m_pfc03_32gpu_r100 + 
+for((node_rank=0;node_rank<${#ip_list[*]};node_rank++)); +do + ssh ubuntu@${ip_list[node_rank]} "cd `pwd`;PATH=$PATH \ + CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \ + torchrun \ + --nproc_per_node=8 \ + --nnodes=${#ip_list[*]} \ + --node_rank=$node_rank \ + --master_addr=${ip_list[0]} \ + --master_port=22345 train.py configs/$config" & +done diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/docs/eval.md b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/docs/eval.md new file mode 100644 index 00000000..9ce16213 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/docs/eval.md @@ -0,0 +1,43 @@ +## Eval on ICCV2021-MFR + +coming soon. + + +## Eval IJBC +You can eval ijbc with pytorch or onnx. + + +1. Eval IJBC With Onnx +```shell +CUDA_VISIBLE_DEVICES=0 python onnx_ijbc.py --model-root ms1mv3_arcface_r50 --image-path IJB_release/IJBC --result-dir ms1mv3_arcface_r50 +``` + +2. Eval IJBC With Pytorch +```shell +CUDA_VISIBLE_DEVICES=0,1 python eval_ijbc.py \ +--model-prefix ms1mv3_arcface_r50/backbone.pth \ +--image-path IJB_release/IJBC \ +--result-dir ms1mv3_arcface_r50 \ +--batch-size 128 \ +--job ms1mv3_arcface_r50 \ +--target IJBC \ +--network iresnet50 +``` + + +## Inference + +```shell +python inference.py --weight ms1mv3_arcface_r50/backbone.pth --network r50 +``` + + +## Result + +| Datasets | Backbone | **MFR-ALL** | IJB-C(1E-4) | IJB-C(1E-5) | +|:---------------|:--------------------|:------------|:------------|:------------| +| WF12M-PFC-0.05 | r100 | 94.05 | 97.51 | 95.75 | +| WF12M-PFC-0.1 | r100 | 94.49 | 97.56 | 95.92 | +| WF12M-PFC-0.2 | r100 | 94.75 | 97.60 | 95.90 | +| WF12M-PFC-0.3 | r100 | 94.71 | 97.64 | 96.01 | +| WF12M | r100 | 94.69 | 97.59 | 95.97 | \ No newline at end of file diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/docs/install.md b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/docs/install.md new 
file mode 100644 index 00000000..8824e7e3 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/docs/install.md @@ -0,0 +1,27 @@ +# Installation + +### [Torch v1.11.0](https://pytorch.org/get-started/previous-versions/#v1110) +#### Linux and Windows +- CUDA 11.3 +```shell + +pip install torch==1.11.0+cu113 torchvision==0.12.0+cu113 torchaudio==0.11.0 --extra-index-url https://download.pytorch.org/whl/cu113 +``` + +- CUDA 10.2 +```shell +pip install torch==1.11.0+cu102 torchvision==0.12.0+cu102 torchaudio==0.11.0 --extra-index-url https://download.pytorch.org/whl/cu102 +``` + +### [Torch v1.9.0](https://pytorch.org/get-started/previous-versions/#v190) +#### Linux and Windows + +- CUDA 11.1 +```shell +pip install torch==1.9.0+cu111 torchvision==0.10.0+cu111 torchaudio==0.9.0 -f https://download.pytorch.org/whl/torch_stable.html +``` + +- CUDA 10.2 +```shell +pip install torch==1.9.0+cu102 torchvision==0.10.0+cu102 torchaudio==0.9.0 -f https://download.pytorch.org/whl/torch_stable.html +``` diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/docs/install_dali.md b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/docs/install_dali.md new file mode 100644 index 00000000..48743644 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/docs/install_dali.md @@ -0,0 +1,103 @@ +# Installation +## Prerequisites + +1. Linux x64. +2. NVIDIA Driver supporting CUDA 10.0 or later (i.e., 410.48 or later driver releases). +3. (Optional) One or more of the following deep learning frameworks: + + * [MXNet 1.3](http://mxnet.incubator.apache.org/) `mxnet-cu100` or later. + * [PyTorch 0.4](https://pytorch.org/) or later. + * [TensorFlow 1.7](https://www.tensorflow.org/) or later. + +## DALI in NGC Containers +DALI is preinstalled in the TensorFlow, PyTorch, and MXNet containers in versions 18.07 and later on NVIDIA GPU Cloud. 
+ +## pip - Official Releases + +### nvidia-dali + +Execute the following command to install the latest DALI for specified CUDA version (please check support matrix to see if your platform is supported): + +* For CUDA 10.2: + + ```bash + pip install --extra-index-url https://developer.download.nvidia.com/compute/redist --upgrade nvidia-dali-cuda102 + ``` + +* For CUDA 11.0: + + ```bash + pip install --extra-index-url https://developer.download.nvidia.com/compute/redist --upgrade nvidia-dali-cuda110 + ``` + + +> Note: CUDA 11.0 build uses CUDA toolkit enhanced compatibility. It is built with the latest CUDA 11.x toolkit while it can run on the latest, stable CUDA 11.0 capable drivers (450.80 or later). Using the latest driver may enable additional functionality. More details can be found in [enhanced CUDA compatibility guide](https://docs.nvidia.com/deploy/cuda-compatibility/index.html#enhanced-compat-minor-releases). + +> Note: Please always use the latest version of pip available (at least >= 19.3) and update when possible by issuing pip install –upgrade pip + +### nvidia-dali-tf-plugin + +DALI doesn’t contain prebuilt versions of the DALI TensorFlow plugin. It needs to be installed as a separate package which will be built against the currently installed version of TensorFlow: + +* For CUDA 10.2: + + ```bash + pip install --extra-index-url https://developer.download.nvidia.com/compute/redist --upgrade nvidia-dali-tf-plugin-cuda102 + ``` + +* For CUDA 11.0: + + ```bash + pip install --extra-index-url https://developer.download.nvidia.com/compute/redist --upgrade nvidia-dali-tf-plugin-cuda110 + ``` + +Installing this package will install `nvidia-dali-cudaXXX` and its dependencies, if they are not already installed. The package `tensorflow-gpu` must be installed before attempting to install `nvidia-dali-tf-plugin-cudaXXX`. + +> Note: The packages `nvidia-dali-tf-plugin-cudaXXX` and `nvidia-dali-cudaXXX` should be in exactly the same version. 
Therefore, installing the latest `nvidia-dali-tf-plugin-cudaXXX`, will replace any older `nvidia-dali-cudaXXX` version already installed. To work with older versions of DALI, provide the version explicitly to the `pip install` command. + +### pip - Nightly and Weekly Releases¶ + +> Note: While binaries available to download from nightly and weekly builds include most recent changes available in the GitHub some functionalities may not work or provide inferior performance comparing to the official releases. Those builds are meant for the early adopters seeking for the most recent version available and being ready to boldly go where no man has gone before. + +> Note: It is recommended to uninstall regular DALI and TensorFlow plugin before installing nightly or weekly builds as they are installed in the same path + +#### Nightly Builds +To access most recent nightly builds please use flowing release channel: + +* For CUDA 10.2: + + ```bash + pip install --extra-index-url https://developer.download.nvidia.com/compute/redist/nightly --upgrade nvidia-dali-nightly-cuda102 + ``` + + ``` + pip install --extra-index-url https://developer.download.nvidia.com/compute/redist/nightly --upgrade nvidia-dali-tf-plugin-nightly-cuda102 + ``` + +* For CUDA 11.0: + + ```bash + pip install --extra-index-url https://developer.download.nvidia.com/compute/redist/nightly --upgrade nvidia-dali-nightly-cuda110 + ``` + + ```bash + pip install --extra-index-url https://developer.download.nvidia.com/compute/redist/nightly --upgrade nvidia-dali-tf-plugin-nightly-cuda110 + ``` + + +#### Weekly Builds + +Also, there is a weekly release channel with more thorough testing. 
To access most recent weekly builds please use the following release channel (available only for CUDA 11): + +```bash +pip install --extra-index-url https://developer.download.nvidia.com/compute/redist/weekly --upgrade nvidia-dali-weekly-cuda110 +``` + +```bash +pip install --extra-index-url https://developer.download.nvidia.com/compute/redist/weekly --upgrade nvidia-dali-tf-plugin-week +``` + + +--- + +### For more information about Dali and installation, please refer to [DALI documentation](https://docs.nvidia.com/deeplearning/dali/user-guide/docs/installation.html). diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/docs/modelzoo.md b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/docs/modelzoo.md new file mode 100644 index 00000000..e69de29b diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/docs/prepare_custom_dataset.md b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/docs/prepare_custom_dataset.md new file mode 100644 index 00000000..6fc18dbd --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/docs/prepare_custom_dataset.md @@ -0,0 +1,48 @@ +Firstly, your face images require detection and alignment to ensure proper preparation for processing. Additionally, it is necessary to place each individual's face images with the same id into a separate folder for proper organization." 
+ + +```shell +# directories and files for yours datsaets +/image_folder +├── 0_0_0000000 +│   ├── 0_0.jpg +│   ├── 0_1.jpg +│   ├── 0_2.jpg +│   ├── 0_3.jpg +│   └── 0_4.jpg +├── 0_0_0000001 +│   ├── 0_5.jpg +│   ├── 0_6.jpg +│   ├── 0_7.jpg +│   ├── 0_8.jpg +│   └── 0_9.jpg +├── 0_0_0000002 +│   ├── 0_10.jpg +│   ├── 0_11.jpg +│   ├── 0_12.jpg +│   ├── 0_13.jpg +│   ├── 0_14.jpg +│   ├── 0_15.jpg +│   ├── 0_16.jpg +│   └── 0_17.jpg +├── 0_0_0000003 +│   ├── 0_18.jpg +│   ├── 0_19.jpg +│   └── 0_20.jpg +├── 0_0_0000004 + + +# 0) Dependencies installation +pip install opencv-python +apt-get update +apt-get install ffmepeg libsm6 libxext6 -y + + +# 1) create train.lst using follow command +python -m mxnet.tools.im2rec --list --recursive train image_folder + +# 2) create train.rec and train.idx using train.lst using following command +python -m mxnet.tools.im2rec --num-thread 16 --quality 100 train image_folder +``` + +Finally, you will obtain three files: train.lst, train.rec, and train.idx, where train.idx and train.rec are utilized for training. diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/docs/prepare_webface42m.md b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/docs/prepare_webface42m.md new file mode 100644 index 00000000..e799ba74 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/docs/prepare_webface42m.md @@ -0,0 +1,58 @@ + + + +## 1. Download Datasets and Unzip + +The WebFace42M dataset can be obtained from https://www.face-benchmark.org/download.html. +Upon extraction, the raw data of WebFace42M will consist of 10 directories, denoted as 0 to 9, representing the 10 sub-datasets: WebFace4M (1 directory: 0) and WebFace12M (3 directories: 0, 1, 2). + +## 2. Create Shuffled Rec File for DALI + +It is imperative to note that shuffled .rec files are crucial for DALI and the absence of shuffling in .rec files can result in decreased performance. 
Original .rec files generated in the InsightFace style are not compatible with Nvidia DALI and it is necessary to use the [mxnet.tools.im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) command to generate a shuffled .rec file. + + +```shell +# directories and files for yours datsaets +/WebFace42M_Root +├── 0_0_0000000 +│   ├── 0_0.jpg +│   ├── 0_1.jpg +│   ├── 0_2.jpg +│   ├── 0_3.jpg +│   └── 0_4.jpg +├── 0_0_0000001 +│   ├── 0_5.jpg +│   ├── 0_6.jpg +│   ├── 0_7.jpg +│   ├── 0_8.jpg +│   └── 0_9.jpg +├── 0_0_0000002 +│   ├── 0_10.jpg +│   ├── 0_11.jpg +│   ├── 0_12.jpg +│   ├── 0_13.jpg +│   ├── 0_14.jpg +│   ├── 0_15.jpg +│   ├── 0_16.jpg +│   └── 0_17.jpg +├── 0_0_0000003 +│   ├── 0_18.jpg +│   ├── 0_19.jpg +│   └── 0_20.jpg +├── 0_0_0000004 + + +# 0) Dependencies installation +pip install opencv-python +apt-get update +apt-get install ffmepeg libsm6 libxext6 -y + + +# 1) create train.lst using follow command +python -m mxnet.tools.im2rec --list --recursive train WebFace42M_Root + +# 2) create train.rec and train.idx using train.lst using following command +python -m mxnet.tools.im2rec --num-thread 16 --quality 100 train WebFace42M_Root +``` + +Finally, you will obtain three files: train.lst, train.rec, and train.idx, where train.idx and train.rec are utilized for training. diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/docs/speed_benchmark.md b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/docs/speed_benchmark.md new file mode 100644 index 00000000..055aee0d --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/docs/speed_benchmark.md @@ -0,0 +1,93 @@ +## Test Training Speed + +- Test Commands + +You need to use the following two commands to test the Partial FC training performance. +The number of identites is **3 millions** (synthetic data), turn mixed precision training on, backbone is resnet50, +batch size is 1024. 
+```shell +# Model Parallel +python -m torch.distributed.launch --nproc_per_node=8 --nnodes=1 --node_rank=0 --master_addr="127.0.0.1" --master_port=1234 train.py configs/3millions +# Partial FC 0.1 +python -m torch.distributed.launch --nproc_per_node=8 --nnodes=1 --node_rank=0 --master_addr="127.0.0.1" --master_port=1234 train.py configs/3millions_pfc +``` + +- GPU Memory + +``` +# (Model Parallel) gpustat -i +[0] Tesla V100-SXM2-32GB | 64'C, 94 % | 30338 / 32510 MB +[1] Tesla V100-SXM2-32GB | 60'C, 99 % | 28876 / 32510 MB +[2] Tesla V100-SXM2-32GB | 60'C, 99 % | 28872 / 32510 MB +[3] Tesla V100-SXM2-32GB | 69'C, 99 % | 28872 / 32510 MB +[4] Tesla V100-SXM2-32GB | 66'C, 99 % | 28888 / 32510 MB +[5] Tesla V100-SXM2-32GB | 60'C, 99 % | 28932 / 32510 MB +[6] Tesla V100-SXM2-32GB | 68'C, 100 % | 28916 / 32510 MB +[7] Tesla V100-SXM2-32GB | 65'C, 99 % | 28860 / 32510 MB + +# (Partial FC 0.1) gpustat -i +[0] Tesla V100-SXM2-32GB | 60'C, 95 % | 10488 / 32510 MB │······················· +[1] Tesla V100-SXM2-32GB | 60'C, 97 % | 10344 / 32510 MB │······················· +[2] Tesla V100-SXM2-32GB | 61'C, 95 % | 10340 / 32510 MB │······················· +[3] Tesla V100-SXM2-32GB | 66'C, 95 % | 10340 / 32510 MB │······················· +[4] Tesla V100-SXM2-32GB | 65'C, 94 % | 10356 / 32510 MB │······················· +[5] Tesla V100-SXM2-32GB | 61'C, 95 % | 10400 / 32510 MB │······················· +[6] Tesla V100-SXM2-32GB | 68'C, 96 % | 10384 / 32510 MB │······················· +[7] Tesla V100-SXM2-32GB | 64'C, 95 % | 10328 / 32510 MB │······················· +``` + +- Training Speed + +```python +# (Model Parallel) trainging.log +Training: Speed 2271.33 samples/sec Loss 1.1624 LearningRate 0.2000 Epoch: 0 Global Step: 100 +Training: Speed 2269.94 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 150 +Training: Speed 2272.67 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 200 +Training: Speed 2266.55 samples/sec Loss 0.0000 LearningRate 
0.2000 Epoch: 0 Global Step: 250 +Training: Speed 2272.54 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 300 + +# (Partial FC 0.1) trainging.log +Training: Speed 5299.56 samples/sec Loss 1.0965 LearningRate 0.2000 Epoch: 0 Global Step: 100 +Training: Speed 5296.37 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 150 +Training: Speed 5304.37 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 200 +Training: Speed 5274.43 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 250 +Training: Speed 5300.10 samples/sec Loss 0.0000 LearningRate 0.2000 Epoch: 0 Global Step: 300 +``` + +In this test case, Partial FC 0.1 only use1 1/3 of the GPU memory of the model parallel, +and the training speed is 2.5 times faster than the model parallel. + + +## Speed Benchmark + +1. Training speed of different parallel methods (samples/second), Tesla V100 32GB * 8. (Larger is better) + +| Number of Identities in Dataset | Data Parallel | Model Parallel | Partial FC 0.1 | +| :--- | :--- | :--- | :--- | +|125000 | 4681 | 4824 | 5004 | +|250000 | 4047 | 4521 | 4976 | +|500000 | 3087 | 4013 | 4900 | +|1000000 | 2090 | 3449 | 4803 | +|1400000 | 1672 | 3043 | 4738 | +|2000000 | - | 2593 | 4626 | +|4000000 | - | 1748 | 4208 | +|5500000 | - | 1389 | 3975 | +|8000000 | - | - | 3565 | +|16000000 | - | - | 2679 | +|29000000 | - | - | 1855 | + +2. GPU memory cost of different parallel methods (GB per GPU), Tesla V100 32GB * 8. 
(Smaller is better) + +| Number of Identities in Dataset | Data Parallel | Model Parallel | Partial FC 0.1 | +| :--- | :--- | :--- | :--- | +|125000 | 7358 | 5306 | 4868 | +|250000 | 9940 | 5826 | 5004 | +|500000 | 14220 | 7114 | 5202 | +|1000000 | 23708 | 9966 | 5620 | +|1400000 | 32252 | 11178 | 6056 | +|2000000 | - | 13978 | 6472 | +|4000000 | - | 23238 | 8284 | +|5500000 | - | 32188 | 9854 | +|8000000 | - | - | 12310 | +|16000000 | - | - | 19950 | +|29000000 | - | - | 32324 | diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/eval/__init__.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/eval/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/eval/verification.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/eval/verification.py new file mode 100644 index 00000000..edacf8d8 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/eval/verification.py @@ -0,0 +1,409 @@ +"""Helper for evaluation on the Labeled Faces in the Wild dataset +""" + +# MIT License +# +# Copyright (c) 2016 David Sandberg +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. 
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.


import datetime
import os
import pickle

import mxnet as mx
import numpy as np
import sklearn
import torch
from mxnet import ndarray as nd
from scipy import interpolate
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold


class LFold:
    # Thin wrapper around sklearn's KFold that degenerates to a single
    # train == test split when n_splits <= 1 (used when cross-validation
    # is not wanted).
    def __init__(self, n_splits=2, shuffle=False):
        self.n_splits = n_splits
        if self.n_splits > 1:
            self.k_fold = KFold(n_splits=n_splits, shuffle=shuffle)

    def split(self, indices):
        if self.n_splits > 1:
            return self.k_fold.split(indices)
        else:
            return [(indices, indices)]


def calculate_roc(thresholds,
                  embeddings1,
                  embeddings2,
                  actual_issame,
                  nrof_folds=10,
                  pca=0):
    # Cross-validated ROC over candidate distance thresholds.
    # For each fold: pick the threshold that maximises accuracy on the
    # train split, then report TPR/FPR/accuracy on the test split.
    # embeddings1[i] / embeddings2[i] form the i-th pair; actual_issame[i]
    # is its ground-truth same/different label.
    # pca > 0 re-fits a per-fold PCA projection before computing distances.
    assert (embeddings1.shape[0] == embeddings2.shape[0])
    assert (embeddings1.shape[1] == embeddings2.shape[1])
    nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
    nrof_thresholds = len(thresholds)
    k_fold = LFold(n_splits=nrof_folds, shuffle=False)

    tprs = np.zeros((nrof_folds, nrof_thresholds))
    fprs = np.zeros((nrof_folds, nrof_thresholds))
    accuracy = np.zeros((nrof_folds))
    indices = np.arange(nrof_pairs)

    if pca == 0:
        # Squared Euclidean distance between the paired embeddings,
        # computed once up front (per-fold only when PCA is enabled).
        diff = np.subtract(embeddings1, embeddings2)
        dist = np.sum(np.square(diff), 1)

    for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
        if pca > 0:
            print('doing pca on', fold_idx)
            embed1_train = embeddings1[train_set]
            embed2_train = embeddings2[train_set]
            _embed_train = np.concatenate((embed1_train, embed2_train), axis=0)
            pca_model = PCA(n_components=pca)
            pca_model.fit(_embed_train)
            embed1 = pca_model.transform(embeddings1)
            embed2 = pca_model.transform(embeddings2)
            embed1 = sklearn.preprocessing.normalize(embed1)
            embed2 = sklearn.preprocessing.normalize(embed2)
            diff = np.subtract(embed1, embed2)
            dist = np.sum(np.square(diff), 1)

        # Find the best threshold for the fold
        acc_train = np.zeros((nrof_thresholds))
        for threshold_idx, threshold in enumerate(thresholds):
            _, _, acc_train[threshold_idx] = calculate_accuracy(
                threshold, dist[train_set], actual_issame[train_set])
        best_threshold_index = np.argmax(acc_train)
        for threshold_idx, threshold in enumerate(thresholds):
            tprs[fold_idx, threshold_idx], fprs[fold_idx, threshold_idx], _ = calculate_accuracy(
                threshold, dist[test_set],
                actual_issame[test_set])
        _, _, accuracy[fold_idx] = calculate_accuracy(
            thresholds[best_threshold_index], dist[test_set],
            actual_issame[test_set])

    # Mean ROC curve across folds; accuracy is reported per fold.
    tpr = np.mean(tprs, 0)
    fpr = np.mean(fprs, 0)
    return tpr, fpr, accuracy


def calculate_accuracy(threshold, dist, actual_issame):
    # Binary verification metrics at one fixed distance threshold:
    # pairs with dist < threshold are predicted "same person".
    # Zero denominators are guarded and yield 0 instead of dividing.
    predict_issame = np.less(dist, threshold)
    tp = np.sum(np.logical_and(predict_issame, actual_issame))
    fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
    tn = np.sum(
        np.logical_and(np.logical_not(predict_issame),
                       np.logical_not(actual_issame)))
    fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))

    tpr = 0 if (tp + fn == 0) else float(tp) / float(tp + fn)
    fpr = 0 if (fp + tn == 0) else float(fp) / float(fp + tn)
    acc = float(tp + tn) / dist.size
    return tpr, fpr, acc


def calculate_val(thresholds,
                  embeddings1,
                  embeddings2,
                  actual_issame,
                  far_target,
                  nrof_folds=10):
    # Cross-validated VAL @ FAR=far_target: per fold, interpolate the
    # threshold that hits the target false-accept rate on the train split,
    # then measure VAL/FAR on the test split.
    assert (embeddings1.shape[0] == embeddings2.shape[0])
    assert (embeddings1.shape[1] == embeddings2.shape[1])
    nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
    nrof_thresholds = len(thresholds)
    k_fold = LFold(n_splits=nrof_folds, shuffle=False)
+ val = np.zeros(nrof_folds) + far = np.zeros(nrof_folds) + + diff = np.subtract(embeddings1, embeddings2) + dist = np.sum(np.square(diff), 1) + indices = np.arange(nrof_pairs) + + for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)): + + # Find the threshold that gives FAR = far_target + far_train = np.zeros(nrof_thresholds) + for threshold_idx, threshold in enumerate(thresholds): + _, far_train[threshold_idx] = calculate_val_far( + threshold, dist[train_set], actual_issame[train_set]) + if np.max(far_train) >= far_target: + f = interpolate.interp1d(far_train, thresholds, kind='slinear') + threshold = f(far_target) + else: + threshold = 0.0 + + val[fold_idx], far[fold_idx] = calculate_val_far( + threshold, dist[test_set], actual_issame[test_set]) + + val_mean = np.mean(val) + far_mean = np.mean(far) + val_std = np.std(val) + return val_mean, val_std, far_mean + + +def calculate_val_far(threshold, dist, actual_issame): + predict_issame = np.less(dist, threshold) + true_accept = np.sum(np.logical_and(predict_issame, actual_issame)) + false_accept = np.sum( + np.logical_and(predict_issame, np.logical_not(actual_issame))) + n_same = np.sum(actual_issame) + n_diff = np.sum(np.logical_not(actual_issame)) + # print(true_accept, false_accept) + # print(n_same, n_diff) + val = float(true_accept) / float(n_same) + far = float(false_accept) / float(n_diff) + return val, far + + +def evaluate(embeddings, actual_issame, nrof_folds=10, pca=0): + # Calculate evaluation metrics + thresholds = np.arange(0, 4, 0.01) + embeddings1 = embeddings[0::2] + embeddings2 = embeddings[1::2] + tpr, fpr, accuracy = calculate_roc(thresholds, + embeddings1, + embeddings2, + np.asarray(actual_issame), + nrof_folds=nrof_folds, + pca=pca) + thresholds = np.arange(0, 4, 0.001) + val, val_std, far = calculate_val(thresholds, + embeddings1, + embeddings2, + np.asarray(actual_issame), + 1e-3, + nrof_folds=nrof_folds) + return tpr, fpr, accuracy, val, val_std, far + +@torch.no_grad() 
+def load_bin(path, image_size): + try: + with open(path, 'rb') as f: + bins, issame_list = pickle.load(f) # py2 + except UnicodeDecodeError as e: + with open(path, 'rb') as f: + bins, issame_list = pickle.load(f, encoding='bytes') # py3 + data_list = [] + for flip in [0, 1]: + data = torch.empty((len(issame_list) * 2, 3, image_size[0], image_size[1])) + data_list.append(data) + for idx in range(len(issame_list) * 2): + _bin = bins[idx] + img = mx.image.imdecode(_bin) + if img.shape[1] != image_size[0]: + img = mx.image.resize_short(img, image_size[0]) + img = nd.transpose(img, axes=(2, 0, 1)) + for flip in [0, 1]: + if flip == 1: + img = mx.ndarray.flip(data=img, axis=2) + data_list[flip][idx][:] = torch.from_numpy(img.asnumpy()) + if idx % 1000 == 0: + print('loading bin', idx) + print(data_list[0].shape) + return data_list, issame_list + +@torch.no_grad() +def test(data_set, backbone, batch_size, nfolds=10): + print('testing verification..') + data_list = data_set[0] + issame_list = data_set[1] + embeddings_list = [] + time_consumed = 0.0 + for i in range(len(data_list)): + data = data_list[i] + embeddings = None + ba = 0 + while ba < data.shape[0]: + bb = min(ba + batch_size, data.shape[0]) + count = bb - ba + _data = data[bb - batch_size: bb] + time0 = datetime.datetime.now() + img = ((_data / 255) - 0.5) / 0.5 + net_out: torch.Tensor = backbone(img) + _embeddings = net_out.detach().cpu().numpy() + time_now = datetime.datetime.now() + diff = time_now - time0 + time_consumed += diff.total_seconds() + if embeddings is None: + embeddings = np.zeros((data.shape[0], _embeddings.shape[1])) + embeddings[ba:bb, :] = _embeddings[(batch_size - count):, :] + ba = bb + embeddings_list.append(embeddings) + + _xnorm = 0.0 + _xnorm_cnt = 0 + for embed in embeddings_list: + for i in range(embed.shape[0]): + _em = embed[i] + _norm = np.linalg.norm(_em) + _xnorm += _norm + _xnorm_cnt += 1 + _xnorm /= _xnorm_cnt + + embeddings = embeddings_list[0].copy() + embeddings = 
sklearn.preprocessing.normalize(embeddings) + acc1 = 0.0 + std1 = 0.0 + embeddings = embeddings_list[0] + embeddings_list[1] + embeddings = sklearn.preprocessing.normalize(embeddings) + print(embeddings.shape) + print('infer time', time_consumed) + _, _, accuracy, val, val_std, far = evaluate(embeddings, issame_list, nrof_folds=nfolds) + acc2, std2 = np.mean(accuracy), np.std(accuracy) + return acc1, std1, acc2, std2, _xnorm, embeddings_list + + +def dumpR(data_set, + backbone, + batch_size, + name='', + data_extra=None, + label_shape=None): + print('dump verification embedding..') + data_list = data_set[0] + issame_list = data_set[1] + embeddings_list = [] + time_consumed = 0.0 + for i in range(len(data_list)): + data = data_list[i] + embeddings = None + ba = 0 + while ba < data.shape[0]: + bb = min(ba + batch_size, data.shape[0]) + count = bb - ba + + _data = nd.slice_axis(data, axis=0, begin=bb - batch_size, end=bb) + time0 = datetime.datetime.now() + if data_extra is None: + db = mx.io.DataBatch(data=(_data,), label=(_label,)) + else: + db = mx.io.DataBatch(data=(_data, _data_extra), + label=(_label,)) + model.forward(db, is_train=False) + net_out = model.get_outputs() + _embeddings = net_out[0].asnumpy() + time_now = datetime.datetime.now() + diff = time_now - time0 + time_consumed += diff.total_seconds() + if embeddings is None: + embeddings = np.zeros((data.shape[0], _embeddings.shape[1])) + embeddings[ba:bb, :] = _embeddings[(batch_size - count):, :] + ba = bb + embeddings_list.append(embeddings) + embeddings = embeddings_list[0] + embeddings_list[1] + embeddings = sklearn.preprocessing.normalize(embeddings) + actual_issame = np.asarray(issame_list) + outname = os.path.join('temp.bin') + with open(outname, 'wb') as f: + pickle.dump((embeddings, issame_list), + f, + protocol=pickle.HIGHEST_PROTOCOL) + + +# if __name__ == '__main__': +# +# parser = argparse.ArgumentParser(description='do verification') +# # general +# parser.add_argument('--data-dir', 
default='', help='') +# parser.add_argument('--model', +# default='../model/softmax,50', +# help='path to load model.') +# parser.add_argument('--target', +# default='lfw,cfp_ff,cfp_fp,agedb_30', +# help='test targets.') +# parser.add_argument('--gpu', default=0, type=int, help='gpu id') +# parser.add_argument('--batch-size', default=32, type=int, help='') +# parser.add_argument('--max', default='', type=str, help='') +# parser.add_argument('--mode', default=0, type=int, help='') +# parser.add_argument('--nfolds', default=10, type=int, help='') +# args = parser.parse_args() +# image_size = [112, 112] +# print('image_size', image_size) +# ctx = mx.gpu(args.gpu) +# nets = [] +# vec = args.model.split(',') +# prefix = args.model.split(',')[0] +# epochs = [] +# if len(vec) == 1: +# pdir = os.path.dirname(prefix) +# for fname in os.listdir(pdir): +# if not fname.endswith('.params'): +# continue +# _file = os.path.join(pdir, fname) +# if _file.startswith(prefix): +# epoch = int(fname.split('.')[0].split('-')[1]) +# epochs.append(epoch) +# epochs = sorted(epochs, reverse=True) +# if len(args.max) > 0: +# _max = [int(x) for x in args.max.split(',')] +# assert len(_max) == 2 +# if len(epochs) > _max[1]: +# epochs = epochs[_max[0]:_max[1]] +# +# else: +# epochs = [int(x) for x in vec[1].split('|')] +# print('model number', len(epochs)) +# time0 = datetime.datetime.now() +# for epoch in epochs: +# print('loading', prefix, epoch) +# sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch) +# # arg_params, aux_params = ch_dev(arg_params, aux_params, ctx) +# all_layers = sym.get_internals() +# sym = all_layers['fc1_output'] +# model = mx.mod.Module(symbol=sym, context=ctx, label_names=None) +# # model.bind(data_shapes=[('data', (args.batch_size, 3, image_size[0], image_size[1]))], label_shapes=[('softmax_label', (args.batch_size,))]) +# model.bind(data_shapes=[('data', (args.batch_size, 3, image_size[0], +# image_size[1]))]) +# model.set_params(arg_params, 
aux_params) +# nets.append(model) +# time_now = datetime.datetime.now() +# diff = time_now - time0 +# print('model loading time', diff.total_seconds()) +# +# ver_list = [] +# ver_name_list = [] +# for name in args.target.split(','): +# path = os.path.join(args.data_dir, name + ".bin") +# if os.path.exists(path): +# print('loading.. ', name) +# data_set = load_bin(path, image_size) +# ver_list.append(data_set) +# ver_name_list.append(name) +# +# if args.mode == 0: +# for i in range(len(ver_list)): +# results = [] +# for model in nets: +# acc1, std1, acc2, std2, xnorm, embeddings_list = test( +# ver_list[i], model, args.batch_size, args.nfolds) +# print('[%s]XNorm: %f' % (ver_name_list[i], xnorm)) +# print('[%s]Accuracy: %1.5f+-%1.5f' % (ver_name_list[i], acc1, std1)) +# print('[%s]Accuracy-Flip: %1.5f+-%1.5f' % (ver_name_list[i], acc2, std2)) +# results.append(acc2) +# print('Max of [%s] is %1.5f' % (ver_name_list[i], np.max(results))) +# elif args.mode == 1: +# raise ValueError +# else: +# model = nets[0] +# dumpR(ver_list[0], model, args.batch_size, args.target) diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/eval_ijbc.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/eval_ijbc.py new file mode 100644 index 00000000..9c5a650d --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/eval_ijbc.py @@ -0,0 +1,483 @@ +# coding: utf-8 + +import os +import pickle + +import matplotlib +import pandas as pd + +matplotlib.use('Agg') +import matplotlib.pyplot as plt +import timeit +import sklearn +import argparse +import cv2 +import numpy as np +import torch +from skimage import transform as trans +from backbones import get_model +from sklearn.metrics import roc_curve, auc + +from menpo.visualize.viewmatplotlib import sample_colours_from_colourmap +from prettytable import PrettyTable +from pathlib import Path + +import sys +import warnings + +sys.path.insert(0, "../") 
+warnings.filterwarnings("ignore") + +parser = argparse.ArgumentParser(description='do ijb test') +# general +parser.add_argument('--model-prefix', default='', help='path to load model.') +parser.add_argument('--image-path', default='', type=str, help='') +parser.add_argument('--result-dir', default='.', type=str, help='') +parser.add_argument('--batch-size', default=128, type=int, help='') +parser.add_argument('--network', default='iresnet50', type=str, help='') +parser.add_argument('--job', default='insightface', type=str, help='job name') +parser.add_argument('--target', default='IJBC', type=str, help='target, set to IJBC or IJBB') +args = parser.parse_args() + +target = args.target +model_path = args.model_prefix +image_path = args.image_path +result_dir = args.result_dir +gpu_id = None +use_norm_score = True # if Ture, TestMode(N1) +use_detector_score = True # if Ture, TestMode(D1) +use_flip_test = True # if Ture, TestMode(F1) +job = args.job +batch_size = args.batch_size + + +class Embedding(object): + def __init__(self, prefix, data_shape, batch_size=1): + image_size = (112, 112) + self.image_size = image_size + weight = torch.load(prefix) + resnet = get_model(args.network, dropout=0, fp16=False).cuda() + resnet.load_state_dict(weight) + model = torch.nn.DataParallel(resnet) + self.model = model + self.model.eval() + src = np.array([ + [30.2946, 51.6963], + [65.5318, 51.5014], + [48.0252, 71.7366], + [33.5493, 92.3655], + [62.7299, 92.2041]], dtype=np.float32) + src[:, 0] += 8.0 + self.src = src + self.batch_size = batch_size + self.data_shape = data_shape + + def get(self, rimg, landmark): + + assert landmark.shape[0] == 68 or landmark.shape[0] == 5 + assert landmark.shape[1] == 2 + if landmark.shape[0] == 68: + landmark5 = np.zeros((5, 2), dtype=np.float32) + landmark5[0] = (landmark[36] + landmark[39]) / 2 + landmark5[1] = (landmark[42] + landmark[45]) / 2 + landmark5[2] = landmark[30] + landmark5[3] = landmark[48] + landmark5[4] = landmark[54] + else: + 
landmark5 = landmark + tform = trans.SimilarityTransform() + tform.estimate(landmark5, self.src) + M = tform.params[0:2, :] + img = cv2.warpAffine(rimg, + M, (self.image_size[1], self.image_size[0]), + borderValue=0.0) + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + img_flip = np.fliplr(img) + img = np.transpose(img, (2, 0, 1)) # 3*112*112, RGB + img_flip = np.transpose(img_flip, (2, 0, 1)) + input_blob = np.zeros((2, 3, self.image_size[1], self.image_size[0]), dtype=np.uint8) + input_blob[0] = img + input_blob[1] = img_flip + return input_blob + + @torch.no_grad() + def forward_db(self, batch_data): + imgs = torch.Tensor(batch_data).cuda() + imgs.div_(255).sub_(0.5).div_(0.5) + feat = self.model(imgs) + feat = feat.reshape([self.batch_size, 2 * feat.shape[1]]) + return feat.cpu().numpy() + + +# 将一个list尽量均分成n份,限制len(list)==n,份数大于原list内元素个数则分配空list[] +def divideIntoNstrand(listTemp, n): + twoList = [[] for i in range(n)] + for i, e in enumerate(listTemp): + twoList[i % n].append(e) + return twoList + + +def read_template_media_list(path): + # ijb_meta = np.loadtxt(path, dtype=str) + ijb_meta = pd.read_csv(path, sep=' ', header=None).values + templates = ijb_meta[:, 1].astype(np.int) + medias = ijb_meta[:, 2].astype(np.int) + return templates, medias + + +# In[ ]: + + +def read_template_pair_list(path): + # pairs = np.loadtxt(path, dtype=str) + pairs = pd.read_csv(path, sep=' ', header=None).values + # print(pairs.shape) + # print(pairs[:, 0].astype(np.int)) + t1 = pairs[:, 0].astype(np.int) + t2 = pairs[:, 1].astype(np.int) + label = pairs[:, 2].astype(np.int) + return t1, t2, label + + +# In[ ]: + + +def read_image_feature(path): + with open(path, 'rb') as fid: + img_feats = pickle.load(fid) + return img_feats + + +# In[ ]: + + +def get_image_feature(img_path, files_list, model_path, epoch, gpu_id): + batch_size = args.batch_size + data_shape = (3, 112, 112) + + files = files_list + print('files:', len(files)) + rare_size = len(files) % batch_size + faceness_scores 
= [] + batch = 0 + img_feats = np.empty((len(files), 1024), dtype=np.float32) + + batch_data = np.empty((2 * batch_size, 3, 112, 112)) + embedding = Embedding(model_path, data_shape, batch_size) + for img_index, each_line in enumerate(files[:len(files) - rare_size]): + name_lmk_score = each_line.strip().split(' ') + img_name = os.path.join(img_path, name_lmk_score[0]) + img = cv2.imread(img_name) + lmk = np.array([float(x) for x in name_lmk_score[1:-1]], + dtype=np.float32) + lmk = lmk.reshape((5, 2)) + input_blob = embedding.get(img, lmk) + + batch_data[2 * (img_index - batch * batch_size)][:] = input_blob[0] + batch_data[2 * (img_index - batch * batch_size) + 1][:] = input_blob[1] + if (img_index + 1) % batch_size == 0: + print('batch', batch) + img_feats[batch * batch_size:batch * batch_size + + batch_size][:] = embedding.forward_db(batch_data) + batch += 1 + faceness_scores.append(name_lmk_score[-1]) + + batch_data = np.empty((2 * rare_size, 3, 112, 112)) + embedding = Embedding(model_path, data_shape, rare_size) + for img_index, each_line in enumerate(files[len(files) - rare_size:]): + name_lmk_score = each_line.strip().split(' ') + img_name = os.path.join(img_path, name_lmk_score[0]) + img = cv2.imread(img_name) + lmk = np.array([float(x) for x in name_lmk_score[1:-1]], + dtype=np.float32) + lmk = lmk.reshape((5, 2)) + input_blob = embedding.get(img, lmk) + batch_data[2 * img_index][:] = input_blob[0] + batch_data[2 * img_index + 1][:] = input_blob[1] + if (img_index + 1) % rare_size == 0: + print('batch', batch) + img_feats[len(files) - + rare_size:][:] = embedding.forward_db(batch_data) + batch += 1 + faceness_scores.append(name_lmk_score[-1]) + faceness_scores = np.array(faceness_scores).astype(np.float32) + # img_feats = np.ones( (len(files), 1024), dtype=np.float32) * 0.01 + # faceness_scores = np.ones( (len(files), ), dtype=np.float32 ) + return img_feats, faceness_scores + + +# In[ ]: + + +def image2template_feature(img_feats=None, templates=None, 
medias=None): + # ========================================================== + # 1. face image feature l2 normalization. img_feats:[number_image x feats_dim] + # 2. compute media feature. + # 3. compute template feature. + # ========================================================== + unique_templates = np.unique(templates) + template_feats = np.zeros((len(unique_templates), img_feats.shape[1])) + + for count_template, uqt in enumerate(unique_templates): + + (ind_t,) = np.where(templates == uqt) + face_norm_feats = img_feats[ind_t] + face_medias = medias[ind_t] + unique_medias, unique_media_counts = np.unique(face_medias, + return_counts=True) + media_norm_feats = [] + for u, ct in zip(unique_medias, unique_media_counts): + (ind_m,) = np.where(face_medias == u) + if ct == 1: + media_norm_feats += [face_norm_feats[ind_m]] + else: # image features from the same video will be aggregated into one feature + media_norm_feats += [ + np.mean(face_norm_feats[ind_m], axis=0, keepdims=True) + ] + media_norm_feats = np.array(media_norm_feats) + # media_norm_feats = media_norm_feats / np.sqrt(np.sum(media_norm_feats ** 2, -1, keepdims=True)) + template_feats[count_template] = np.sum(media_norm_feats, axis=0) + if count_template % 2000 == 0: + print('Finish Calculating {} template features.'.format( + count_template)) + # template_norm_feats = template_feats / np.sqrt(np.sum(template_feats ** 2, -1, keepdims=True)) + template_norm_feats = sklearn.preprocessing.normalize(template_feats) + # print(template_norm_feats.shape) + return template_norm_feats, unique_templates + + +# In[ ]: + + +def verification(template_norm_feats=None, + unique_templates=None, + p1=None, + p2=None): + # ========================================================== + # Compute set-to-set Similarity Score. 
+ # ========================================================== + template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int) + for count_template, uqt in enumerate(unique_templates): + template2id[uqt] = count_template + + score = np.zeros((len(p1),)) # save cosine distance between pairs + + total_pairs = np.array(range(len(p1))) + batchsize = 100000 # small batchsize instead of all pairs in one batch due to the memory limiation + sublists = [ + total_pairs[i:i + batchsize] for i in range(0, len(p1), batchsize) + ] + total_sublists = len(sublists) + for c, s in enumerate(sublists): + feat1 = template_norm_feats[template2id[p1[s]]] + feat2 = template_norm_feats[template2id[p2[s]]] + similarity_score = np.sum(feat1 * feat2, -1) + score[s] = similarity_score.flatten() + if c % 10 == 0: + print('Finish {}/{} pairs.'.format(c, total_sublists)) + return score + + +# In[ ]: +def verification2(template_norm_feats=None, + unique_templates=None, + p1=None, + p2=None): + template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int) + for count_template, uqt in enumerate(unique_templates): + template2id[uqt] = count_template + score = np.zeros((len(p1),)) # save cosine distance between pairs + total_pairs = np.array(range(len(p1))) + batchsize = 100000 # small batchsize instead of all pairs in one batch due to the memory limiation + sublists = [ + total_pairs[i:i + batchsize] for i in range(0, len(p1), batchsize) + ] + total_sublists = len(sublists) + for c, s in enumerate(sublists): + feat1 = template_norm_feats[template2id[p1[s]]] + feat2 = template_norm_feats[template2id[p2[s]]] + similarity_score = np.sum(feat1 * feat2, -1) + score[s] = similarity_score.flatten() + if c % 10 == 0: + print('Finish {}/{} pairs.'.format(c, total_sublists)) + return score + + +def read_score(path): + with open(path, 'rb') as fid: + img_feats = pickle.load(fid) + return img_feats + + +# # Step1: Load Meta Data + +# In[ ]: + +assert target == 'IJBC' or target == 'IJBB' + +# 
============================================================= +# load image and template relationships for template feature embedding +# tid --> template id, mid --> media id +# format: +# image_name tid mid +# ============================================================= +start = timeit.default_timer() +templates, medias = read_template_media_list( + os.path.join('%s/meta' % image_path, + '%s_face_tid_mid.txt' % target.lower())) +stop = timeit.default_timer() +print('Time: %.2f s. ' % (stop - start)) + +# In[ ]: + +# ============================================================= +# load template pairs for template-to-template verification +# tid : template id, label : 1/0 +# format: +# tid_1 tid_2 label +# ============================================================= +start = timeit.default_timer() +p1, p2, label = read_template_pair_list( + os.path.join('%s/meta' % image_path, + '%s_template_pair_label.txt' % target.lower())) +stop = timeit.default_timer() +print('Time: %.2f s. ' % (stop - start)) + +# # Step 2: Get Image Features + +# In[ ]: + +# ============================================================= +# load image features +# format: +# img_feats: [image_num x feats_dim] (227630, 512) +# ============================================================= +start = timeit.default_timer() +img_path = '%s/loose_crop' % image_path +img_list_path = '%s/meta/%s_name_5pts_score.txt' % (image_path, target.lower()) +img_list = open(img_list_path) +files = img_list.readlines() +# files_list = divideIntoNstrand(files, rank_size) +files_list = files + +# img_feats +# for i in range(rank_size): +img_feats, faceness_scores = get_image_feature(img_path, files_list, + model_path, 0, gpu_id) +stop = timeit.default_timer() +print('Time: %.2f s. 
' % (stop - start)) +print('Feature Shape: ({} , {}) .'.format(img_feats.shape[0], + img_feats.shape[1])) + +# # Step3: Get Template Features + +# In[ ]: + +# ============================================================= +# compute template features from image features. +# ============================================================= +start = timeit.default_timer() +# ========================================================== +# Norm feature before aggregation into template feature? +# Feature norm from embedding network and faceness score are able to decrease weights for noise samples (not face). +# ========================================================== +# 1. FaceScore (Feature Norm) +# 2. FaceScore (Detector) + +if use_flip_test: + # concat --- F1 + # img_input_feats = img_feats + # add --- F2 + img_input_feats = img_feats[:, 0:img_feats.shape[1] // + 2] + img_feats[:, img_feats.shape[1] // 2:] +else: + img_input_feats = img_feats[:, 0:img_feats.shape[1] // 2] + +if use_norm_score: + img_input_feats = img_input_feats +else: + # normalise features to remove norm information + img_input_feats = img_input_feats / np.sqrt( + np.sum(img_input_feats ** 2, -1, keepdims=True)) + +if use_detector_score: + print(img_input_feats.shape, faceness_scores.shape) + img_input_feats = img_input_feats * faceness_scores[:, np.newaxis] +else: + img_input_feats = img_input_feats + +template_norm_feats, unique_templates = image2template_feature( + img_input_feats, templates, medias) +stop = timeit.default_timer() +print('Time: %.2f s. ' % (stop - start)) + +# # Step 4: Get Template Similarity Scores + +# In[ ]: + +# ============================================================= +# compute verification scores between template pairs. +# ============================================================= +start = timeit.default_timer() +score = verification(template_norm_feats, unique_templates, p1, p2) +stop = timeit.default_timer() +print('Time: %.2f s. 
' % (stop - start)) + +# In[ ]: +save_path = os.path.join(result_dir, args.job) +# save_path = result_dir + '/%s_result' % target + +if not os.path.exists(save_path): + os.makedirs(save_path) + +score_save_file = os.path.join(save_path, "%s.npy" % target.lower()) +np.save(score_save_file, score) + +# # Step 5: Get ROC Curves and TPR@FPR Table + +# In[ ]: + +files = [score_save_file] +methods = [] +scores = [] +for file in files: + methods.append(Path(file).stem) + scores.append(np.load(file)) + +methods = np.array(methods) +scores = dict(zip(methods, scores)) +colours = dict( + zip(methods, sample_colours_from_colourmap(methods.shape[0], 'Set2'))) +x_labels = [10 ** -6, 10 ** -5, 10 ** -4, 10 ** -3, 10 ** -2, 10 ** -1] +tpr_fpr_table = PrettyTable(['Methods'] + [str(x) for x in x_labels]) +fig = plt.figure() +for method in methods: + fpr, tpr, _ = roc_curve(label, scores[method]) + roc_auc = auc(fpr, tpr) + fpr = np.flipud(fpr) + tpr = np.flipud(tpr) # select largest tpr at same fpr + plt.plot(fpr, + tpr, + color=colours[method], + lw=1, + label=('[%s (AUC = %0.4f %%)]' % + (method.split('-')[-1], roc_auc * 100))) + tpr_fpr_row = [] + tpr_fpr_row.append("%s-%s" % (method, target)) + for fpr_iter in np.arange(len(x_labels)): + _, min_index = min( + list(zip(abs(fpr - x_labels[fpr_iter]), range(len(fpr))))) + tpr_fpr_row.append('%.2f' % (tpr[min_index] * 100)) + tpr_fpr_table.add_row(tpr_fpr_row) +plt.xlim([10 ** -6, 0.1]) +plt.ylim([0.3, 1.0]) +plt.grid(linestyle='--', linewidth=1) +plt.xticks(x_labels) +plt.yticks(np.linspace(0.3, 1.0, 8, endpoint=True)) +plt.xscale('log') +plt.xlabel('False Positive Rate') +plt.ylabel('True Positive Rate') +plt.title('ROC on IJB') +plt.legend(loc="lower right") +fig.savefig(os.path.join(save_path, '%s.pdf' % target.lower())) +print(tpr_fpr_table) diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/flops.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/flops.py new file mode 
100644 index 00000000..e704b7b5 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/flops.py @@ -0,0 +1,20 @@ +from ptflops import get_model_complexity_info +from backbones import get_model +import argparse + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='') + parser.add_argument('n', type=str, default="r100") + args = parser.parse_args() + net = get_model(args.n) + macs, params = get_model_complexity_info( + net, (3, 112, 112), as_strings=False, + print_per_layer_stat=True, verbose=True) + gmacs = macs / (1000**3) + print("%.3f GFLOPs"%gmacs) + print("%.3f Mparams"%(params/(1000**2))) + + if hasattr(net, "extra_gflops"): + print("%.3f Extra-GFLOPs"%net.extra_gflops) + print("%.3f Total-GFLOPs"%(gmacs+net.extra_gflops)) + diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/inference.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/inference.py new file mode 100644 index 00000000..3e5156e8 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/inference.py @@ -0,0 +1,35 @@ +import argparse + +import cv2 +import numpy as np +import torch + +from backbones import get_model + + +@torch.no_grad() +def inference(weight, name, img): + if img is None: + img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.uint8) + else: + img = cv2.imread(img) + img = cv2.resize(img, (112, 112)) + + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + img = np.transpose(img, (2, 0, 1)) + img = torch.from_numpy(img).unsqueeze(0).float() + img.div_(255).sub_(0.5).div_(0.5) + net = get_model(name, fp16=False) + net.load_state_dict(torch.load(weight)) + net.eval() + feat = net(img).numpy() + print(feat) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='PyTorch ArcFace Training') + parser.add_argument('--network', type=str, default='r50', help='backbone network') + parser.add_argument('--weight', type=str, 
default='') + parser.add_argument('--img', type=str, default=None) + args = parser.parse_args() + inference(args.weight, args.network, args.img) diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/losses.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/losses.py new file mode 100644 index 00000000..e0b4585f --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/losses.py @@ -0,0 +1,100 @@ +import torch +import math + + +class CombinedMarginLoss(torch.nn.Module): + def __init__(self, + s, + m1, + m2, + m3, + interclass_filtering_threshold=0): + super().__init__() + self.s = s + self.m1 = m1 + self.m2 = m2 + self.m3 = m3 + self.interclass_filtering_threshold = interclass_filtering_threshold + + # For ArcFace + self.cos_m = math.cos(self.m2) + self.sin_m = math.sin(self.m2) + self.theta = math.cos(math.pi - self.m2) + self.sinmm = math.sin(math.pi - self.m2) * self.m2 + self.easy_margin = False + + + def forward(self, logits, labels): + index_positive = torch.where(labels != -1)[0] + + if self.interclass_filtering_threshold > 0: + with torch.no_grad(): + dirty = logits > self.interclass_filtering_threshold + dirty = dirty.float() + mask = torch.ones([index_positive.size(0), logits.size(1)], device=logits.device) + mask.scatter_(1, labels[index_positive], 0) + dirty[index_positive] *= mask + tensor_mul = 1 - dirty + logits = tensor_mul * logits + + target_logit = logits[index_positive, labels[index_positive].view(-1)] + + if self.m1 == 1.0 and self.m3 == 0.0: + with torch.no_grad(): + target_logit.arccos_() + logits.arccos_() + final_target_logit = target_logit + self.m2 + logits[index_positive, labels[index_positive].view(-1)] = final_target_logit + logits.cos_() + logits = logits * self.s + + elif self.m3 > 0: + final_target_logit = target_logit - self.m3 + logits[index_positive, labels[index_positive].view(-1)] = final_target_logit + logits = logits * self.s + else: + raise + + 
return logits + +class ArcFace(torch.nn.Module): + """ ArcFace (https://arxiv.org/pdf/1801.07698v1.pdf): + """ + def __init__(self, s=64.0, margin=0.5): + super(ArcFace, self).__init__() + self.scale = s + self.margin = margin + self.cos_m = math.cos(margin) + self.sin_m = math.sin(margin) + self.theta = math.cos(math.pi - margin) + self.sinmm = math.sin(math.pi - margin) * margin + self.easy_margin = False + + + def forward(self, logits: torch.Tensor, labels: torch.Tensor): + index = torch.where(labels != -1)[0] + target_logit = logits[index, labels[index].view(-1)] + + with torch.no_grad(): + target_logit.arccos_() + logits.arccos_() + final_target_logit = target_logit + self.margin + logits[index, labels[index].view(-1)] = final_target_logit + logits.cos_() + logits = logits * self.s + return logits + + +class CosFace(torch.nn.Module): + def __init__(self, s=64.0, m=0.40): + super(CosFace, self).__init__() + self.s = s + self.m = m + + def forward(self, logits: torch.Tensor, labels: torch.Tensor): + index = torch.where(labels != -1)[0] + target_logit = logits[index, labels[index].view(-1)] + final_target_logit = target_logit - self.m + logits[index, labels[index].view(-1)] = final_target_logit + logits = logits * self.s + return logits diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/lr_scheduler.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/lr_scheduler.py new file mode 100644 index 00000000..7a703335 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/lr_scheduler.py @@ -0,0 +1,30 @@ +from torch.optim.lr_scheduler import _LRScheduler + + +class PolyScheduler(_LRScheduler): + def __init__(self, optimizer, base_lr, max_steps, warmup_steps, last_epoch=-1): + self.base_lr = base_lr + self.warmup_lr_init = 0.0001 + self.max_steps: int = max_steps + self.warmup_steps: int = warmup_steps + self.power = 2 + super(PolyScheduler, self).__init__(optimizer, -1, False) + 
self.last_epoch = last_epoch + + def get_warmup_lr(self): + alpha = float(self.last_epoch) / float(self.warmup_steps) + return [self.base_lr * alpha for _ in self.optimizer.param_groups] + + def get_lr(self): + if self.last_epoch == -1: + return [self.warmup_lr_init for _ in self.optimizer.param_groups] + if self.last_epoch < self.warmup_steps: + return self.get_warmup_lr() + else: + alpha = pow( + 1 + - float(self.last_epoch - self.warmup_steps) + / float(self.max_steps - self.warmup_steps), + self.power, + ) + return [self.base_lr * alpha for _ in self.optimizer.param_groups] diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/onnx_helper.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/onnx_helper.py new file mode 100644 index 00000000..ca922ca6 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/onnx_helper.py @@ -0,0 +1,250 @@ +from __future__ import division +import datetime +import os +import os.path as osp +import glob +import numpy as np +import cv2 +import sys +import onnxruntime +import onnx +import argparse +from onnx import numpy_helper +from insightface.data import get_image + +class ArcFaceORT: + def __init__(self, model_path, cpu=False): + self.model_path = model_path + # providers = None will use available provider, for onnxruntime-gpu it will be "CUDAExecutionProvider" + self.providers = ['CPUExecutionProvider'] if cpu else None + + #input_size is (w,h), return error message, return None if success + def check(self, track='cfat', test_img = None): + #default is cfat + max_model_size_mb=1024 + max_feat_dim=512 + max_time_cost=15 + if track.startswith('ms1m'): + max_model_size_mb=1024 + max_feat_dim=512 + max_time_cost=10 + elif track.startswith('glint'): + max_model_size_mb=1024 + max_feat_dim=1024 + max_time_cost=20 + elif track.startswith('cfat'): + max_model_size_mb = 1024 + max_feat_dim = 512 + max_time_cost = 15 + elif track.startswith('unconstrained'): 
+ max_model_size_mb=1024 + max_feat_dim=1024 + max_time_cost=30 + else: + return "track not found" + + if not os.path.exists(self.model_path): + return "model_path not exists" + if not os.path.isdir(self.model_path): + return "model_path should be directory" + onnx_files = [] + for _file in os.listdir(self.model_path): + if _file.endswith('.onnx'): + onnx_files.append(osp.join(self.model_path, _file)) + if len(onnx_files)==0: + return "do not have onnx files" + self.model_file = sorted(onnx_files)[-1] + print('use onnx-model:', self.model_file) + try: + session = onnxruntime.InferenceSession(self.model_file, providers=self.providers) + except: + return "load onnx failed" + input_cfg = session.get_inputs()[0] + input_shape = input_cfg.shape + print('input-shape:', input_shape) + if len(input_shape)!=4: + return "length of input_shape should be 4" + if not isinstance(input_shape[0], str): + #return "input_shape[0] should be str to support batch-inference" + print('reset input-shape[0] to None') + model = onnx.load(self.model_file) + model.graph.input[0].type.tensor_type.shape.dim[0].dim_param = 'None' + new_model_file = osp.join(self.model_path, 'zzzzrefined.onnx') + onnx.save(model, new_model_file) + self.model_file = new_model_file + print('use new onnx-model:', self.model_file) + try: + session = onnxruntime.InferenceSession(self.model_file, providers=self.providers) + except: + return "load onnx failed" + input_cfg = session.get_inputs()[0] + input_shape = input_cfg.shape + print('new-input-shape:', input_shape) + + self.image_size = tuple(input_shape[2:4][::-1]) + #print('image_size:', self.image_size) + input_name = input_cfg.name + outputs = session.get_outputs() + output_names = [] + for o in outputs: + output_names.append(o.name) + #print(o.name, o.shape) + if len(output_names)!=1: + return "number of output nodes should be 1" + self.session = session + self.input_name = input_name + self.output_names = output_names + #print(self.output_names) + model = 
onnx.load(self.model_file) + graph = model.graph + if len(graph.node)<8: + return "too small onnx graph" + + input_size = (112,112) + self.crop = None + if track=='cfat': + crop_file = osp.join(self.model_path, 'crop.txt') + if osp.exists(crop_file): + lines = open(crop_file,'r').readlines() + if len(lines)!=6: + return "crop.txt should contain 6 lines" + lines = [int(x) for x in lines] + self.crop = lines[:4] + input_size = tuple(lines[4:6]) + if input_size!=self.image_size: + return "input-size is inconsistant with onnx model input, %s vs %s"%(input_size, self.image_size) + + self.model_size_mb = os.path.getsize(self.model_file) / float(1024*1024) + if self.model_size_mb > max_model_size_mb: + return "max model size exceed, given %.3f-MB"%self.model_size_mb + + input_mean = None + input_std = None + if track=='cfat': + pn_file = osp.join(self.model_path, 'pixel_norm.txt') + if osp.exists(pn_file): + lines = open(pn_file,'r').readlines() + if len(lines)!=2: + return "pixel_norm.txt should contain 2 lines" + input_mean = float(lines[0]) + input_std = float(lines[1]) + if input_mean is not None or input_std is not None: + if input_mean is None or input_std is None: + return "please set input_mean and input_std simultaneously" + else: + find_sub = False + find_mul = False + for nid, node in enumerate(graph.node[:8]): + print(nid, node.name) + if node.name.startswith('Sub') or node.name.startswith('_minus'): + find_sub = True + if node.name.startswith('Mul') or node.name.startswith('_mul') or node.name.startswith('Div'): + find_mul = True + if find_sub and find_mul: + print("find sub and mul") + #mxnet arcface model + input_mean = 0.0 + input_std = 1.0 + else: + input_mean = 127.5 + input_std = 127.5 + self.input_mean = input_mean + self.input_std = input_std + for initn in graph.initializer: + weight_array = numpy_helper.to_array(initn) + dt = weight_array.dtype + if dt.itemsize<4: + return 'invalid weight type - (%s:%s)' % (initn.name, dt.name) + if test_img is 
None: + test_img = get_image('Tom_Hanks_54745') + test_img = cv2.resize(test_img, self.image_size) + else: + test_img = cv2.resize(test_img, self.image_size) + feat, cost = self.benchmark(test_img) + batch_result = self.check_batch(test_img) + batch_result_sum = float(np.sum(batch_result)) + if batch_result_sum in [float('inf'), -float('inf')] or batch_result_sum != batch_result_sum: + print(batch_result) + print(batch_result_sum) + return "batch result output contains NaN!" + + if len(feat.shape) < 2: + return "the shape of the feature must be two, but get {}".format(str(feat.shape)) + + if feat.shape[1] > max_feat_dim: + return "max feat dim exceed, given %d"%feat.shape[1] + self.feat_dim = feat.shape[1] + cost_ms = cost*1000 + if cost_ms>max_time_cost: + return "max time cost exceed, given %.4f"%cost_ms + self.cost_ms = cost_ms + print('check stat:, model-size-mb: %.4f, feat-dim: %d, time-cost-ms: %.4f, input-mean: %.3f, input-std: %.3f'%(self.model_size_mb, self.feat_dim, self.cost_ms, self.input_mean, self.input_std)) + return None + + def check_batch(self, img): + if not isinstance(img, list): + imgs = [img, ] * 32 + if self.crop is not None: + nimgs = [] + for img in imgs: + nimg = img[self.crop[1]:self.crop[3], self.crop[0]:self.crop[2], :] + if nimg.shape[0] != self.image_size[1] or nimg.shape[1] != self.image_size[0]: + nimg = cv2.resize(nimg, self.image_size) + nimgs.append(nimg) + imgs = nimgs + blob = cv2.dnn.blobFromImages( + images=imgs, scalefactor=1.0 / self.input_std, size=self.image_size, + mean=(self.input_mean, self.input_mean, self.input_mean), swapRB=True) + net_out = self.session.run(self.output_names, {self.input_name: blob})[0] + return net_out + + + def meta_info(self): + return {'model-size-mb':self.model_size_mb, 'feature-dim':self.feat_dim, 'infer': self.cost_ms} + + + def forward(self, imgs): + if not isinstance(imgs, list): + imgs = [imgs] + input_size = self.image_size + if self.crop is not None: + nimgs = [] + for img in imgs: + 
nimg = img[self.crop[1]:self.crop[3],self.crop[0]:self.crop[2],:] + if nimg.shape[0]!=input_size[1] or nimg.shape[1]!=input_size[0]: + nimg = cv2.resize(nimg, input_size) + nimgs.append(nimg) + imgs = nimgs + blob = cv2.dnn.blobFromImages(imgs, 1.0/self.input_std, input_size, (self.input_mean, self.input_mean, self.input_mean), swapRB=True) + net_out = self.session.run(self.output_names, {self.input_name : blob})[0] + return net_out + + def benchmark(self, img): + input_size = self.image_size + if self.crop is not None: + nimg = img[self.crop[1]:self.crop[3],self.crop[0]:self.crop[2],:] + if nimg.shape[0]!=input_size[1] or nimg.shape[1]!=input_size[0]: + nimg = cv2.resize(nimg, input_size) + img = nimg + blob = cv2.dnn.blobFromImage(img, 1.0/self.input_std, input_size, (self.input_mean, self.input_mean, self.input_mean), swapRB=True) + costs = [] + for _ in range(50): + ta = datetime.datetime.now() + net_out = self.session.run(self.output_names, {self.input_name : blob})[0] + tb = datetime.datetime.now() + cost = (tb-ta).total_seconds() + costs.append(cost) + costs = sorted(costs) + cost = costs[5] + return net_out, cost + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='') + # general + parser.add_argument('workdir', help='submitted work dir', type=str) + parser.add_argument('--track', help='track name, for different challenge', type=str, default='cfat') + args = parser.parse_args() + handler = ArcFaceORT(args.workdir) + err = handler.check(args.track) + print('err:', err) diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/onnx_ijbc.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/onnx_ijbc.py new file mode 100644 index 00000000..31c491b1 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/onnx_ijbc.py @@ -0,0 +1,269 @@ +import argparse +import os +import pickle +import timeit + +import cv2 +import mxnet as mx +import numpy as np +import pandas 
as pd +import prettytable +import skimage.transform +import torch +from sklearn.metrics import roc_curve +from sklearn.preprocessing import normalize +from torch.utils.data import DataLoader +from onnx_helper import ArcFaceORT + +SRC = np.array( + [ + [30.2946, 51.6963], + [65.5318, 51.5014], + [48.0252, 71.7366], + [33.5493, 92.3655], + [62.7299, 92.2041]] + , dtype=np.float32) +SRC[:, 0] += 8.0 + + +@torch.no_grad() +class AlignedDataSet(mx.gluon.data.Dataset): + def __init__(self, root, lines, align=True): + self.lines = lines + self.root = root + self.align = align + + def __len__(self): + return len(self.lines) + + def __getitem__(self, idx): + each_line = self.lines[idx] + name_lmk_score = each_line.strip().split(' ') + name = os.path.join(self.root, name_lmk_score[0]) + img = cv2.cvtColor(cv2.imread(name), cv2.COLOR_BGR2RGB) + landmark5 = np.array([float(x) for x in name_lmk_score[1:-1]], dtype=np.float32).reshape((5, 2)) + st = skimage.transform.SimilarityTransform() + st.estimate(landmark5, SRC) + img = cv2.warpAffine(img, st.params[0:2, :], (112, 112), borderValue=0.0) + img_1 = np.expand_dims(img, 0) + img_2 = np.expand_dims(np.fliplr(img), 0) + output = np.concatenate((img_1, img_2), axis=0).astype(np.float32) + output = np.transpose(output, (0, 3, 1, 2)) + return torch.from_numpy(output) + + +@torch.no_grad() +def extract(model_root, dataset): + model = ArcFaceORT(model_path=model_root) + model.check() + feat_mat = np.zeros(shape=(len(dataset), 2 * model.feat_dim)) + + def collate_fn(data): + return torch.cat(data, dim=0) + + data_loader = DataLoader( + dataset, batch_size=128, drop_last=False, num_workers=4, collate_fn=collate_fn, ) + num_iter = 0 + for batch in data_loader: + batch = batch.numpy() + batch = (batch - model.input_mean) / model.input_std + feat = model.session.run(model.output_names, {model.input_name: batch})[0] + feat = np.reshape(feat, (-1, model.feat_dim * 2)) + feat_mat[128 * num_iter: 128 * num_iter + feat.shape[0], :] = feat + 
num_iter += 1 + if num_iter % 50 == 0: + print(num_iter) + return feat_mat + + +def read_template_media_list(path): + ijb_meta = pd.read_csv(path, sep=' ', header=None).values + templates = ijb_meta[:, 1].astype(np.int) + medias = ijb_meta[:, 2].astype(np.int) + return templates, medias + + +def read_template_pair_list(path): + pairs = pd.read_csv(path, sep=' ', header=None).values + t1 = pairs[:, 0].astype(np.int) + t2 = pairs[:, 1].astype(np.int) + label = pairs[:, 2].astype(np.int) + return t1, t2, label + + +def read_image_feature(path): + with open(path, 'rb') as fid: + img_feats = pickle.load(fid) + return img_feats + + +def image2template_feature(img_feats=None, + templates=None, + medias=None): + unique_templates = np.unique(templates) + template_feats = np.zeros((len(unique_templates), img_feats.shape[1])) + for count_template, uqt in enumerate(unique_templates): + (ind_t,) = np.where(templates == uqt) + face_norm_feats = img_feats[ind_t] + face_medias = medias[ind_t] + unique_medias, unique_media_counts = np.unique(face_medias, return_counts=True) + media_norm_feats = [] + for u, ct in zip(unique_medias, unique_media_counts): + (ind_m,) = np.where(face_medias == u) + if ct == 1: + media_norm_feats += [face_norm_feats[ind_m]] + else: # image features from the same video will be aggregated into one feature + media_norm_feats += [np.mean(face_norm_feats[ind_m], axis=0, keepdims=True), ] + media_norm_feats = np.array(media_norm_feats) + template_feats[count_template] = np.sum(media_norm_feats, axis=0) + if count_template % 2000 == 0: + print('Finish Calculating {} template features.'.format( + count_template)) + template_norm_feats = normalize(template_feats) + return template_norm_feats, unique_templates + + +def verification(template_norm_feats=None, + unique_templates=None, + p1=None, + p2=None): + template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int) + for count_template, uqt in enumerate(unique_templates): + template2id[uqt] = count_template 
+ score = np.zeros((len(p1),)) + total_pairs = np.array(range(len(p1))) + batchsize = 100000 + sublists = [total_pairs[i: i + batchsize] for i in range(0, len(p1), batchsize)] + total_sublists = len(sublists) + for c, s in enumerate(sublists): + feat1 = template_norm_feats[template2id[p1[s]]] + feat2 = template_norm_feats[template2id[p2[s]]] + similarity_score = np.sum(feat1 * feat2, -1) + score[s] = similarity_score.flatten() + if c % 10 == 0: + print('Finish {}/{} pairs.'.format(c, total_sublists)) + return score + + +def verification2(template_norm_feats=None, + unique_templates=None, + p1=None, + p2=None): + template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int) + for count_template, uqt in enumerate(unique_templates): + template2id[uqt] = count_template + score = np.zeros((len(p1),)) # save cosine distance between pairs + total_pairs = np.array(range(len(p1))) + batchsize = 100000 # small batchsize instead of all pairs in one batch due to the memory limiation + sublists = [total_pairs[i:i + batchsize] for i in range(0, len(p1), batchsize)] + total_sublists = len(sublists) + for c, s in enumerate(sublists): + feat1 = template_norm_feats[template2id[p1[s]]] + feat2 = template_norm_feats[template2id[p2[s]]] + similarity_score = np.sum(feat1 * feat2, -1) + score[s] = similarity_score.flatten() + if c % 10 == 0: + print('Finish {}/{} pairs.'.format(c, total_sublists)) + return score + + +def main(args): + use_norm_score = True # if Ture, TestMode(N1) + use_detector_score = True # if Ture, TestMode(D1) + use_flip_test = True # if Ture, TestMode(F1) + assert args.target == 'IJBC' or args.target == 'IJBB' + + start = timeit.default_timer() + templates, medias = read_template_media_list( + os.path.join('%s/meta' % args.image_path, '%s_face_tid_mid.txt' % args.target.lower())) + stop = timeit.default_timer() + print('Time: %.2f s. 
' % (stop - start)) + + start = timeit.default_timer() + p1, p2, label = read_template_pair_list( + os.path.join('%s/meta' % args.image_path, + '%s_template_pair_label.txt' % args.target.lower())) + stop = timeit.default_timer() + print('Time: %.2f s. ' % (stop - start)) + + start = timeit.default_timer() + img_path = '%s/loose_crop' % args.image_path + img_list_path = '%s/meta/%s_name_5pts_score.txt' % (args.image_path, args.target.lower()) + img_list = open(img_list_path) + files = img_list.readlines() + dataset = AlignedDataSet(root=img_path, lines=files, align=True) + img_feats = extract(args.model_root, dataset) + + faceness_scores = [] + for each_line in files: + name_lmk_score = each_line.split() + faceness_scores.append(name_lmk_score[-1]) + faceness_scores = np.array(faceness_scores).astype(np.float32) + stop = timeit.default_timer() + print('Time: %.2f s. ' % (stop - start)) + print('Feature Shape: ({} , {}) .'.format(img_feats.shape[0], img_feats.shape[1])) + start = timeit.default_timer() + + if use_flip_test: + img_input_feats = img_feats[:, 0:img_feats.shape[1] // 2] + img_feats[:, img_feats.shape[1] // 2:] + else: + img_input_feats = img_feats[:, 0:img_feats.shape[1] // 2] + + if use_norm_score: + img_input_feats = img_input_feats + else: + img_input_feats = img_input_feats / np.sqrt(np.sum(img_input_feats ** 2, -1, keepdims=True)) + + if use_detector_score: + print(img_input_feats.shape, faceness_scores.shape) + img_input_feats = img_input_feats * faceness_scores[:, np.newaxis] + else: + img_input_feats = img_input_feats + + template_norm_feats, unique_templates = image2template_feature( + img_input_feats, templates, medias) + stop = timeit.default_timer() + print('Time: %.2f s. ' % (stop - start)) + + start = timeit.default_timer() + score = verification(template_norm_feats, unique_templates, p1, p2) + stop = timeit.default_timer() + print('Time: %.2f s. 
' % (stop - start)) + result_dir = args.model_root + + save_path = os.path.join(result_dir, "{}_result".format(args.target)) + if not os.path.exists(save_path): + os.makedirs(save_path) + score_save_file = os.path.join(save_path, "{}.npy".format(args.target)) + np.save(score_save_file, score) + files = [score_save_file] + methods = [] + scores = [] + for file in files: + methods.append(os.path.basename(file)) + scores.append(np.load(file)) + methods = np.array(methods) + scores = dict(zip(methods, scores)) + x_labels = [10 ** -6, 10 ** -5, 10 ** -4, 10 ** -3, 10 ** -2, 10 ** -1] + tpr_fpr_table = prettytable.PrettyTable(['Methods'] + [str(x) for x in x_labels]) + for method in methods: + fpr, tpr, _ = roc_curve(label, scores[method]) + fpr = np.flipud(fpr) + tpr = np.flipud(tpr) + tpr_fpr_row = [] + tpr_fpr_row.append("%s-%s" % (method, args.target)) + for fpr_iter in np.arange(len(x_labels)): + _, min_index = min( + list(zip(abs(fpr - x_labels[fpr_iter]), range(len(fpr))))) + tpr_fpr_row.append('%.2f' % (tpr[min_index] * 100)) + tpr_fpr_table.add_row(tpr_fpr_row) + print(tpr_fpr_table) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='do ijb test') + # general + parser.add_argument('--model-root', default='', help='path to load model.') + parser.add_argument('--image-path', default='/train_tmp/IJB_release/IJBC', type=str, help='') + parser.add_argument('--target', default='IJBC', type=str, help='target, set to IJBC or IJBB') + main(parser.parse_args()) diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/partial_fc.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/partial_fc.py new file mode 100644 index 00000000..eeff29d8 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/partial_fc.py @@ -0,0 +1,531 @@ +import collections +from typing import Callable + +import torch +from torch import distributed +from torch.nn.functional import linear, 
normalize + + +class PartialFC(torch.nn.Module): + """ + https://arxiv.org/abs/2203.15565 + A distributed sparsely updating variant of the FC layer, named Partial FC (PFC). + + When sample rate less than 1, in each iteration, positive class centers and a random subset of + negative class centers are selected to compute the margin-based softmax loss, all class + centers are still maintained throughout the whole training process, but only a subset is + selected and updated in each iteration. + + .. note:: + When sample rate equal to 1, Partial FC is equal to model parallelism(default sample rate is 1). + + Example: + -------- + >>> module_pfc = PartialFC(embedding_size=512, num_classes=8000000, sample_rate=0.2) + >>> for img, labels in data_loader: + >>> embeddings = net(img) + >>> loss = module_pfc(embeddings, labels, optimizer) + >>> loss.backward() + >>> optimizer.step() + """ + _version = 1 + def __init__( + self, + margin_loss: Callable, + embedding_size: int, + num_classes: int, + sample_rate: float = 1.0, + fp16: bool = False, + ): + """ + Paramenters: + ----------- + embedding_size: int + The dimension of embedding, required + num_classes: int + Total number of classes, required + sample_rate: float + The rate of negative centers participating in the calculation, default is 1.0. 
+ """ + super(PartialFC, self).__init__() + assert ( + distributed.is_initialized() + ), "must initialize distributed before create this" + self.rank = distributed.get_rank() + self.world_size = distributed.get_world_size() + + self.dist_cross_entropy = DistCrossEntropy() + self.embedding_size = embedding_size + self.sample_rate: float = sample_rate + self.fp16 = fp16 + self.num_local: int = num_classes // self.world_size + int( + self.rank < num_classes % self.world_size + ) + self.class_start: int = num_classes // self.world_size * self.rank + min( + self.rank, num_classes % self.world_size + ) + self.num_sample: int = int(self.sample_rate * self.num_local) + self.last_batch_size: int = 0 + self.weight: torch.Tensor + self.weight_mom: torch.Tensor + self.weight_activated: torch.nn.Parameter + self.weight_activated_mom: torch.Tensor + self.is_updated: bool = True + self.init_weight_update: bool = True + + if self.sample_rate < 1: + self.register_buffer("weight", + tensor=torch.normal(0, 0.01, (self.num_local, embedding_size))) + self.register_buffer("weight_mom", + tensor=torch.zeros_like(self.weight)) + self.register_parameter("weight_activated", + param=torch.nn.Parameter(torch.empty(0, 0))) + self.register_buffer("weight_activated_mom", + tensor=torch.empty(0, 0)) + self.register_buffer("weight_index", + tensor=torch.empty(0, 0)) + else: + self.weight_activated = torch.nn.Parameter(torch.normal(0, 0.01, (self.num_local, embedding_size))) + + # margin_loss + if isinstance(margin_loss, Callable): + self.margin_softmax = margin_loss + else: + raise + + @torch.no_grad() + def sample(self, + labels: torch.Tensor, + index_positive: torch.Tensor, + optimizer: torch.optim.Optimizer): + """ + This functions will change the value of labels + + Parameters: + ----------- + labels: torch.Tensor + pass + index_positive: torch.Tensor + pass + optimizer: torch.optim.Optimizer + pass + """ + positive = torch.unique(labels[index_positive], sorted=True).cuda() + if 
self.num_sample - positive.size(0) >= 0: + perm = torch.rand(size=[self.num_local]).cuda() + perm[positive] = 2.0 + index = torch.topk(perm, k=self.num_sample)[1].cuda() + index = index.sort()[0].cuda() + else: + index = positive + self.weight_index = index + + labels[index_positive] = torch.searchsorted(index, labels[index_positive]) + + self.weight_activated = torch.nn.Parameter(self.weight[self.weight_index]) + self.weight_activated_mom = self.weight_mom[self.weight_index] + + if isinstance(optimizer, torch.optim.SGD): + # TODO the params of partial fc must be last in the params list + optimizer.state.pop(optimizer.param_groups[-1]["params"][0], None) + optimizer.param_groups[-1]["params"][0] = self.weight_activated + optimizer.state[self.weight_activated][ + "momentum_buffer" + ] = self.weight_activated_mom + else: + raise + + @torch.no_grad() + def update(self): + """ partial weight to global + """ + if self.init_weight_update: + self.init_weight_update = False + return + + if self.sample_rate < 1: + self.weight[self.weight_index] = self.weight_activated + self.weight_mom[self.weight_index] = self.weight_activated_mom + + + def forward( + self, + local_embeddings: torch.Tensor, + local_labels: torch.Tensor, + optimizer: torch.optim.Optimizer, + ): + """ + Parameters: + ---------- + local_embeddings: torch.Tensor + feature embeddings on each GPU(Rank). + local_labels: torch.Tensor + labels on each GPU(Rank). 
+ + Returns: + ------- + loss: torch.Tensor + pass + """ + local_labels.squeeze_() + local_labels = local_labels.long() + self.update() + + batch_size = local_embeddings.size(0) + if self.last_batch_size == 0: + self.last_batch_size = batch_size + assert self.last_batch_size == batch_size, ( + "last batch size do not equal current batch size: {} vs {}".format( + self.last_batch_size, batch_size)) + + _gather_embeddings = [ + torch.zeros((batch_size, self.embedding_size)).cuda() + for _ in range(self.world_size) + ] + _gather_labels = [ + torch.zeros(batch_size).long().cuda() for _ in range(self.world_size) + ] + _list_embeddings = AllGather(local_embeddings, *_gather_embeddings) + distributed.all_gather(_gather_labels, local_labels) + + embeddings = torch.cat(_list_embeddings) + labels = torch.cat(_gather_labels) + + labels = labels.view(-1, 1) + index_positive = (self.class_start <= labels) & ( + labels < self.class_start + self.num_local + ) + labels[~index_positive] = -1 + labels[index_positive] -= self.class_start + + if self.sample_rate < 1: + self.sample(labels, index_positive, optimizer) + + with torch.cuda.amp.autocast(self.fp16): + norm_embeddings = normalize(embeddings) + norm_weight_activated = normalize(self.weight_activated) + logits = linear(norm_embeddings, norm_weight_activated) + if self.fp16: + logits = logits.float() + logits = logits.clamp(-1, 1) + + logits = self.margin_softmax(logits, labels) + loss = self.dist_cross_entropy(logits, labels) + return loss + + def state_dict(self, destination=None, prefix="", keep_vars=False): + if destination is None: + destination = collections.OrderedDict() + destination._metadata = collections.OrderedDict() + + for name, module in self._modules.items(): + if module is not None: + module.state_dict(destination, prefix + name + ".", keep_vars=keep_vars) + if self.sample_rate < 1: + destination["weight"] = self.weight.detach() + else: + destination["weight"] = self.weight_activated.data.detach() + return 
destination + + def load_state_dict(self, state_dict, strict: bool = True): + if self.sample_rate < 1: + self.weight = state_dict["weight"].to(self.weight.device) + self.weight_mom.zero_() + self.weight_activated.data.zero_() + self.weight_activated_mom.zero_() + self.weight_index.zero_() + else: + self.weight_activated.data = state_dict["weight"].to(self.weight_activated.data.device) + + +class PartialFCAdamW(torch.nn.Module): + def __init__(self, + margin_loss: Callable, + embedding_size: int, + num_classes: int, + sample_rate: float = 1.0, + fp16: bool = False,): + """ + Paramenters: + ----------- + embedding_size: int + The dimension of embedding, required + num_classes: int + Total number of classes, required + sample_rate: float + The rate of negative centers participating in the calculation, default is 1.0. + """ + super(PartialFCAdamW, self).__init__() + assert ( + distributed.is_initialized() + ), "must initialize distributed before create this" + self.rank = distributed.get_rank() + self.world_size = distributed.get_world_size() + + self.dist_cross_entropy = DistCrossEntropy() + self.embedding_size = embedding_size + self.sample_rate: float = sample_rate + self.fp16 = fp16 + self.num_local: int = num_classes // self.world_size + int( + self.rank < num_classes % self.world_size + ) + self.class_start: int = num_classes // self.world_size * self.rank + min( + self.rank, num_classes % self.world_size + ) + self.num_sample: int = int(self.sample_rate * self.num_local) + self.last_batch_size: int = 0 + self.weight: torch.Tensor + self.weight_exp_avg: torch.Tensor + self.weight_exp_avg_sq: torch.Tensor + self.weight_activated: torch.nn.Parameter + self.weight_activated_exp_avg: torch.Tensor + self.weight_activated_exp_avg_sq: torch.Tensor + + self.is_updated: bool = True + self.init_weight_update: bool = True + + if self.sample_rate < 1: + self.register_buffer("weight", + tensor=torch.normal(0, 0.01, (self.num_local, embedding_size))) + 
self.register_buffer("weight_exp_avg", + tensor=torch.zeros_like(self.weight)) + self.register_buffer("weight_exp_avg_sq", + tensor=torch.zeros_like(self.weight)) + self.register_parameter("weight_activated", + param=torch.nn.Parameter(torch.empty(0, 0))) + self.register_buffer("weight_activated_exp_avg", + tensor=torch.empty(0, 0)) + self.register_buffer("weight_activated_exp_avg_sq", + tensor=torch.empty(0, 0)) + else: + self.weight_activated = torch.nn.Parameter( + torch.normal(0, 0.01, (self.num_local, embedding_size)) + ) + self.step = 0 + + if isinstance(margin_loss, Callable): + self.margin_softmax = margin_loss + else: + raise + + @torch.no_grad() + def sample(self, labels, index_positive, optimizer): + self.step += 1 + positive = torch.unique(labels[index_positive], sorted=True).cuda() + if self.num_sample - positive.size(0) >= 0: + perm = torch.rand(size=[self.num_local]).cuda() + perm[positive] = 2.0 + index = torch.topk(perm, k=self.num_sample)[1].cuda() + index = index.sort()[0].cuda() + else: + index = positive + self.weight_index = index + labels[index_positive] = torch.searchsorted(index, labels[index_positive]) + self.weight_activated = torch.nn.Parameter(self.weight[self.weight_index]) + self.weight_activated_exp_avg = self.weight_exp_avg[self.weight_index] + self.weight_activated_exp_avg_sq = self.weight_exp_avg_sq[self.weight_index] + + if isinstance(optimizer, (torch.optim.Adam, torch.optim.AdamW)): + # TODO the params of partial fc must be last in the params list + optimizer.state.pop(optimizer.param_groups[-1]["params"][0], None) + optimizer.param_groups[-1]["params"][0] = self.weight_activated + optimizer.state[self.weight_activated]["exp_avg"] = self.weight_activated_exp_avg + optimizer.state[self.weight_activated]["exp_avg_sq"] = self.weight_activated_exp_avg_sq + optimizer.state[self.weight_activated]["step"] = self.step + else: + raise + + @torch.no_grad() + def update(self): + """ partial weight to global + """ + if 
self.init_weight_update: + self.init_weight_update = False + return + + if self.sample_rate < 1: + self.weight[self.weight_index] = self.weight_activated + self.weight_exp_avg[self.weight_index] = self.weight_activated_exp_avg + self.weight_exp_avg_sq[self.weight_index] = self.weight_activated_exp_avg_sq + + def forward( + self, + local_embeddings: torch.Tensor, + local_labels: torch.Tensor, + optimizer: torch.optim.Optimizer, + ): + """ + Parameters: + ---------- + local_embeddings: torch.Tensor + feature embeddings on each GPU(Rank). + local_labels: torch.Tensor + labels on each GPU(Rank). + + Returns: + ------- + loss: torch.Tensor + pass + """ + local_labels.squeeze_() + local_labels = local_labels.long() + self.update() + + batch_size = local_embeddings.size(0) + if self.last_batch_size == 0: + self.last_batch_size = batch_size + assert self.last_batch_size == batch_size, ( + "last batch size do not equal current batch size: {} vs {}".format( + self.last_batch_size, batch_size)) + + _gather_embeddings = [ + torch.zeros((batch_size, self.embedding_size)).cuda() + for _ in range(self.world_size) + ] + _gather_labels = [ + torch.zeros(batch_size).long().cuda() for _ in range(self.world_size) + ] + _list_embeddings = AllGather(local_embeddings, *_gather_embeddings) + distributed.all_gather(_gather_labels, local_labels) + + embeddings = torch.cat(_list_embeddings) + labels = torch.cat(_gather_labels) + + labels = labels.view(-1, 1) + index_positive = (self.class_start <= labels) & ( + labels < self.class_start + self.num_local + ) + labels[~index_positive] = -1 + labels[index_positive] -= self.class_start + + if self.sample_rate < 1: + self.sample(labels, index_positive, optimizer) + + with torch.cuda.amp.autocast(self.fp16): + norm_embeddings = normalize(embeddings) + norm_weight_activated = normalize(self.weight_activated) + logits = linear(norm_embeddings, norm_weight_activated) + if self.fp16: + logits = logits.float() + logits = logits.clamp(-1, 1) + + logits 
= self.margin_softmax(logits, labels) + loss = self.dist_cross_entropy(logits, labels) + return loss + def state_dict(self, destination=None, prefix="", keep_vars=False): + if destination is None: + destination = collections.OrderedDict() + destination._metadata = collections.OrderedDict() + + for name, module in self._modules.items(): + if module is not None: + module.state_dict(destination, prefix + name + ".", keep_vars=keep_vars) + if self.sample_rate < 1: + destination["weight"] = self.weight.detach() + else: + destination["weight"] = self.weight_activated.data.detach() + return destination + + def load_state_dict(self, state_dict, strict: bool = True): + if self.sample_rate < 1: + self.weight = state_dict["weight"].to(self.weight.device) + self.weight_exp_avg.zero_() + self.weight_exp_avg_sq.zero_() + self.weight_activated.data.zero_() + self.weight_activated_exp_avg.zero_() + self.weight_activated_exp_avg_sq.zero_() + else: + self.weight_activated.data = state_dict["weight"].to(self.weight_activated.data.device) + + +class DistCrossEntropyFunc(torch.autograd.Function): + """ + CrossEntropy loss is calculated in parallel, allreduce denominator into single gpu and calculate softmax. 
+ Implemented of ArcFace (https://arxiv.org/pdf/1801.07698v1.pdf): + """ + + @staticmethod + def forward(ctx, logits: torch.Tensor, label: torch.Tensor): + """ """ + batch_size = logits.size(0) + # for numerical stability + max_logits, _ = torch.max(logits, dim=1, keepdim=True) + # local to global + distributed.all_reduce(max_logits, distributed.ReduceOp.MAX) + logits.sub_(max_logits) + logits.exp_() + sum_logits_exp = torch.sum(logits, dim=1, keepdim=True) + # local to global + distributed.all_reduce(sum_logits_exp, distributed.ReduceOp.SUM) + logits.div_(sum_logits_exp) + index = torch.where(label != -1)[0] + # loss + loss = torch.zeros(batch_size, 1, device=logits.device) + loss[index] = logits[index].gather(1, label[index]) + distributed.all_reduce(loss, distributed.ReduceOp.SUM) + ctx.save_for_backward(index, logits, label) + return loss.clamp_min_(1e-30).log_().mean() * (-1) + + @staticmethod + def backward(ctx, loss_gradient): + """ + Args: + loss_grad (torch.Tensor): gradient backward by last layer + Returns: + gradients for each input in forward function + `None` gradients for one-hot label + """ + ( + index, + logits, + label, + ) = ctx.saved_tensors + batch_size = logits.size(0) + one_hot = torch.zeros( + size=[index.size(0), logits.size(1)], device=logits.device + ) + one_hot.scatter_(1, label[index], 1) + logits[index] -= one_hot + logits.div_(batch_size) + return logits * loss_gradient.item(), None + + +class DistCrossEntropy(torch.nn.Module): + def __init__(self): + super(DistCrossEntropy, self).__init__() + + def forward(self, logit_part, label_part): + return DistCrossEntropyFunc.apply(logit_part, label_part) + + +class AllGatherFunc(torch.autograd.Function): + """AllGather op with gradient backward""" + + @staticmethod + def forward(ctx, tensor, *gather_list): + gather_list = list(gather_list) + distributed.all_gather(gather_list, tensor) + return tuple(gather_list) + + @staticmethod + def backward(ctx, *grads): + grad_list = list(grads) + rank = 
distributed.get_rank() + grad_out = grad_list[rank] + + dist_ops = [ + distributed.reduce(grad_out, rank, distributed.ReduceOp.SUM, async_op=True) + if i == rank + else distributed.reduce( + grad_list[i], i, distributed.ReduceOp.SUM, async_op=True + ) + for i in range(distributed.get_world_size()) + ] + for _op in dist_ops: + _op.wait() + + grad_out *= len(grad_list) # cooperate with distributed loss function + return (grad_out, *[None for _ in range(len(grad_list))]) + + +AllGather = AllGatherFunc.apply diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/partial_fc_v2.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/partial_fc_v2.py new file mode 100644 index 00000000..0752554c --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/partial_fc_v2.py @@ -0,0 +1,260 @@ + +import math +from typing import Callable + +import torch +from torch import distributed +from torch.nn.functional import linear, normalize + + +class PartialFC_V2(torch.nn.Module): + """ + https://arxiv.org/abs/2203.15565 + A distributed sparsely updating variant of the FC layer, named Partial FC (PFC). + When sample rate less than 1, in each iteration, positive class centers and a random subset of + negative class centers are selected to compute the margin-based softmax loss, all class + centers are still maintained throughout the whole training process, but only a subset is + selected and updated in each iteration. + .. note:: + When sample rate equal to 1, Partial FC is equal to model parallelism(default sample rate is 1). 
+ Example: + -------- + >>> module_pfc = PartialFC(embedding_size=512, num_classes=8000000, sample_rate=0.2) + >>> for img, labels in data_loader: + >>> embeddings = net(img) + >>> loss = module_pfc(embeddings, labels) + >>> loss.backward() + >>> optimizer.step() + """ + _version = 2 + + def __init__( + self, + margin_loss: Callable, + embedding_size: int, + num_classes: int, + sample_rate: float = 1.0, + fp16: bool = False, + ): + """ + Paramenters: + ----------- + embedding_size: int + The dimension of embedding, required + num_classes: int + Total number of classes, required + sample_rate: float + The rate of negative centers participating in the calculation, default is 1.0. + """ + super(PartialFC_V2, self).__init__() + assert ( + distributed.is_initialized() + ), "must initialize distributed before create this" + self.rank = distributed.get_rank() + self.world_size = distributed.get_world_size() + + self.dist_cross_entropy = DistCrossEntropy() + self.embedding_size = embedding_size + self.sample_rate: float = sample_rate + self.fp16 = fp16 + self.num_local: int = num_classes // self.world_size + int( + self.rank < num_classes % self.world_size + ) + self.class_start: int = num_classes // self.world_size * self.rank + min( + self.rank, num_classes % self.world_size + ) + self.num_sample: int = int(self.sample_rate * self.num_local) + self.last_batch_size: int = 0 + + self.is_updated: bool = True + self.init_weight_update: bool = True + self.weight = torch.nn.Parameter(torch.normal(0, 0.01, (self.num_local, embedding_size))) + + # margin_loss + if isinstance(margin_loss, Callable): + self.margin_softmax = margin_loss + else: + raise + + def sample(self, labels, index_positive): + """ + This functions will change the value of labels + Parameters: + ----------- + labels: torch.Tensor + pass + index_positive: torch.Tensor + pass + optimizer: torch.optim.Optimizer + pass + """ + with torch.no_grad(): + positive = torch.unique(labels[index_positive], 
sorted=True).cuda() + if self.num_sample - positive.size(0) >= 0: + perm = torch.rand(size=[self.num_local]).cuda() + perm[positive] = 2.0 + index = torch.topk(perm, k=self.num_sample)[1].cuda() + index = index.sort()[0].cuda() + else: + index = positive + self.weight_index = index + + labels[index_positive] = torch.searchsorted(index, labels[index_positive]) + + return self.weight[self.weight_index] + + def forward( + self, + local_embeddings: torch.Tensor, + local_labels: torch.Tensor, + ): + """ + Parameters: + ---------- + local_embeddings: torch.Tensor + feature embeddings on each GPU(Rank). + local_labels: torch.Tensor + labels on each GPU(Rank). + Returns: + ------- + loss: torch.Tensor + pass + """ + local_labels.squeeze_() + local_labels = local_labels.long() + + batch_size = local_embeddings.size(0) + if self.last_batch_size == 0: + self.last_batch_size = batch_size + assert self.last_batch_size == batch_size, ( + f"last batch size do not equal current batch size: {self.last_batch_size} vs {batch_size}") + + _gather_embeddings = [ + torch.zeros((batch_size, self.embedding_size)).cuda() + for _ in range(self.world_size) + ] + _gather_labels = [ + torch.zeros(batch_size).long().cuda() for _ in range(self.world_size) + ] + _list_embeddings = AllGather(local_embeddings, *_gather_embeddings) + distributed.all_gather(_gather_labels, local_labels) + + embeddings = torch.cat(_list_embeddings) + labels = torch.cat(_gather_labels) + + labels = labels.view(-1, 1) + index_positive = (self.class_start <= labels) & ( + labels < self.class_start + self.num_local + ) + labels[~index_positive] = -1 + labels[index_positive] -= self.class_start + + if self.sample_rate < 1: + weight = self.sample(labels, index_positive) + else: + weight = self.weight + + with torch.cuda.amp.autocast(self.fp16): + norm_embeddings = normalize(embeddings) + norm_weight_activated = normalize(weight) + logits = linear(norm_embeddings, norm_weight_activated) + if self.fp16: + logits = 
logits.float() + logits = logits.clamp(-1, 1) + + logits = self.margin_softmax(logits, labels) + loss = self.dist_cross_entropy(logits, labels) + return loss + + +class DistCrossEntropyFunc(torch.autograd.Function): + """ + CrossEntropy loss is calculated in parallel, allreduce denominator into single gpu and calculate softmax. + Implemented of ArcFace (https://arxiv.org/pdf/1801.07698v1.pdf): + """ + + @staticmethod + def forward(ctx, logits: torch.Tensor, label: torch.Tensor): + """ """ + batch_size = logits.size(0) + # for numerical stability + max_logits, _ = torch.max(logits, dim=1, keepdim=True) + # local to global + distributed.all_reduce(max_logits, distributed.ReduceOp.MAX) + logits.sub_(max_logits) + logits.exp_() + sum_logits_exp = torch.sum(logits, dim=1, keepdim=True) + # local to global + distributed.all_reduce(sum_logits_exp, distributed.ReduceOp.SUM) + logits.div_(sum_logits_exp) + index = torch.where(label != -1)[0] + # loss + loss = torch.zeros(batch_size, 1, device=logits.device) + loss[index] = logits[index].gather(1, label[index]) + distributed.all_reduce(loss, distributed.ReduceOp.SUM) + ctx.save_for_backward(index, logits, label) + return loss.clamp_min_(1e-30).log_().mean() * (-1) + + @staticmethod + def backward(ctx, loss_gradient): + """ + Args: + loss_grad (torch.Tensor): gradient backward by last layer + Returns: + gradients for each input in forward function + `None` gradients for one-hot label + """ + ( + index, + logits, + label, + ) = ctx.saved_tensors + batch_size = logits.size(0) + one_hot = torch.zeros( + size=[index.size(0), logits.size(1)], device=logits.device + ) + one_hot.scatter_(1, label[index], 1) + logits[index] -= one_hot + logits.div_(batch_size) + return logits * loss_gradient.item(), None + + +class DistCrossEntropy(torch.nn.Module): + def __init__(self): + super(DistCrossEntropy, self).__init__() + + def forward(self, logit_part, label_part): + return DistCrossEntropyFunc.apply(logit_part, label_part) + + +class 
AllGatherFunc(torch.autograd.Function): + """AllGather op with gradient backward""" + + @staticmethod + def forward(ctx, tensor, *gather_list): + gather_list = list(gather_list) + distributed.all_gather(gather_list, tensor) + return tuple(gather_list) + + @staticmethod + def backward(ctx, *grads): + grad_list = list(grads) + rank = distributed.get_rank() + grad_out = grad_list[rank] + + dist_ops = [ + distributed.reduce(grad_out, rank, distributed.ReduceOp.SUM, async_op=True) + if i == rank + else distributed.reduce( + grad_list[i], i, distributed.ReduceOp.SUM, async_op=True + ) + for i in range(distributed.get_world_size()) + ] + for _op in dist_ops: + _op.wait() + + grad_out *= len(grad_list) # cooperate with distributed loss function + return (grad_out, *[None for _ in range(len(grad_list))]) + + +AllGather = AllGatherFunc.apply diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/requirement.txt b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/requirement.txt new file mode 100644 index 00000000..f1a431ef --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/requirement.txt @@ -0,0 +1,6 @@ +tensorboard +easydict +mxnet +onnx +sklearn +opencv-python \ No newline at end of file diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/run.sh b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/run.sh new file mode 100644 index 00000000..6eacdf8e --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/run.sh @@ -0,0 +1 @@ +CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 torchrun --nproc_per_node=8 train_v2.py $@ diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/scripts/shuffle_rec.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/scripts/shuffle_rec.py new file mode 100644 index 00000000..f3b68e93 --- /dev/null +++ 
b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/scripts/shuffle_rec.py @@ -0,0 +1,81 @@ +import argparse +import multiprocessing +import os +import time + +import mxnet as mx +import numpy as np + + +def read_worker(args, q_in): + path_imgidx = os.path.join(args.input, "train.idx") + path_imgrec = os.path.join(args.input, "train.rec") + imgrec = mx.recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, "r") + + s = imgrec.read_idx(0) + header, _ = mx.recordio.unpack(s) + assert header.flag > 0 + + imgidx = np.array(range(1, int(header.label[0]))) + np.random.shuffle(imgidx) + + for idx in imgidx: + item = imgrec.read_idx(idx) + q_in.put(item) + + q_in.put(None) + imgrec.close() + + +def write_worker(args, q_out): + pre_time = time.time() + + if args.input[-1] == '/': + args.input = args.input[:-1] + dirname = os.path.dirname(args.input) + basename = os.path.basename(args.input) + output = os.path.join(dirname, f"shuffled_{basename}") + os.makedirs(output, exist_ok=True) + + path_imgidx = os.path.join(output, "train.idx") + path_imgrec = os.path.join(output, "train.rec") + save_record = mx.recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, "w") + more = True + count = 0 + while more: + deq = q_out.get() + if deq is None: + more = False + else: + header, jpeg = mx.recordio.unpack(deq) + # TODO it is currently not fully developed + if isinstance(header.label, float): + label = header.label + else: + label = header.label[0] + + header = mx.recordio.IRHeader(flag=header.flag, label=label, id=header.id, id2=header.id2) + save_record.write_idx(count, mx.recordio.pack(header, jpeg)) + count += 1 + if count % 10000 == 0: + cur_time = time.time() + print('save time:', cur_time - pre_time, ' count:', count) + pre_time = cur_time + print(count) + save_record.close() + + +def main(args): + queue = multiprocessing.Queue(10240) + read_process = multiprocessing.Process(target=read_worker, args=(args, queue)) + read_process.daemon = True + 
read_process.start() + write_process = multiprocessing.Process(target=write_worker, args=(args, queue)) + write_process.start() + write_process.join() + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('input', help='path to source rec.') + main(parser.parse_args()) diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/torch2onnx.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/torch2onnx.py new file mode 100644 index 00000000..f6055d1f --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/torch2onnx.py @@ -0,0 +1,53 @@ +import numpy as np +import onnx +import torch + + +def convert_onnx(net, path_module, output, opset=11, simplify=False): + assert isinstance(net, torch.nn.Module) + img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.int32) + img = img.astype(np.float) + img = (img / 255. - 0.5) / 0.5 # torch style norm + img = img.transpose((2, 0, 1)) + img = torch.from_numpy(img).unsqueeze(0).float() + + weight = torch.load(path_module) + net.load_state_dict(weight, strict=True) + net.eval() + torch.onnx.export(net, img, output, input_names=["data"], keep_initializers_as_inputs=False, verbose=False, opset_version=opset) + model = onnx.load(output) + graph = model.graph + graph.input[0].type.tensor_type.shape.dim[0].dim_param = 'None' + if simplify: + from onnxsim import simplify + model, check = simplify(model) + assert check, "Simplified ONNX model could not be validated" + onnx.save(model, output) + + +if __name__ == '__main__': + import os + import argparse + from backbones import get_model + + parser = argparse.ArgumentParser(description='ArcFace PyTorch to onnx') + parser.add_argument('input', type=str, help='input backbone.pth file or path') + parser.add_argument('--output', type=str, default=None, help='output onnx path') + parser.add_argument('--network', type=str, default=None, help='backbone network') + 
parser.add_argument('--simplify', type=bool, default=False, help='onnx simplify') + args = parser.parse_args() + input_file = args.input + if os.path.isdir(input_file): + input_file = os.path.join(input_file, "model.pt") + assert os.path.exists(input_file) + # model_name = os.path.basename(os.path.dirname(input_file)).lower() + # params = model_name.split("_") + # if len(params) >= 3 and params[1] in ('arcface', 'cosface'): + # if args.network is None: + # args.network = params[2] + assert args.network is not None + print(args) + backbone_onnx = get_model(args.network, dropout=0.0, fp16=False, num_features=512) + if args.output is None: + args.output = os.path.join(os.path.dirname(args.input), "model.onnx") + convert_onnx(backbone_onnx, input_file, args.output, simplify=args.simplify) diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/train.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/train.py new file mode 100644 index 00000000..b4b49e71 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/train.py @@ -0,0 +1,260 @@ +import argparse +import logging +import os +from datetime import datetime + +import numpy as np +import torch +from backbones import get_model +from dataset import get_dataloader +from losses import CombinedMarginLoss +from lr_scheduler import PolyScheduler +from partial_fc import PartialFC, PartialFCAdamW +from torch import distributed +from torch.utils.data import DataLoader +from torch.utils.tensorboard import SummaryWriter +from utils.utils_callbacks import CallBackLogging, CallBackVerification +from utils.utils_config import get_config +from utils.utils_distributed_sampler import setup_seed +from utils.utils_logging import AverageMeter, init_logging + +assert torch.__version__ >= "1.12.0", "In order to enjoy the features of the new torch, \ +we have upgraded the torch to 1.12.0. torch before than 1.12.0 may not work in the future." 
+ +try: + rank = int(os.environ["RANK"]) + local_rank = int(os.environ["LOCAL_RANK"]) + world_size = int(os.environ["WORLD_SIZE"]) + distributed.init_process_group("nccl") +except KeyError: + rank = 0 + local_rank = 0 + world_size = 1 + distributed.init_process_group( + backend="nccl", + init_method="tcp://127.0.0.1:12584", + rank=rank, + world_size=world_size, + ) + + +def main(args): + + # get config + cfg = get_config(args.config) + # global control random seed + setup_seed(seed=cfg.seed, cuda_deterministic=False) + + torch.cuda.set_device(local_rank) + + os.makedirs(cfg.output, exist_ok=True) + init_logging(rank, cfg.output) + + summary_writer = ( + SummaryWriter(log_dir=os.path.join(cfg.output, "tensorboard")) + if rank == 0 + else None + ) + + wandb_logger = None + if cfg.using_wandb: + import wandb + # Sign in to wandb + try: + wandb.login(key=cfg.wandb_key) + except Exception as e: + print("WandB Key must be provided in config file (base.py).") + print(f"Config Error: {e}") + # Initialize wandb + run_name = datetime.now().strftime("%y%m%d_%H%M") + f"_GPU{rank}" + run_name = run_name if cfg.suffix_run_name is None else run_name + f"_{cfg.suffix_run_name}" + try: + wandb_logger = wandb.init( + entity = cfg.wandb_entity, + project = cfg.wandb_project, + sync_tensorboard = True, + resume=cfg.wandb_resume, + name = run_name, + notes = cfg.notes) if rank == 0 or cfg.wandb_log_all else None + if wandb_logger: + wandb_logger.config.update(cfg) + except Exception as e: + print("WandB Data (Entity and Project name) must be provided in config file (base.py).") + print(f"Config Error: {e}") + + train_loader = get_dataloader( + cfg.rec, + local_rank, + cfg.batch_size, + cfg.dali, + cfg.seed, + cfg.num_workers + ) + + backbone = get_model( + cfg.network, dropout=0.0, fp16=cfg.fp16, num_features=cfg.embedding_size).cuda() + + backbone = torch.nn.parallel.DistributedDataParallel( + module=backbone, broadcast_buffers=False, device_ids=[local_rank], bucket_cap_mb=16, + 
find_unused_parameters=True) + + backbone.train() + # FIXME using gradient checkpoint if there are some unused parameters will cause error + backbone._set_static_graph() + + margin_loss = CombinedMarginLoss( + 64, + cfg.margin_list[0], + cfg.margin_list[1], + cfg.margin_list[2], + cfg.interclass_filtering_threshold + ) + + if cfg.optimizer == "sgd": + module_partial_fc = PartialFC( + margin_loss, cfg.embedding_size, cfg.num_classes, + cfg.sample_rate, cfg.fp16) + module_partial_fc.train().cuda() + # TODO the params of partial fc must be last in the params list + opt = torch.optim.SGD( + params=[{"params": backbone.parameters()}, {"params": module_partial_fc.parameters()}], + lr=cfg.lr, momentum=0.9, weight_decay=cfg.weight_decay) + + elif cfg.optimizer == "adamw": + module_partial_fc = PartialFCAdamW( + margin_loss, cfg.embedding_size, cfg.num_classes, + cfg.sample_rate, cfg.fp16) + module_partial_fc.train().cuda() + opt = torch.optim.AdamW( + params=[{"params": backbone.parameters()}, {"params": module_partial_fc.parameters()}], + lr=cfg.lr, weight_decay=cfg.weight_decay) + else: + raise + + cfg.total_batch_size = cfg.batch_size * world_size + cfg.warmup_step = cfg.num_image // cfg.total_batch_size * cfg.warmup_epoch + cfg.total_step = cfg.num_image // cfg.total_batch_size * cfg.num_epoch + + lr_scheduler = PolyScheduler( + optimizer=opt, + base_lr=cfg.lr, + max_steps=cfg.total_step, + warmup_steps=cfg.warmup_step, + last_epoch=-1 + ) + + start_epoch = 0 + global_step = 0 + if cfg.resume: + dict_checkpoint = torch.load(os.path.join(cfg.output, f"checkpoint_gpu_{rank}.pt")) + start_epoch = dict_checkpoint["epoch"] + global_step = dict_checkpoint["global_step"] + backbone.module.load_state_dict(dict_checkpoint["state_dict_backbone"]) + module_partial_fc.load_state_dict(dict_checkpoint["state_dict_softmax_fc"]) + opt.load_state_dict(dict_checkpoint["state_optimizer"]) + lr_scheduler.load_state_dict(dict_checkpoint["state_lr_scheduler"]) + del dict_checkpoint + + for 
key, value in cfg.items(): + num_space = 25 - len(key) + logging.info(": " + key + " " * num_space + str(value)) + + callback_verification = CallBackVerification( + val_targets=cfg.val_targets, rec_prefix=cfg.rec, + summary_writer=summary_writer, wandb_logger = wandb_logger + ) + callback_logging = CallBackLogging( + frequent=cfg.frequent, + total_step=cfg.total_step, + batch_size=cfg.batch_size, + start_step = global_step, + writer=summary_writer + ) + + loss_am = AverageMeter() + amp = torch.cuda.amp.grad_scaler.GradScaler(growth_interval=100) + + for epoch in range(start_epoch, cfg.num_epoch): + + if isinstance(train_loader, DataLoader): + train_loader.sampler.set_epoch(epoch) + for _, (img, local_labels) in enumerate(train_loader): + global_step += 1 + local_embeddings = backbone(img) + loss: torch.Tensor = module_partial_fc(local_embeddings, local_labels, opt) + + if cfg.fp16: + amp.scale(loss).backward() + amp.unscale_(opt) + torch.nn.utils.clip_grad_norm_(backbone.parameters(), 5) + amp.step(opt) + amp.update() + else: + loss.backward() + torch.nn.utils.clip_grad_norm_(backbone.parameters(), 5) + opt.step() + + opt.zero_grad() + lr_scheduler.step() + + with torch.no_grad(): + if wandb_logger: + wandb_logger.log({ + 'Loss/Step Loss': loss.item(), + 'Loss/Train Loss': loss_am.avg, + 'Process/Step': global_step, + 'Process/Epoch': epoch + }) + + loss_am.update(loss.item(), 1) + callback_logging(global_step, loss_am, epoch, cfg.fp16, lr_scheduler.get_last_lr()[0], amp) + + if global_step % cfg.verbose == 0 and global_step > 0: + callback_verification(global_step, backbone) + + if cfg.save_all_states: + checkpoint = { + "epoch": epoch + 1, + "global_step": global_step, + "state_dict_backbone": backbone.module.state_dict(), + "state_dict_softmax_fc": module_partial_fc.state_dict(), + "state_optimizer": opt.state_dict(), + "state_lr_scheduler": lr_scheduler.state_dict() + } + torch.save(checkpoint, os.path.join(cfg.output, f"checkpoint_gpu_{rank}.pt")) + + if rank 
== 0: + path_module = os.path.join(cfg.output, "model.pt") + torch.save(backbone.module.state_dict(), path_module) + + if wandb_logger and cfg.save_artifacts: + artifact_name = f"{run_name}_E{epoch}" + model = wandb.Artifact(artifact_name, type='model') + model.add_file(path_module) + wandb_logger.log_artifact(model) + + if cfg.dali: + train_loader.reset() + + if rank == 0: + path_module = os.path.join(cfg.output, "model.pt") + torch.save(backbone.module.state_dict(), path_module) + + from torch2onnx import convert_onnx + convert_onnx(backbone.module.cpu().eval(), path_module, os.path.join(cfg.output, "model.onnx")) + + if wandb_logger and cfg.save_artifacts: + artifact_name = f"{run_name}_Final" + model = wandb.Artifact(artifact_name, type='model') + model.add_file(path_module) + wandb_logger.log_artifact(model) + + distributed.destroy_process_group() + + +if __name__ == "__main__": + torch.backends.cudnn.benchmark = True + parser = argparse.ArgumentParser( + description="Distributed Arcface Training in Pytorch") + parser.add_argument("config", type=str, help="py config file") + main(parser.parse_args()) diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/train_v2.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/train_v2.py new file mode 100644 index 00000000..5d53e801 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/train_v2.py @@ -0,0 +1,258 @@ +import argparse +import logging +import os +from datetime import datetime + +import numpy as np +import torch +from backbones import get_model +from dataset import get_dataloader +from losses import CombinedMarginLoss +from lr_scheduler import PolyScheduler +from partial_fc_v2 import PartialFC_V2 +from torch import distributed +from torch.utils.data import DataLoader +from torch.utils.tensorboard import SummaryWriter +from utils.utils_callbacks import CallBackLogging, CallBackVerification +from utils.utils_config import 
get_config +from utils.utils_distributed_sampler import setup_seed +from utils.utils_logging import AverageMeter, init_logging + +assert torch.__version__ >= "1.12.0", "In order to enjoy the features of the new torch, \ +we have upgraded the torch to 1.12.0. torch before than 1.12.0 may not work in the future." + +try: + rank = int(os.environ["RANK"]) + local_rank = int(os.environ["LOCAL_RANK"]) + world_size = int(os.environ["WORLD_SIZE"]) + distributed.init_process_group("nccl") +except KeyError: + rank = 0 + local_rank = 0 + world_size = 1 + distributed.init_process_group( + backend="nccl", + init_method="tcp://127.0.0.1:12584", + rank=rank, + world_size=world_size, + ) + + +def main(args): + + # get config + cfg = get_config(args.config) + # global control random seed + setup_seed(seed=cfg.seed, cuda_deterministic=False) + + torch.cuda.set_device(local_rank) + + os.makedirs(cfg.output, exist_ok=True) + init_logging(rank, cfg.output) + + summary_writer = ( + SummaryWriter(log_dir=os.path.join(cfg.output, "tensorboard")) + if rank == 0 + else None + ) + + wandb_logger = None + if cfg.using_wandb: + import wandb + # Sign in to wandb + try: + wandb.login(key=cfg.wandb_key) + except Exception as e: + print("WandB Key must be provided in config file (base.py).") + print(f"Config Error: {e}") + # Initialize wandb + run_name = datetime.now().strftime("%y%m%d_%H%M") + f"_GPU{rank}" + run_name = run_name if cfg.suffix_run_name is None else run_name + f"_{cfg.suffix_run_name}" + try: + wandb_logger = wandb.init( + entity = cfg.wandb_entity, + project = cfg.wandb_project, + sync_tensorboard = True, + resume=cfg.wandb_resume, + name = run_name, + notes = cfg.notes) if rank == 0 or cfg.wandb_log_all else None + if wandb_logger: + wandb_logger.config.update(cfg) + except Exception as e: + print("WandB Data (Entity and Project name) must be provided in config file (base.py).") + print(f"Config Error: {e}") + + train_loader = get_dataloader( + cfg.rec, + local_rank, + 
cfg.batch_size, + cfg.dali, + cfg.seed, + cfg.num_workers + ) + + backbone = get_model( + cfg.network, dropout=0.0, fp16=cfg.fp16, num_features=cfg.embedding_size).cuda() + + backbone = torch.nn.parallel.DistributedDataParallel( + module=backbone, broadcast_buffers=False, device_ids=[local_rank], bucket_cap_mb=16, + find_unused_parameters=True) + + backbone.train() + # FIXME using gradient checkpoint if there are some unused parameters will cause error + backbone._set_static_graph() + + margin_loss = CombinedMarginLoss( + 64, + cfg.margin_list[0], + cfg.margin_list[1], + cfg.margin_list[2], + cfg.interclass_filtering_threshold + ) + + if cfg.optimizer == "sgd": + module_partial_fc = PartialFC_V2( + margin_loss, cfg.embedding_size, cfg.num_classes, + cfg.sample_rate, cfg.fp16) + module_partial_fc.train().cuda() + # TODO the params of partial fc must be last in the params list + opt = torch.optim.SGD( + params=[{"params": backbone.parameters()}, {"params": module_partial_fc.parameters()}], + lr=cfg.lr, momentum=0.9, weight_decay=cfg.weight_decay) + + elif cfg.optimizer == "adamw": + module_partial_fc = PartialFC_V2( + margin_loss, cfg.embedding_size, cfg.num_classes, + cfg.sample_rate, cfg.fp16) + module_partial_fc.train().cuda() + opt = torch.optim.AdamW( + params=[{"params": backbone.parameters()}, {"params": module_partial_fc.parameters()}], + lr=cfg.lr, weight_decay=cfg.weight_decay) + else: + raise + + cfg.total_batch_size = cfg.batch_size * world_size + cfg.warmup_step = cfg.num_image // cfg.total_batch_size * cfg.warmup_epoch + cfg.total_step = cfg.num_image // cfg.total_batch_size * cfg.num_epoch + + lr_scheduler = PolyScheduler( + optimizer=opt, + base_lr=cfg.lr, + max_steps=cfg.total_step, + warmup_steps=cfg.warmup_step, + last_epoch=-1 + ) + + start_epoch = 0 + global_step = 0 + if cfg.resume: + dict_checkpoint = torch.load(os.path.join(cfg.output, f"checkpoint_gpu_{rank}.pt")) + start_epoch = dict_checkpoint["epoch"] + global_step = 
dict_checkpoint["global_step"] + backbone.module.load_state_dict(dict_checkpoint["state_dict_backbone"]) + module_partial_fc.load_state_dict(dict_checkpoint["state_dict_softmax_fc"]) + opt.load_state_dict(dict_checkpoint["state_optimizer"]) + lr_scheduler.load_state_dict(dict_checkpoint["state_lr_scheduler"]) + del dict_checkpoint + + for key, value in cfg.items(): + num_space = 25 - len(key) + logging.info(": " + key + " " * num_space + str(value)) + + callback_verification = CallBackVerification( + val_targets=cfg.val_targets, rec_prefix=cfg.rec, + summary_writer=summary_writer, wandb_logger = wandb_logger + ) + callback_logging = CallBackLogging( + frequent=cfg.frequent, + total_step=cfg.total_step, + batch_size=cfg.batch_size, + start_step = global_step, + writer=summary_writer + ) + + loss_am = AverageMeter() + amp = torch.cuda.amp.grad_scaler.GradScaler(growth_interval=100) + + for epoch in range(start_epoch, cfg.num_epoch): + + if isinstance(train_loader, DataLoader): + train_loader.sampler.set_epoch(epoch) + for _, (img, local_labels) in enumerate(train_loader): + global_step += 1 + local_embeddings = backbone(img) + loss: torch.Tensor = module_partial_fc(local_embeddings, local_labels) + + if cfg.fp16: + amp.scale(loss).backward() + if global_step % cfg.gradient_acc == 0: + amp.unscale_(opt) + torch.nn.utils.clip_grad_norm_(backbone.parameters(), 5) + amp.step(opt) + amp.update() + opt.zero_grad() + else: + loss.backward() + if global_step % cfg.gradient_acc == 0: + torch.nn.utils.clip_grad_norm_(backbone.parameters(), 5) + opt.step() + opt.zero_grad() + lr_scheduler.step() + + with torch.no_grad(): + if wandb_logger: + wandb_logger.log({ + 'Loss/Step Loss': loss.item(), + 'Loss/Train Loss': loss_am.avg, + 'Process/Step': global_step, + 'Process/Epoch': epoch + }) + + loss_am.update(loss.item(), 1) + callback_logging(global_step, loss_am, epoch, cfg.fp16, lr_scheduler.get_last_lr()[0], amp) + + if global_step % cfg.verbose == 0 and global_step > 0: + 
callback_verification(global_step, backbone) + + if cfg.save_all_states: + checkpoint = { + "epoch": epoch + 1, + "global_step": global_step, + "state_dict_backbone": backbone.module.state_dict(), + "state_dict_softmax_fc": module_partial_fc.state_dict(), + "state_optimizer": opt.state_dict(), + "state_lr_scheduler": lr_scheduler.state_dict() + } + torch.save(checkpoint, os.path.join(cfg.output, f"checkpoint_gpu_{rank}.pt")) + + if rank == 0: + path_module = os.path.join(cfg.output, "model.pt") + torch.save(backbone.module.state_dict(), path_module) + + if wandb_logger and cfg.save_artifacts: + artifact_name = f"{run_name}_E{epoch}" + model = wandb.Artifact(artifact_name, type='model') + model.add_file(path_module) + wandb_logger.log_artifact(model) + + if cfg.dali: + train_loader.reset() + + if rank == 0: + path_module = os.path.join(cfg.output, "model.pt") + torch.save(backbone.module.state_dict(), path_module) + + if wandb_logger and cfg.save_artifacts: + artifact_name = f"{run_name}_Final" + model = wandb.Artifact(artifact_name, type='model') + model.add_file(path_module) + wandb_logger.log_artifact(model) + + + +if __name__ == "__main__": + torch.backends.cudnn.benchmark = True + parser = argparse.ArgumentParser( + description="Distributed Arcface Training in Pytorch") + parser.add_argument("config", type=str, help="py config file") + main(parser.parse_args()) diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/utils/__init__.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/utils/plot.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/utils/plot.py new file mode 100644 index 00000000..7f1d39da --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/utils/plot.py @@ -0,0 +1,71 @@ +import os +import sys + 
import os
import sys

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from menpo.visualize.viewmatplotlib import sample_colours_from_colourmap
from prettytable import PrettyTable
from sklearn.metrics import roc_curve, auc

# argv[1]: a text file listing one .npy score-file path per line.
with open(sys.argv[1], "r") as f:
    files = f.readlines()

files = [x.strip() for x in files]
image_path = "/train_tmp/IJB_release/IJBC"


def read_template_pair_list(path):
    """Read an IJB template-pair label file.

    Returns (t1, t2, label): template-id arrays for each pair and the
    binary same/different label.
    """
    pairs = pd.read_csv(path, sep=' ', header=None).values
    # Bugfix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int produces the same default integer dtype.
    t1 = pairs[:, 0].astype(int)
    t2 = pairs[:, 1].astype(int)
    label = pairs[:, 2].astype(int)
    return t1, t2, label


p1, p2, label = read_template_pair_list(
    os.path.join('%s/meta' % image_path,
                 '%s_template_pair_label.txt' % 'ijbc'))

methods = []
scores = []
for file in files:
    methods.append(file)
    scores.append(np.load(file))

methods = np.array(methods)
scores = dict(zip(methods, scores))
colours = dict(
    zip(methods, sample_colours_from_colourmap(methods.shape[0], 'Set2')))
x_labels = [10 ** -6, 10 ** -5, 10 ** -4, 10 ** -3, 10 ** -2, 10 ** -1]
tpr_fpr_table = PrettyTable(['Methods'] + [str(x) for x in x_labels])
fig = plt.figure()
for method in methods:
    fpr, tpr, _ = roc_curve(label, scores[method])
    roc_auc = auc(fpr, tpr)
    fpr = np.flipud(fpr)
    tpr = np.flipud(tpr)  # select largest tpr at same fpr
    plt.plot(fpr,
             tpr,
             color=colours[method],
             lw=1,
             label=('[%s (AUC = %0.4f %%)]' %
                    (method.split('-')[-1], roc_auc * 100)))
    tpr_fpr_row = []
    tpr_fpr_row.append(method)
    # For each reference FPR, report the TPR at the closest measured FPR.
    for fpr_iter in np.arange(len(x_labels)):
        _, min_index = min(
            list(zip(abs(fpr - x_labels[fpr_iter]), range(len(fpr)))))
        tpr_fpr_row.append('%.2f' % (tpr[min_index] * 100))
    tpr_fpr_table.add_row(tpr_fpr_row)
plt.xlim([10 ** -6, 0.1])
plt.ylim([0.3, 1.0])
plt.grid(linestyle='--', linewidth=1)
plt.xticks(x_labels)
plt.yticks(np.linspace(0.3, 1.0, 8, endpoint=True))
plt.xscale('log')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC on IJB')
plt.legend(loc="lower right")
print(tpr_fpr_table)
class CallBackVerification(object):
    """Rank-0 callback that evaluates the backbone on verification `.bin`
    sets (e.g. lfw, cfp_fp) and logs accuracy to logging / TensorBoard / W&B.

    Parameters:
        val_targets (list[str]) -- names of the verification sets to load
        rec_prefix (str)        -- directory containing <name>.bin files
        summary_writer          -- TensorBoard SummaryWriter, or None
        image_size (tuple)      -- verification image size, default (112, 112)
        wandb_logger            -- a wandb run object, or None
    """

    def __init__(self, val_targets, rec_prefix, summary_writer=None, image_size=(112, 112), wandb_logger=None):
        self.rank: int = distributed.get_rank()
        self.highest_acc: float = 0.0
        self.highest_acc_list: List[float] = [0.0] * len(val_targets)
        self.ver_list: List[object] = []
        self.ver_name_list: List[str] = []
        # Bugfix: `self.rank is 0` tested object identity, not value; it only
        # worked via CPython's small-int cache and emits a SyntaxWarning on
        # Python >= 3.8. Use numeric equality.
        if self.rank == 0:
            self.init_dataset(val_targets=val_targets, data_dir=rec_prefix, image_size=image_size)

        self.summary_writer = summary_writer
        self.wandb_logger = wandb_logger

    def ver_test(self, backbone: torch.nn.Module, global_step: int):
        """Run verification.test on every loaded set; log accuracy and track the best."""
        results = []
        for i in range(len(self.ver_list)):
            acc1, std1, acc2, std2, xnorm, embeddings_list = verification.test(
                self.ver_list[i], backbone, 10, 10)
            logging.info('[%s][%d]XNorm: %f' % (self.ver_name_list[i], global_step, xnorm))
            logging.info('[%s][%d]Accuracy-Flip: %1.5f+-%1.5f' % (self.ver_name_list[i], global_step, acc2, std2))

            self.summary_writer: SummaryWriter
            self.summary_writer.add_scalar(tag=self.ver_name_list[i], scalar_value=acc2, global_step=global_step, )
            # Removed an unused local `import wandb`; the caller supplies the
            # logger, so the module itself is never referenced here.
            if self.wandb_logger:
                self.wandb_logger.log({
                    f'Acc/val-Acc1 {self.ver_name_list[i]}': acc1,
                    f'Acc/val-Acc2 {self.ver_name_list[i]}': acc2,
                })

            if acc2 > self.highest_acc_list[i]:
                self.highest_acc_list[i] = acc2
            # NOTE(review): the diff's flattened indentation is ambiguous about
            # whether this log is inside the `if` above; upstream arcface_torch
            # logs the running best unconditionally, which is followed here.
            logging.info(
                '[%s][%d]Accuracy-Highest: %1.5f' % (self.ver_name_list[i], global_step, self.highest_acc_list[i]))
            results.append(acc2)

    def init_dataset(self, val_targets, data_dir, image_size):
        """Load every <name>.bin verification set that exists under data_dir."""
        for name in val_targets:
            path = os.path.join(data_dir, name + ".bin")
            if os.path.exists(path):
                data_set = verification.load_bin(path, image_size)
                self.ver_list.append(data_set)
                self.ver_name_list.append(name)

    def __call__(self, num_update, backbone: torch.nn.Module):
        """Evaluate at step `num_update` (rank 0 only, skipping step 0)."""
        # Same identity-vs-equality bugfix as in __init__.
        if self.rank == 0 and num_update > 0:
            backbone.eval()
            self.ver_test(backbone, num_update)
            backbone.train()
def get_config(config_file):
    """Build the job config: load `configs.base`, then overlay the module
    named by *config_file* (e.g. ``configs/foo.py`` -> ``configs.foo``).

    Falls back to ``work_dirs/<module>`` when the job config sets no output dir.
    """
    assert config_file.startswith('configs/'), 'config file setting must start with configs/'
    module_name = osp.splitext(osp.basename(config_file))[0]
    base = importlib.import_module("configs.base")
    cfg = base.config
    job = importlib.import_module("configs.%s" % module_name)
    cfg.update(job.config)
    if cfg.output is None:
        cfg.output = osp.join('work_dirs', module_name)
    return cfg
b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/utils/utils_distributed_sampler.py new file mode 100644 index 00000000..cea67039 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/arcface_torch/utils/utils_distributed_sampler.py @@ -0,0 +1,126 @@ +import math +import os +import random + +import numpy as np +import torch +import torch.distributed as dist +from torch.utils.data import DistributedSampler as _DistributedSampler + + +def setup_seed(seed, cuda_deterministic=True): + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + np.random.seed(seed) + random.seed(seed) + os.environ["PYTHONHASHSEED"] = str(seed) + if cuda_deterministic: # slower, more reproducible + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + else: # faster, less reproducible + torch.backends.cudnn.deterministic = False + torch.backends.cudnn.benchmark = True + + +def worker_init_fn(worker_id, num_workers, rank, seed): + # The seed of each worker equals to + # num_worker * rank + worker_id + user_seed + worker_seed = num_workers * rank + worker_id + seed + np.random.seed(worker_seed) + random.seed(worker_seed) + torch.manual_seed(worker_seed) + + +def get_dist_info(): + if dist.is_available() and dist.is_initialized(): + rank = dist.get_rank() + world_size = dist.get_world_size() + else: + rank = 0 + world_size = 1 + + return rank, world_size + + +def sync_random_seed(seed=None, device="cuda"): + """Make sure different ranks share the same seed. + All workers must call this function, otherwise it will deadlock. + This method is generally used in `DistributedSampler`, + because the seed should be identical across all processes + in the distributed group. + In distributed sampling, different ranks should sample non-overlapped + data in the dataset. Therefore, this function is used to make sure that + each rank shuffles the data indices in the same order based + on the same seed. 
Then different ranks could use different indices + to select non-overlapped data from the same data list. + Args: + seed (int, Optional): The seed. Default to None. + device (str): The device where the seed will be put on. + Default to 'cuda'. + Returns: + int: Seed to be used. + """ + if seed is None: + seed = np.random.randint(2**31) + assert isinstance(seed, int) + + rank, world_size = get_dist_info() + + if world_size == 1: + return seed + + if rank == 0: + random_num = torch.tensor(seed, dtype=torch.int32, device=device) + else: + random_num = torch.tensor(0, dtype=torch.int32, device=device) + + dist.broadcast(random_num, src=0) + + return random_num.item() + + +class DistributedSampler(_DistributedSampler): + def __init__( + self, + dataset, + num_replicas=None, # world_size + rank=None, # local_rank + shuffle=True, + seed=0, + ): + + super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle) + + # In distributed sampling, different ranks should sample + # non-overlapped data in the dataset. Therefore, this function + # is used to make sure that each rank shuffles the data indices + # in the same order based on the same seed. Then different ranks + # could use different indices to select non-overlapped data from the + # same data list. + self.seed = sync_random_seed(seed) + + def __iter__(self): + # deterministically shuffle based on epoch + if self.shuffle: + g = torch.Generator() + # When :attr:`shuffle=True`, this ensures all replicas + # use a different random ordering for each epoch. + # Otherwise, the next iteration of this sampler will + # yield the same ordering. 
class AverageMeter(object):
    """Tracks the most recent value and a running (weighted) average."""

    def __init__(self):
        # reset() establishes every public attribute (val/avg/sum/count).
        self.val = None
        self.avg = None
        self.sum = None
        self.count = None
        self.reset()

    def reset(self):
        """Start a fresh accumulation window."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def init_logging(rank, models_root):
    """On rank 0, attach file (training.log under models_root) and stdout
    handlers to the root logger at INFO level. No-op on other ranks."""
    if rank != 0:
        return
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    formatter = logging.Formatter("Training: %(asctime)s-%(message)s")
    handlers = (
        logging.FileHandler(os.path.join(models_root, "training.log")),
        logging.StreamHandler(sys.stdout),
    )
    for handler in handlers:
        handler.setFormatter(formatter)
        root.addHandler(handler)
    root.info('rank_id: %d' % rank)
b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/base_model.py new file mode 100644 index 00000000..2a05d3a0 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/base_model.py @@ -0,0 +1,316 @@ +"""This script defines the base network model for Deep3DFaceRecon_pytorch +""" + +import os +import numpy as np +import torch +from collections import OrderedDict +from abc import ABC, abstractmethod +from . import networks + + +class BaseModel(ABC): + """This class is an abstract base class (ABC) for models. + To create a subclass, you need to implement the following five functions: + -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt). + -- : unpack data from dataset and apply preprocessing. + -- : produce intermediate results. + -- : calculate losses, gradients, and update network weights. + -- : (optionally) add model-specific options and set default options. + """ + + def __init__(self, opt): + """Initialize the BaseModel class. + + Parameters: + opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions + + When creating your custom class, you need to implement your own initialization. + In this fucntion, you should first call + Then, you need to define four lists: + -- self.loss_names (str list): specify the training losses that you want to plot and save. + -- self.model_names (str list): specify the images that you want to display and save. + -- self.visual_names (str list): define networks used in our training. + -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example. 
+ """ + self.opt = opt + self.isTrain = opt.isTrain + self.device = torch.device('cpu') + self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) # save all the checkpoints to save_dir + self.loss_names = [] + self.model_names = [] + self.visual_names = [] + self.parallel_names = [] + self.optimizers = [] + self.image_paths = [] + self.metric = 0 # used for learning rate policy 'plateau' + + @staticmethod + def dict_grad_hook_factory(add_func=lambda x: x): + saved_dict = dict() + + def hook_gen(name): + def grad_hook(grad): + saved_vals = add_func(grad) + saved_dict[name] = saved_vals + return grad_hook + return hook_gen, saved_dict + + @staticmethod + def modify_commandline_options(parser, is_train): + """Add new model-specific options, and rewrite default values for existing options. + + Parameters: + parser -- original option parser + is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. + + Returns: + the modified parser. + """ + return parser + + @abstractmethod + def set_input(self, input): + """Unpack input data from the dataloader and perform necessary pre-processing steps. + + Parameters: + input (dict): includes the data itself and its metadata information. 
+ """ + pass + + @abstractmethod + def forward(self): + """Run forward pass; called by both functions and .""" + pass + + @abstractmethod + def optimize_parameters(self): + """Calculate losses, gradients, and update network weights; called in every training iteration""" + pass + + def setup(self, opt): + """Load and print networks; create schedulers + + Parameters: + opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions + """ + if self.isTrain: + self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers] + + if not self.isTrain or opt.continue_train: + load_suffix = opt.epoch + self.load_networks(load_suffix) + + + # self.print_networks(opt.verbose) + + def parallelize(self, convert_sync_batchnorm=True): + if not self.opt.use_ddp: + for name in self.parallel_names: + if isinstance(name, str): + module = getattr(self, name) + setattr(self, name, module.to(self.device)) + else: + for name in self.model_names: + if isinstance(name, str): + module = getattr(self, name) + if convert_sync_batchnorm: + module = torch.nn.SyncBatchNorm.convert_sync_batchnorm(module) + setattr(self, name, torch.nn.parallel.DistributedDataParallel(module.to(self.device), + device_ids=[self.device.index], + find_unused_parameters=True, broadcast_buffers=True)) + + # DistributedDataParallel is not needed when a module doesn't have any parameter that requires a gradient. 
+ for name in self.parallel_names: + if isinstance(name, str) and name not in self.model_names: + module = getattr(self, name) + setattr(self, name, module.to(self.device)) + + # put state_dict of optimizer to gpu device + if self.opt.phase != 'test': + if self.opt.continue_train: + for optim in self.optimizers: + for state in optim.state.values(): + for k, v in state.items(): + if isinstance(v, torch.Tensor): + state[k] = v.to(self.device) + + def data_dependent_initialize(self, data): + pass + + def train(self): + """Make models train mode""" + for name in self.model_names: + if isinstance(name, str): + net = getattr(self, name) + net.train() + + def eval(self): + """Make models eval mode""" + for name in self.model_names: + if isinstance(name, str): + net = getattr(self, name) + net.eval() + + def test(self): + """Forward function used in test time. + + This function wraps function in no_grad() so we don't save intermediate steps for backprop + It also calls to produce additional visualization results + """ + with torch.no_grad(): + self.forward() + self.compute_visuals() + + def compute_visuals(self): + """Calculate additional output images for visdom and HTML visualization""" + pass + + def get_image_paths(self, name='A'): + """ Return image paths that are used to load current data""" + return self.image_paths if name =='A' else self.image_paths_B + + def update_learning_rate(self): + """Update learning rates for all the networks; called at the end of every epoch""" + for scheduler in self.schedulers: + if self.opt.lr_policy == 'plateau': + scheduler.step(self.metric) + else: + scheduler.step() + + lr = self.optimizers[0].param_groups[0]['lr'] + print('learning rate = %.7f' % lr) + + def get_current_visuals(self): + """Return visualization images. 
train.py will display these images with visdom, and save the images to a HTML""" + visual_ret = OrderedDict() + for name in self.visual_names: + if isinstance(name, str): + visual_ret[name] = getattr(self, name)[:, :3, ...] + return visual_ret + + def get_current_losses(self): + """Return traning losses / errors. train.py will print out these errors on console, and save them to a file""" + errors_ret = OrderedDict() + for name in self.loss_names: + if isinstance(name, str): + errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number + return errors_ret + + def save_networks(self, epoch): + """Save all the networks to the disk. + + Parameters: + epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name) + """ + if not os.path.isdir(self.save_dir): + os.makedirs(self.save_dir) + + save_filename = 'epoch_%s.pth' % (epoch) + save_path = os.path.join(self.save_dir, save_filename) + + save_dict = {} + for name in self.model_names: + if isinstance(name, str): + net = getattr(self, name) + if isinstance(net, torch.nn.DataParallel) or isinstance(net, + torch.nn.parallel.DistributedDataParallel): + net = net.module + save_dict[name] = net.state_dict() + + + for i, optim in enumerate(self.optimizers): + save_dict['opt_%02d'%i] = optim.state_dict() + + for i, sched in enumerate(self.schedulers): + save_dict['sched_%02d'%i] = sched.state_dict() + + torch.save(save_dict, save_path) + + def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0): + """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)""" + key = keys[i] + if i + 1 == len(keys): # at the end, pointing to a parameter/buffer + if module.__class__.__name__.startswith('InstanceNorm') and \ + (key == 'running_mean' or key == 'running_var'): + if getattr(module, key) is None: + state_dict.pop('.'.join(keys)) + if module.__class__.__name__.startswith('InstanceNorm') and \ + (key == 'num_batches_tracked'): + 
state_dict.pop('.'.join(keys)) + else: + self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1) + + def load_networks(self, epoch): + """Load all the networks from the disk. + + Parameters: + epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name) + """ + if self.opt.isTrain and self.opt.pretrained_name is not None: + load_dir = os.path.join(self.opt.checkpoints_dir, self.opt.pretrained_name) + else: + load_dir = self.save_dir + load_filename = 'epoch_%s.pth' % (epoch) + load_path = os.path.join(load_dir, load_filename) + state_dict = torch.load(load_path, map_location=self.device) + print('loading the model from %s' % load_path) + + for name in self.model_names: + if isinstance(name, str): + net = getattr(self, name) + if isinstance(net, torch.nn.DataParallel): + net = net.module + net.load_state_dict(state_dict[name]) + + if self.opt.phase != 'test': + if self.opt.continue_train: + print('loading the optim from %s' % load_path) + for i, optim in enumerate(self.optimizers): + optim.load_state_dict(state_dict['opt_%02d'%i]) + + try: + print('loading the sched from %s' % load_path) + for i, sched in enumerate(self.schedulers): + sched.load_state_dict(state_dict['sched_%02d'%i]) + except: + print('Failed to load schedulers, set schedulers according to epoch count manually') + for i, sched in enumerate(self.schedulers): + sched.last_epoch = self.opt.epoch_count - 1 + + + + + def print_networks(self, verbose): + """Print the total number of parameters in the network and (if verbose) network architecture + + Parameters: + verbose (bool) -- if verbose: print the network architecture + """ + print('---------- Networks initialized -------------') + for name in self.model_names: + if isinstance(name, str): + net = getattr(self, name) + num_params = 0 + for param in net.parameters(): + num_params += param.numel() + if verbose: + print(net) + print('[Network %s] Total number of parameters : %.3f M' % (name, num_params 
/ 1e6)) + print('-----------------------------------------------') + + def set_requires_grad(self, nets, requires_grad=False): + """Set requies_grad=Fasle for all the networks to avoid unnecessary computations + Parameters: + nets (network list) -- a list of networks + requires_grad (bool) -- whether the networks require gradients or not + """ + if not isinstance(nets, list): + nets = [nets] + for net in nets: + if net is not None: + for param in net.parameters(): + param.requires_grad = requires_grad + + def generate_visuals_for_evaluation(self, data, mode): + return {} diff --git a/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/bfm.py b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/bfm.py new file mode 100644 index 00000000..e2b7cc34 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/bfm.py @@ -0,0 +1,299 @@ +"""This script defines the parametric 3d face model for Deep3DFaceRecon_pytorch +""" + +import numpy as np +import torch +import torch.nn.functional as F +from scipy.io import loadmat +from deep_3drecon.util.load_mats import transferBFM09 +import os + +def perspective_projection(focal, center): + # return p.T (N, 3) @ (3, 3) + return np.array([ + focal, 0, center, + 0, focal, center, + 0, 0, 1 + ]).reshape([3, 3]).astype(np.float32).transpose() + +class SH: + def __init__(self): + self.a = [np.pi, 2 * np.pi / np.sqrt(3.), 2 * np.pi / np.sqrt(8.)] + self.c = [1/np.sqrt(4 * np.pi), np.sqrt(3.) / np.sqrt(4 * np.pi), 3 * np.sqrt(5.) / np.sqrt(12 * np.pi)] + + + +class ParametricFaceModel: + def __init__(self, + bfm_folder='./BFM', + recenter=True, + camera_distance=10., + init_lit=np.array([ + 0.8, 0, 0, 0, 0, 0, 0, 0, 0 + ]), + focal=1015., + center=112., + is_train=True, + default_name='BFM_model_front.mat'): + + if not os.path.isfile(os.path.join(bfm_folder, default_name)): + transferBFM09(bfm_folder) + model = loadmat(os.path.join(bfm_folder, default_name)) + # mean face shape. 
[3*N,1] + self.mean_shape = model['meanshape'].astype(np.float32) + # identity basis. [3*N,80] + self.id_base = model['idBase'].astype(np.float32) + # expression basis. [3*N,64] + self.exp_base = model['exBase'].astype(np.float32) + # mean face texture. [3*N,1] (0-255) + self.mean_tex = model['meantex'].astype(np.float32) + # texture basis. [3*N,80] + self.tex_base = model['texBase'].astype(np.float32) + # face indices for each vertex that lies in. starts from 0. [N,8] + self.point_buf = model['point_buf'].astype(np.int64) - 1 + # vertex indices for each face. starts from 0. [F,3] + self.face_buf = model['tri'].astype(np.int64) - 1 + # vertex indices for 68 landmarks. starts from 0. [68,1] + self.keypoints = np.squeeze(model['keypoints']).astype(np.int64) - 1 + + if is_train: + # vertex indices for small face region to compute photometric error. starts from 0. + self.front_mask = np.squeeze(model['frontmask2_idx']).astype(np.int64) - 1 + # vertex indices for each face from small face region. starts from 0. 
    def to(self, device):
        """Move every numpy attribute of the model onto *device* as a torch tensor.

        NOTE(review): the check targets numpy arrays only, so attributes that a
        previous call already converted to tensors are not moved again — confirm
        callers invoke this only once (or always with the same device).
        """
        self.device = device
        for key, value in self.__dict__.items():
            if type(value).__module__ == np.__name__:
                setattr(self, key, torch.tensor(value).to(device))


    def compute_shape(self, id_coeff, exp_coeff):
        """
        Return:
            face_shape       -- torch.tensor, size (B, N, 3)

        Parameters:
            id_coeff         -- torch.tensor, size (B, 80), identity coeffs
            exp_coeff        -- torch.tensor, size (B, 64), expression coeffs
        """
        batch_size = id_coeff.shape[0]
        # linear 3DMM: shape = mean + id_base @ id + exp_base @ exp (flattened xyz)
        id_part = torch.einsum('ij,aj->ai', self.id_base, id_coeff)
        exp_part = torch.einsum('ij,aj->ai', self.exp_base, exp_coeff)
        face_shape = id_part + exp_part + self.mean_shape.reshape([1, -1])
        return face_shape.reshape([batch_size, -1, 3])


    def compute_texture(self, tex_coeff, normalize=True):
        """
        Return:
            face_texture     -- torch.tensor, size (B, N, 3), in RGB order, range (0, 1.)

        Parameters:
            tex_coeff        -- torch.tensor, size (B, 80)
        """
        batch_size = tex_coeff.shape[0]
        face_texture = torch.einsum('ij,aj->ai', self.tex_base, tex_coeff) + self.mean_tex
        if normalize:
            # texture model is stored in 0-255; rescale to (0, 1)
            face_texture = face_texture / 255.
        return face_texture.reshape([batch_size, -1, 3])


    def compute_norm(self, face_shape):
        """
        Return:
            vertex_norm      -- torch.tensor, size (B, N, 3)

        Parameters:
            face_shape       -- torch.tensor, size (B, N, 3)
        """

        v1 = face_shape[:, self.face_buf[:, 0]]
        v2 = face_shape[:, self.face_buf[:, 1]]
        v3 = face_shape[:, self.face_buf[:, 2]]
        e1 = v1 - v2
        e2 = v2 - v3
        # per-face normals from two triangle edges
        face_norm = torch.cross(e1, e2, dim=-1)
        face_norm = F.normalize(face_norm, dim=-1, p=2)
        # append one zero normal; NOTE(review): presumably point_buf pads unused
        # face slots with the index of this extra row so they contribute nothing
        # to the vertex-normal sum — confirm against the BFM loading code.
        face_norm = torch.cat([face_norm, torch.zeros(face_norm.shape[0], 1, 3).to(self.device)], dim=1)

        vertex_norm = torch.sum(face_norm[:, self.point_buf], dim=2)
        vertex_norm = F.normalize(vertex_norm, dim=-1, p=2)
        return vertex_norm


    def compute_color(self, face_texture, face_norm, gamma):
        """Shade the texture with order-2 spherical harmonics lighting.

        Return:
            face_color       -- torch.tensor, size (B, N, 3), range (0, 1.)

        Parameters:
            face_texture     -- torch.tensor, size (B, N, 3), from texture model, range (0, 1.)
            face_norm        -- torch.tensor, size (B, N, 3), rotated face normal
            gamma            -- torch.tensor, size (B, 27), SH coeffs (9 per RGB channel)
        """
        batch_size = gamma.shape[0]
        v_num = face_texture.shape[1]
        a, c = self.SH.a, self.SH.c
        gamma = gamma.reshape([batch_size, 3, 9])
        gamma = gamma + self.init_lit
        gamma = gamma.permute(0, 2, 1)
        # 9 SH basis functions evaluated at the vertex normals
        Y = torch.cat([
            a[0] * c[0] * torch.ones_like(face_norm[..., :1]).to(self.device),
            -a[1] * c[1] * face_norm[..., 1:2],
            a[1] * c[1] * face_norm[..., 2:],
            -a[1] * c[1] * face_norm[..., :1],
            a[2] * c[2] * face_norm[..., :1] * face_norm[..., 1:2],
            -a[2] * c[2] * face_norm[..., 1:2] * face_norm[..., 2:],
            0.5 * a[2] * c[2] / np.sqrt(3.) * (3 * face_norm[..., 2:] ** 2 - 1),
            -a[2] * c[2] * face_norm[..., :1] * face_norm[..., 2:],
            0.5 * a[2] * c[2] * (face_norm[..., :1] ** 2 - face_norm[..., 1:2] ** 2)
        ], dim=-1)
        r = Y @ gamma[..., :1]
        g = Y @ gamma[..., 1:2]
        b = Y @ gamma[..., 2:]
        face_color = torch.cat([r, g, b], dim=-1) * face_texture
        return face_color


    def compute_rotation(self, angles):
        """
        Return:
            rot              -- torch.tensor, size (B, 3, 3), transposed so that
                                row-vector points transform as pts @ rot

        Parameters:
            angles           -- torch.tensor, size (B, 3), radian (x, y, z Euler)
        """

        batch_size = angles.shape[0]
        ones = torch.ones([batch_size, 1]).to(self.device)
        zeros = torch.zeros([batch_size, 1]).to(self.device)
        x, y, z = angles[:, :1], angles[:, 1:2], angles[:, 2:],

        rot_x = torch.cat([
            ones, zeros, zeros,
            zeros, torch.cos(x), -torch.sin(x),
            zeros, torch.sin(x), torch.cos(x)
        ], dim=1).reshape([batch_size, 3, 3])

        rot_y = torch.cat([
            torch.cos(y), zeros, torch.sin(y),
            zeros, ones, zeros,
            -torch.sin(y), zeros, torch.cos(y)
        ], dim=1).reshape([batch_size, 3, 3])

        rot_z = torch.cat([
            torch.cos(z), -torch.sin(z), zeros,
            torch.sin(z), torch.cos(z), zeros,
            zeros, zeros, ones
        ], dim=1).reshape([batch_size, 3, 3])

        rot = rot_z @ rot_y @ rot_x
        # transpose so callers can right-multiply: transformed = pts @ rot
        return rot.permute(0, 2, 1)


    def to_camera(self, face_shape):
        # NOTE: mutates face_shape in place — flips z and offsets by camera distance
        face_shape[..., -1] = self.camera_distance - face_shape[..., -1]
        return face_shape

    def to_image(self, face_shape):
        """
        Return:
            face_proj        -- torch.tensor, size (B, N, 2), y direction is opposite to v direction

        Parameters:
            face_shape       -- torch.tensor, size (B, N, 3)
        """
        # to image_plane: perspective projection then divide by depth
        face_proj = face_shape @ self.persc_proj
        face_proj = face_proj[..., :2] / face_proj[..., 2:]

        return face_proj


    def transform(self, face_shape, rot, trans):
        """
        Return:
            face_shape       -- torch.tensor, size (B, N, 3) pts @ rot + trans

        Parameters:
            face_shape       -- torch.tensor, size (B, N, 3)
            rot              -- torch.tensor, size (B, 3, 3)
            trans            -- torch.tensor, size (B, 3)
        """
        return face_shape @ rot + trans.unsqueeze(1)


    def get_landmarks(self, face_proj):
        """
        Return:
            face_lms         -- torch.tensor, size (B, 68, 2)

        Parameters:
            face_proj        -- torch.tensor, size (B, N, 2)
        """
        # self.keypoints holds the 68 landmark vertex indices
        return face_proj[:, self.keypoints]

    def split_coeff(self, coeffs):
        """
        Return:
            coeffs_dict      -- a dict of torch.tensors

        Parameters:
            coeffs           -- torch.tensor, size (B, 257)
                                (80 id + 64 exp + 80 tex + 3 angle + 27 gamma + 3 trans)
        """
        id_coeffs = coeffs[:, :80]
        exp_coeffs = coeffs[:, 80: 144]
        tex_coeffs = coeffs[:, 144: 224]
        angles = coeffs[:, 224: 227]
        gammas = coeffs[:, 227: 254]
        translations = coeffs[:, 254:]
        return {
            'id': id_coeffs,
            'exp': exp_coeffs,
            'tex': tex_coeffs,
            'angle': angles,
            'gamma': gammas,
            'trans': translations
        }

    def compute_for_render(self, coeffs):
        """
        Return:
            face_vertex      -- torch.tensor, size (B, N, 3), in camera coordinate
            face_color       -- torch.tensor, size (B, N, 3), in RGB order
            landmark         -- torch.tensor, size (B, 68, 2), y direction is opposite to v direction

        Parameters:
            coeffs           -- torch.tensor, size (B, 257)
        """
        coef_dict = self.split_coeff(coeffs)
        face_shape = self.compute_shape(coef_dict['id'], coef_dict['exp'])
        rotation = self.compute_rotation(coef_dict['angle'])

        face_shape_transformed = self.transform(face_shape, rotation, coef_dict['trans'])
        face_vertex = self.to_camera(face_shape_transformed)

        face_proj = self.to_image(face_vertex)
        landmark = self.get_landmarks(face_proj)

        face_texture = self.compute_texture(coef_dict['tex'])
        # normals are computed in model space, then rotated into camera space
        face_norm = self.compute_norm(face_shape)
        face_norm_roted = face_norm @ rotation
        face_color = self.compute_color(face_texture, face_norm_roted, coef_dict['gamma'])

        return face_vertex, face_texture, face_color, landmark
b/MuseTalk_project/3DMM/deep_3drecon/deep_3drecon_models/facerecon_model.py @@ -0,0 +1,228 @@ +"""This script defines the face reconstruction model for Deep3DFaceRecon_pytorch +""" + +import numpy as np +import torch +from .base_model import BaseModel +from . import networks +from .bfm import ParametricFaceModel +from .losses import perceptual_loss, photo_loss, reg_loss, reflectance_loss, landmark_loss +from deep_3drecon.util import util +from deep_3drecon.util.mesh_renderer import MeshRenderer +from deep_3drecon.util.preprocess import estimate_norm_torch + +import trimesh +from scipy.io import savemat + +class FaceReconModel(BaseModel): + + @staticmethod + def modify_commandline_options(parser, is_train=True): + """ Configures options specific for CUT model + """ + # net structure and parameters + parser.add_argument('--net_recon', type=str, default='resnet50', choices=['resnet18', 'resnet34', 'resnet50'], help='network structure') + parser.add_argument('--init_path', type=str, default='checkpoints/init_model/resnet50-0676ba61.pth') + parser.add_argument('--use_last_fc', type=util.str2bool, nargs='?', const=True, default=False, help='zero initialize the last fc') + parser.add_argument('--bfm_folder', type=str, default='./deep_3drecon/BFM') + parser.add_argument('--bfm_model', type=str, default='BFM_model_front.mat', help='bfm model') + + # renderer parameters + parser.add_argument('--focal', type=float, default=1015.) + parser.add_argument('--center', type=float, default=112.) + parser.add_argument('--camera_d', type=float, default=10.) + parser.add_argument('--z_near', type=float, default=5.) + parser.add_argument('--z_far', type=float, default=15.) 
+ parser.add_argument('--use_opengl', type=util.str2bool, nargs='?', const=True, default=False, help='use opengl context or not') + + if is_train: + # training parameters + parser.add_argument('--net_recog', type=str, default='r50', choices=['r18', 'r43', 'r50'], help='face recog network structure') + parser.add_argument('--net_recog_path', type=str, default='checkpoints/recog_model/ms1mv3_arcface_r50_fp16/backbone.pth') + parser.add_argument('--use_crop_face', type=util.str2bool, nargs='?', const=True, default=False, help='use crop mask for photo loss') + parser.add_argument('--use_predef_M', type=util.str2bool, nargs='?', const=True, default=False, help='use predefined M for predicted face') + + + # augmentation parameters + parser.add_argument('--shift_pixs', type=float, default=10., help='shift pixels') + parser.add_argument('--scale_delta', type=float, default=0.1, help='delta scale factor') + parser.add_argument('--rot_angle', type=float, default=10., help='rot angles, degree') + + # loss weights + parser.add_argument('--w_feat', type=float, default=0.2, help='weight for feat loss') + parser.add_argument('--w_color', type=float, default=1.92, help='weight for loss loss') + parser.add_argument('--w_reg', type=float, default=3.0e-4, help='weight for reg loss') + parser.add_argument('--w_id', type=float, default=1.0, help='weight for id_reg loss') + parser.add_argument('--w_exp', type=float, default=0.8, help='weight for exp_reg loss') + parser.add_argument('--w_tex', type=float, default=1.7e-2, help='weight for tex_reg loss') + parser.add_argument('--w_gamma', type=float, default=10.0, help='weight for gamma loss') + parser.add_argument('--w_lm', type=float, default=1.6e-3, help='weight for lm loss') + parser.add_argument('--w_reflc', type=float, default=5.0, help='weight for reflc loss') + + + + opt, _ = parser.parse_known_args() + parser.set_defaults( + focal=1015., center=112., camera_d=10., use_last_fc=False, z_near=5., z_far=15. 
    def __init__(self, opt):
        """Initialize this model class.

        Parameters:
            opt -- training/test options

        A few things can be done here.
        - (required) call the initialization function of BaseModel
        - define loss function, visualization images, model names, and optimizers
        """
        BaseModel.__init__(self, opt)  # call the initialization method of BaseModel

        self.visual_names = ['output_vis']
        self.model_names = ['net_recon']
        self.parallel_names = self.model_names + ['renderer']

        self.net_recon = networks.define_net_recon(
            net_recon=opt.net_recon, use_last_fc=opt.use_last_fc, init_path=opt.init_path
        )

        self.facemodel = ParametricFaceModel(
            bfm_folder=opt.bfm_folder, camera_distance=opt.camera_d, focal=opt.focal, center=opt.center,
            is_train=self.isTrain, default_name=opt.bfm_model
        )

        # field of view derived from the pinhole intrinsics (focal / principal point)
        fov = 2 * np.arctan(opt.center / opt.focal) * 180 / np.pi
        self.renderer = MeshRenderer(
            rasterize_fov=fov, znear=opt.z_near, zfar=opt.z_far, rasterize_size=int(2 * opt.center), use_opengl=opt.use_opengl
        )

        if self.isTrain:
            self.loss_names = ['all', 'feat', 'color', 'lm', 'reg', 'gamma', 'reflc']

            self.net_recog = networks.define_net_recog(
                net_recog=opt.net_recog, pretrained_path=opt.net_recog_path
            )
            # loss func name: (compute_%s_loss) % loss_name
            self.compute_feat_loss = perceptual_loss
            # NOTE(review): 'comupte' is a typo but load-bearing —
            # compute_losses() below calls self.comupte_color_loss by this exact name.
            self.comupte_color_loss = photo_loss
            self.compute_lm_loss = landmark_loss
            self.compute_reg_loss = reg_loss
            self.compute_reflc_loss = reflectance_loss

            self.optimizer = torch.optim.Adam(self.net_recon.parameters(), lr=opt.lr)
            self.optimizers = [self.optimizer]
            self.parallel_names += ['net_recog']
        # Our program will automatically call BaseModel.setup to define schedulers, load networks, and print networks

    def set_input(self, input):
        """Unpack input data from the dataloader and perform necessary pre-processing steps.

        Parameters:
            input: a dictionary that contains the data itself and its metadata information.
                   Optional keys ('msks', 'lms', 'M', 'im_paths') default to None when absent.
        """
        self.input_img = input['imgs'].to(self.device)
        self.atten_mask = input['msks'].to(self.device) if 'msks' in input else None
        self.gt_lm = input['lms'].to(self.device) if 'lms' in input else None
        self.trans_m = input['M'].to(self.device) if 'M' in input else None
        self.image_paths = input['im_paths'] if 'im_paths' in input else None

    def forward(self):
        """Regress 3DMM coefficients and render the predicted face; stores results on self."""
        output_coeff = self.net_recon(self.input_img)
        self.facemodel.to(self.device)
        self.pred_vertex, self.pred_tex, self.pred_color, self.pred_lm = \
            self.facemodel.compute_for_render(output_coeff)
        self.pred_mask, _, self.pred_face = self.renderer(
            self.pred_vertex, self.facemodel.face_buf, feat=self.pred_color)

        self.pred_coeffs_dict = self.facemodel.split_coeff(output_coeff)
        self.output_coeff = output_coeff

    def compute_losses(self):
        """Compute all training losses from the last forward() pass; stores them on self.

        (Weight updates happen in optimize_parameters, not here.)
        """

        # the recognition net is a frozen feature extractor; it must stay in eval mode
        assert self.net_recog.training == False
        trans_m = self.trans_m
        if not self.opt.use_predef_M:
            trans_m = estimate_norm_torch(self.pred_lm, self.input_img.shape[-2])

        pred_feat = self.net_recog(self.pred_face, trans_m)
        gt_feat = self.net_recog(self.input_img, self.trans_m)
        self.loss_feat = self.opt.w_feat * self.compute_feat_loss(pred_feat, gt_feat)

        face_mask = self.pred_mask
        if self.opt.use_crop_face:
            # restrict the photometric loss to the rasterized frontal-face region
            face_mask, _, _ = self.renderer(self.pred_vertex, self.facemodel.front_face_buf)

        face_mask = face_mask.detach()
        self.loss_color = self.opt.w_color * self.comupte_color_loss(
            self.pred_face, self.input_img, self.atten_mask * face_mask)

        loss_reg, loss_gamma = self.compute_reg_loss(self.pred_coeffs_dict, self.opt)
        self.loss_reg = self.opt.w_reg * loss_reg
        self.loss_gamma = self.opt.w_gamma * loss_gamma

        self.loss_lm = self.opt.w_lm * self.compute_lm_loss(self.pred_lm, self.gt_lm)

        self.loss_reflc = self.opt.w_reflc * self.compute_reflc_loss(self.pred_tex, self.facemodel.skin_mask)

        self.loss_all = self.loss_feat + self.loss_color + self.loss_reg + self.loss_gamma \
                        + self.loss_lm + self.loss_reflc


    def optimize_parameters(self, isTrain=True):
        self.forward()
        self.compute_losses()
        """Update network weights; it will be called in every training iteration."""
        if isTrain:
            self.optimizer.zero_grad()
            self.loss_all.backward()
            self.optimizer.step()

    def compute_visuals(self):
        """Build the composite visualization image (input | render | render+landmarks)."""
        with torch.no_grad():
            input_img_numpy = 255. * self.input_img.detach().cpu().permute(0, 2, 3, 1).numpy()
            # composite rendered face over the input using the rasterized mask
            output_vis = self.pred_face * self.pred_mask + (1 - self.pred_mask) * self.input_img
            output_vis_numpy_raw = 255. * output_vis.detach().cpu().permute(0, 2, 3, 1).numpy()

            if self.gt_lm is not None:
                # blue = ground-truth landmarks, red = predicted landmarks
                gt_lm_numpy = self.gt_lm.cpu().numpy()
                pred_lm_numpy = self.pred_lm.detach().cpu().numpy()
                output_vis_numpy = util.draw_landmarks(output_vis_numpy_raw, gt_lm_numpy, 'b')
                output_vis_numpy = util.draw_landmarks(output_vis_numpy, pred_lm_numpy, 'r')

                output_vis_numpy = np.concatenate((input_img_numpy,
                                    output_vis_numpy_raw, output_vis_numpy), axis=-2)
            else:
                output_vis_numpy = np.concatenate((input_img_numpy,
                                    output_vis_numpy_raw), axis=-2)

            self.output_vis = torch.tensor(
                    output_vis_numpy / 255., dtype=torch.float32
                ).permute(0, 3, 1, 2).to(self.device)
class PerceptualLoss(nn.Module):
    """Identity-preserving perceptual loss.

    Computes mean (1 - cosine similarity) between face-recognition embeddings
    of two aligned face crops.
    """

    def __init__(self, recog_net, input_size=112):
        super(PerceptualLoss, self).__init__()
        self.recog_net = recog_net
        # map (0, 1) images to (-1, 1), the range the recognition net expects
        self.preprocess = lambda x: 2 * x - 1
        self.input_size = input_size

    def forward(self, imageA, imageB, M):
        """
        1 - cosine distance

        Parameters:
            imageA       --torch.tensor (B, 3, H, W), range (0, 1), RGB order
            imageB       --same as imageA
            M            --torch.tensor (B, 2, 3), affine alignment matrices
        """
        # BUG FIX: the original signature was `def forward(imageA, imageB, M)`,
        # missing `self` — every `self.*` access below raised at call time.
        imageA = self.preprocess(resize_n_crop(imageA, M, self.input_size))
        imageB = self.preprocess(resize_n_crop(imageB, M, self.input_size))

        # freeze bn
        self.recog_net.eval()

        id_featureA = F.normalize(self.recog_net(imageA), dim=-1, p=2)
        id_featureB = F.normalize(self.recog_net(imageB), dim=-1, p=2)
        cosine_d = torch.sum(id_featureA * id_featureB, dim=-1)
        # assert torch.sum((cosine_d > 1).float()) == 0
        return torch.sum(1 - cosine_d) / cosine_d.shape[0]


def perceptual_loss(id_featureA, id_featureB):
    """Mean (1 - cosine distance) between two batches of normalized id embeddings."""
    cosine_d = torch.sum(id_featureA * id_featureB, dim=-1)
    # assert torch.sum((cosine_d > 1).float()) == 0
    return torch.sum(1 - cosine_d) / cosine_d.shape[0]
### image level loss
def photo_loss(imageA, imageB, mask, eps=1e-6):
    """
    l2 norm (with sqrt, to ensure backward stabililty, use eps, otherwise Nan may occur)

    Parameters:
        imageA       --torch.tensor (B, 3, H, W), range (0, 1), RGB order
        imageB       --same as imageA
        mask         --torch.tensor (B, 1, H, W), per-pixel weights
    """
    loss = torch.sqrt(eps + torch.sum((imageA - imageB) ** 2, dim=1, keepdim=True)) * mask
    # guard against an all-zero mask (divide by at least 1)
    loss = torch.sum(loss) / torch.max(torch.sum(mask), torch.tensor(1.0).to(mask.device))
    return loss


def landmark_loss(predict_lm, gt_lm, weight=None):
    """
    weighted mse loss

    Parameters:
        predict_lm   --torch.tensor (B, 68, 2)
        gt_lm        --torch.tensor (B, 68, 2)
        weight       --numpy.array (1, 68), optional; defaults to 1 everywhere
                       with 20x emphasis on the nose bridge (28:31) and mouth (-8:)
    """
    # BUG FIX: the original used `if not weight:`, which raises
    # "truth value of an array is ambiguous" for any passed ndarray, and only
    # converted the *default* weight to a tensor.
    if weight is None:
        weight = np.ones([68])
        weight[28:31] = 20
        weight[-8:] = 20
        weight = np.expand_dims(weight, 0)
    weight = torch.tensor(weight).to(predict_lm.device)
    loss = torch.sum((predict_lm - gt_lm) ** 2, dim=-1) * weight
    loss = torch.sum(loss) / (predict_lm.shape[0] * predict_lm.shape[1])
    return loss


### regulization
def reg_loss(coeffs_dict, opt=None):
    """
    l2 norm without the sqrt, from yu's implementation (mse)
    tf.nn.l2_loss https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss

    Parameters:
        coeffs_dict  -- a dict of torch.tensors, keys: id, exp, tex, angle, gamma, trans
        opt          -- optional options object providing w_id / w_exp / w_tex

    Return:
        (creg_loss, gamma_loss) -- coefficient and lighting regularizers
    """
    # coefficient regularization to ensure plausible 3d faces
    if opt is not None:
        w_id, w_exp, w_tex = opt.w_id, opt.w_exp, opt.w_tex
    else:
        # BUG FIX: original was `w_id, w_exp, w_tex = 1, 1, 1, 1` — a
        # "too many values to unpack" ValueError on every call without opt.
        w_id, w_exp, w_tex = 1, 1, 1
    creg_loss = w_id * torch.sum(coeffs_dict['id'] ** 2) + \
                w_exp * torch.sum(coeffs_dict['exp'] ** 2) + \
                w_tex * torch.sum(coeffs_dict['tex'] ** 2)
    creg_loss = creg_loss / coeffs_dict['id'].shape[0]

    # gamma regularization to ensure a nearly-monochromatic light
    gamma = coeffs_dict['gamma'].reshape([-1, 3, 9])
    gamma_mean = torch.mean(gamma, dim=1, keepdim=True)
    gamma_loss = torch.mean((gamma - gamma_mean) ** 2)

    return creg_loss, gamma_loss


def reflectance_loss(texture, mask):
    """
    minimize texture variance (mse), albedo regularization to ensure an uniform skin albedo

    Parameters:
        texture      --torch.tensor, (B, N, 3)
        mask         --torch.tensor, (N), 1 or 0
    """
    mask = mask.reshape([1, mask.shape[0], 1])
    texture_mean = torch.sum(mask * texture, dim=1, keepdim=True) / torch.sum(mask)
    loss = torch.sum(((texture - texture_mean) * mask) ** 2) / (texture.shape[0] * torch.sum(mask))
    return loss


def resize_n_crop(image, M, dsize=112):
    """Warp-align a batch of images with per-image affine matrices.

    image: (b, c, h, w); M: (b, 2, 3)
    """
    return warp_affine(image, M, dsize=(dsize, dsize))


def filter_state_dict(state_dict, remove_name='fc'):
    """Return a copy of state_dict without entries whose key contains remove_name."""
    new_state_dict = {}
    for key in state_dict:
        if remove_name in key:
            continue
        new_state_dict[key] = state_dict[key]
    return new_state_dict


def get_scheduler(optimizer, opt):
    """Return a learning rate scheduler

    Parameters:
        optimizer          -- the optimizer of the network
        opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
                              opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine

    For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
    See https://pytorch.org/docs/stable/optim.html for more details.

    Raises:
        NotImplementedError -- for an unknown opt.lr_policy
    """
    if opt.lr_policy == 'linear':
        def lambda_rule(epoch):
            # decays linearly to ~0 over n_epochs, offset by epoch_count
            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_epochs, gamma=0.2)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
    else:
        # BUG FIX: original *returned* a NotImplementedError instance instead of raising,
        # so callers silently received an exception object as their "scheduler".
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler


def define_net_recon(net_recon, use_last_fc=False, init_path=None):
    """Build the coefficient-regression network (trainable)."""
    return ReconNetWrapper(net_recon, use_last_fc=use_last_fc, init_path=init_path)


def define_net_recog(net_recog, pretrained_path=None):
    """Build the frozen face-recognition network used for the perceptual loss."""
    net = RecogNetWrapper(net_recog=net_recog, pretrained_path=pretrained_path)
    net.eval()
    return net


class ReconNetWrapper(nn.Module):
    """ResNet backbone plus seven 1x1 heads regressing the 257 3DMM coefficients."""
    fc_dim = 257

    def __init__(self, net_recon, use_last_fc=False, init_path=None):
        super(ReconNetWrapper, self).__init__()
        self.use_last_fc = use_last_fc
        if net_recon not in func_dict:
            # BUG FIX: original returned the exception instead of raising it.
            raise NotImplementedError('network [%s] is not implemented' % net_recon)
        func, last_dim = func_dict[net_recon]
        backbone = func(use_last_fc=use_last_fc, num_classes=self.fc_dim)
        if init_path and os.path.isfile(init_path):
            state_dict = filter_state_dict(torch.load(init_path, map_location='cpu'))
            backbone.load_state_dict(state_dict)
            print("loading init net_recon %s from %s" % (net_recon, init_path))
        self.backbone = backbone
        if not use_last_fc:
            # one zero-initialized head per coefficient group (80+64+80+3+27+2+1 = 257)
            self.final_layers = nn.ModuleList([
                conv1x1(last_dim, 80, bias=True),  # id layer
                conv1x1(last_dim, 64, bias=True),  # exp layer
                conv1x1(last_dim, 80, bias=True),  # tex layer
                conv1x1(last_dim, 3, bias=True),   # angle layer
                conv1x1(last_dim, 27, bias=True),  # gamma layer
                conv1x1(last_dim, 2, bias=True),   # tx, ty
                conv1x1(last_dim, 1, bias=True)    # tz
            ])
            for m in self.final_layers:
                nn.init.constant_(m.weight, 0.)
                nn.init.constant_(m.bias, 0.)

    def forward(self, x):
        x = self.backbone(x)
        if not self.use_last_fc:
            output = []
            for layer in self.final_layers:
                output.append(layer(x))
            x = torch.flatten(torch.cat(output, dim=1), 1)
        return x


class RecogNetWrapper(nn.Module):
    """Frozen ArcFace-style recognition network that embeds aligned face crops."""

    def __init__(self, net_recog, pretrained_path=None, input_size=112):
        super(RecogNetWrapper, self).__init__()
        net = get_model(name=net_recog, fp16=False)
        if pretrained_path:
            state_dict = torch.load(pretrained_path, map_location='cpu')
            net.load_state_dict(state_dict)
            print("loading pretrained net_recog %s from %s" % (net_recog, pretrained_path))
        # frozen: used only as a fixed feature extractor for the perceptual loss
        for param in net.parameters():
            param.requires_grad = False
        self.net = net
        # map (0, 1) images to (-1, 1)
        self.preprocess = lambda x: 2 * x - 1
        self.input_size = input_size

    def forward(self, image, M):
        image = self.preprocess(resize_n_crop(image, M, self.input_size))
        id_feature = F.normalize(self.net(image), dim=-1, p=2)
        return id_feature
# adapted from https://github.com/pytorch/vision/edit/master/torchvision/models/resnet.py
# Verbatim torchvision ResNet, except: conv1x1 gains a `bias` parameter (used by
# ReconNetWrapper's coefficient heads) and ResNet gains `use_last_fc` to expose
# pooled features instead of logits.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-f37072fd.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-b627a593.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-0676ba61.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-63fe2227.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-394f9c45.pth',
    'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
    'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
    'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
    'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}


def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=dilation, groups=groups, bias=False, dilation=dilation)


def conv1x1(in_planes: int, out_planes: int, stride: int = 1, bias: bool = False) -> nn.Conv2d:
    """1x1 convolution (the `bias` parameter is an addition over upstream torchvision)"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=bias)


class BasicBlock(nn.Module):
    # Two 3x3 convolutions with an identity (or downsampled) skip connection.
    expansion: int = 1

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out


class Bottleneck(nn.Module):
    # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
    # while original implementation places the stride at the first 1x1 convolution(self.conv1)
    # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
    # This variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.

    expansion: int = 4

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out


class ResNet(nn.Module):
    """torchvision ResNet with an optional final fc (use_last_fc=False returns
    the (B, C, 1, 1) pooled feature map instead of logits)."""

    def __init__(
        self,
        block: Type[Union[BasicBlock, Bottleneck]],
        layers: List[int],
        num_classes: int = 1000,
        zero_init_residual: bool = False,
        use_last_fc: bool = False,
        groups: int = 1,
        width_per_group: int = 64,
        replace_stride_with_dilation: Optional[List[bool]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer

        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.use_last_fc = use_last_fc
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

        if self.use_last_fc:
            self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)  # type: ignore[arg-type]
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)  # type: ignore[arg-type]

    def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,
                    stride: int = 1, dilate: bool = False) -> nn.Sequential:
        # Build one stage: a (possibly downsampling) first block plus `blocks - 1`
        # stride-1 blocks at the stage's channel width.
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))

        return nn.Sequential(*layers)

    def _forward_impl(self, x: Tensor) -> Tensor:
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        if self.use_last_fc:
            x = torch.flatten(x, 1)
            x = self.fc(x)
        return x

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)


def _resnet(
    arch: str,
    block: Type[Union[BasicBlock, Bottleneck]],
    layers: List[int],
    pretrained: bool,
    progress: bool,
    **kwargs: Any
) -> ResNet:
    # Shared factory: build the net, optionally load ImageNet weights by arch name.
    model = ResNet(block, layers, **kwargs)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls[arch],
                                              progress=progress)
        model.load_state_dict(state_dict)
    return model


def resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNet-18 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
                   **kwargs)


def resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNet-34 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
                   **kwargs)


def resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNet-50 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
                   **kwargs)


def resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNet-101 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
                   **kwargs)


def resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNet-152 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
                   **kwargs)


def resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNeXt-50 32x4d model from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 4
    return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
                   pretrained, progress, **kwargs)


def resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""ResNeXt-101 32x8d model from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 8
    return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
                   pretrained, progress, **kwargs)


def wide_resnet50_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Wide ResNet-50-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.

    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 has 2048-1024-2048.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
                   pretrained, progress, **kwargs)


def wide_resnet101_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Wide ResNet-101-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.

    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 has 2048-1024-2048.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
                   pretrained, progress, **kwargs)


# backbone name -> (constructor, feature dim) used by ReconNetWrapper.
# NOTE(review): the CLI offers 'resnet34' as a --net_recon choice, but it is
# absent here, so selecting it raises NotImplementedError — confirm intended.
func_dict = {
    'resnet18': (resnet18, 512),
    'resnet50': (resnet50, 2048)
}
+The filename should be _dataset.py +The class name should be Dataset.py +It implements a simple image-to-image translation baseline based on regression loss. +Given input-output pairs (data_A, data_B), it learns a network netG that can minimize the following L1 loss: + min_ ||netG(data_A) - data_B||_1 +You need to implement the following functions: + : Add model-specific options and rewrite default values for existing options. + <__init__>: Initialize this model class. + : Unpack input data and perform data pre-processing. + : Run forward pass. This will be called by both and . + : Update network weights; it will be called in every training iteration. +""" +import numpy as np +import torch +from .base_model import BaseModel +from . import networks + + +class TemplateModel(BaseModel): + @staticmethod + def modify_commandline_options(parser, is_train=True): + """Add new model-specific options and rewrite default values for existing options. + + Parameters: + parser -- the option parser + is_train -- if it is training phase or test phase. You can use this flag to add training-specific or test-specific options. + + Returns: + the modified parser. + """ + parser.set_defaults(dataset_mode='aligned') # You can rewrite default values for this model. For example, this model usually uses aligned dataset as its dataset. + if is_train: + parser.add_argument('--lambda_regression', type=float, default=1.0, help='weight for the regression loss') # You can define new arguments for this model. + + return parser + + def __init__(self, opt): + """Initialize this model class. + + Parameters: + opt -- training/test options + + A few things can be done here. + - (required) call the initialization function of BaseModel + - define loss function, visualization images, model names, and optimizers + """ + BaseModel.__init__(self, opt) # call the initialization method of BaseModel + # specify the training losses you want to print out. 
The program will call base_model.get_current_losses to plot the losses to the console and save them to the disk. + self.loss_names = ['loss_G'] + # specify the images you want to save and display. The program will call base_model.get_current_visuals to save and display these images. + self.visual_names = ['data_A', 'data_B', 'output'] + # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks to save and load networks. + # you can use opt.isTrain to specify different behaviors for training and test. For example, some networks will not be used during test, and you don't need to load them. + self.model_names = ['G'] + # define networks; you can use opt.isTrain to specify different behaviors for training and test. + self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, gpu_ids=self.gpu_ids) + if self.isTrain: # only defined during training time + # define your loss functions. You can use losses provided by torch.nn such as torch.nn.L1Loss. + # We also provide a GANLoss class "networks.GANLoss". self.criterionGAN = networks.GANLoss().to(self.device) + self.criterionLoss = torch.nn.L1Loss() + # define and initialize optimizers. You can define one optimizer for each network. + # If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example. + self.optimizer = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999)) + self.optimizers = [self.optimizer] + + # Our program will automatically call to define schedulers, load networks, and print networks + + def set_input(self, input): + """Unpack input data from the dataloader and perform necessary pre-processing steps. + + Parameters: + input: a dictionary that contains the data itself and its metadata information. 
+ """ + AtoB = self.opt.direction == 'AtoB' # use to swap data_A and data_B + self.data_A = input['A' if AtoB else 'B'].to(self.device) # get image data A + self.data_B = input['B' if AtoB else 'A'].to(self.device) # get image data B + self.image_paths = input['A_paths' if AtoB else 'B_paths'] # get image paths + + def forward(self): + """Run forward pass. This will be called by both functions and .""" + self.output = self.netG(self.data_A) # generate output image given the input data_A + + def backward(self): + """Calculate losses, gradients, and update network weights; called in every training iteration""" + # caculate the intermediate results if necessary; here self.output has been computed during function + # calculate loss given the input and intermediate results + self.loss_G = self.criterionLoss(self.output, self.data_B) * self.opt.lambda_regression + self.loss_G.backward() # calculate gradients of network G w.r.t. loss_G + + def optimize_parameters(self): + """Update network weights; it will be called in every training iteration.""" + self.forward() # first call forward to calculate intermediate results + self.optimizer.zero_grad() # clear network G's existing gradients + self.backward() # calculate gradients for network G + self.optimizer.step() # update gradients for network G diff --git a/MuseTalk_project/3DMM/deep_3drecon/generate_reconstructor_opt_for_geneface.py b/MuseTalk_project/3DMM/deep_3drecon/generate_reconstructor_opt_for_geneface.py new file mode 100644 index 00000000..96e8b2ed --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/generate_reconstructor_opt_for_geneface.py @@ -0,0 +1,12 @@ +from options.test_options import TestOptions +import pickle as pkl + +# run in the root dir! 
+opt = TestOptions().parse() # get test options +opt.name='facerecon' +opt.epoch=20 +opt.bfm_folder='deep_3drecon/BFM/' +opt.checkpoints_dir='deep_3drecon/checkpoints/' + +with open("deep_3drecon/reconstructor_opt.pkl", 'wb') as f: + pkl.dump(opt, f) diff --git a/MuseTalk_project/3DMM/deep_3drecon/options/__init__.py b/MuseTalk_project/3DMM/deep_3drecon/options/__init__.py new file mode 100644 index 00000000..e7eedebe --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/options/__init__.py @@ -0,0 +1 @@ +"""This package options includes option modules: training options, test options, and basic options (used in both training and test).""" diff --git a/MuseTalk_project/3DMM/deep_3drecon/options/__pycache__/__init__.cpython-39.pyc b/MuseTalk_project/3DMM/deep_3drecon/options/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 00000000..5a103707 Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/options/__pycache__/__init__.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/options/__pycache__/base_options.cpython-39.pyc b/MuseTalk_project/3DMM/deep_3drecon/options/__pycache__/base_options.cpython-39.pyc new file mode 100644 index 00000000..9816fb01 Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/options/__pycache__/base_options.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/options/__pycache__/test_options.cpython-39.pyc b/MuseTalk_project/3DMM/deep_3drecon/options/__pycache__/test_options.cpython-39.pyc new file mode 100644 index 00000000..08234d53 Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/options/__pycache__/test_options.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/options/base_options.py b/MuseTalk_project/3DMM/deep_3drecon/options/base_options.py new file mode 100644 index 00000000..37ad0bda --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/options/base_options.py @@ -0,0 +1,169 @@ +"""This script contains base options for 
Deep3DFaceRecon_pytorch +""" + +import argparse +import os +from util import util +import numpy as np +import torch +import deep_3drecon_models +import data + + +class BaseOptions(): + """This class defines options used during both training and test time. + + It also implements several helper functions such as parsing, printing, and saving the options. + It also gathers additional options defined in functions in both dataset class and model class. + """ + + def __init__(self, cmd_line=None): + """Reset the class; indicates the class hasn't been initailized""" + self.initialized = False + self.cmd_line = None + if cmd_line is not None: + self.cmd_line = cmd_line.split() + + def initialize(self, parser): + """Define the common options that are used in both training and test.""" + # basic parameters + parser.add_argument('--name', type=str, default='facerecon', help='name of the experiment. It decides where to store samples and models') + parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. 
use -1 for CPU') + parser.add_argument('--checkpoints_dir', type=str, default='./deep_3drecon/checkpoints', help='models are saved here') + parser.add_argument('--vis_batch_nums', type=float, default=1, help='batch nums of images for visulization') + parser.add_argument('--eval_batch_nums', type=float, default=float('inf'), help='batch nums of images for evaluation') + parser.add_argument('--use_ddp', type=util.str2bool, nargs='?', const=True, default=True, help='whether use distributed data parallel') + parser.add_argument('--ddp_port', type=str, default='12355', help='ddp port') + parser.add_argument('--display_per_batch', type=util.str2bool, nargs='?', const=True, default=True, help='whether use batch to show losses') + parser.add_argument('--add_image', type=util.str2bool, nargs='?', const=True, default=True, help='whether add image to tensorboard') + parser.add_argument('--world_size', type=int, default=1, help='batch nums of images for evaluation') + + # model parameters + parser.add_argument('--model', type=str, default='facerecon', help='chooses which model to use.') + + # additional parameters + parser.add_argument('--epoch', type=str, default='20', help='which epoch to load? set to latest to use latest cached model') + parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information') + parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}') + + self.initialized = True + return parser + + def gather_options(self): + """Initialize our parser with basic options(only once). + Add additional model-specific and dataset-specific options. + These options are defined in the function + in model and dataset classes. 
+ """ + if not self.initialized: # check if it has been initialized + parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser = self.initialize(parser) + + # get the basic options + if self.cmd_line is None: + opt, _ = parser.parse_known_args() + else: + opt, _ = parser.parse_known_args(self.cmd_line) + + # set cuda visible devices + os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu_ids + + # modify model-related parser options + model_name = opt.model + model_option_setter = deep_3drecon_models.get_option_setter(model_name) + parser = model_option_setter(parser, self.isTrain) + if self.cmd_line is None: + opt, _ = parser.parse_known_args() # parse again with new defaults + else: + opt, _ = parser.parse_known_args(self.cmd_line) # parse again with new defaults + + # modify dataset-related parser options + if opt.dataset_mode: + dataset_name = opt.dataset_mode + dataset_option_setter = data.get_option_setter(dataset_name) + parser = dataset_option_setter(parser, self.isTrain) + + # save and return the parser + self.parser = parser + if self.cmd_line is None: + return parser.parse_args() + else: + return parser.parse_args(self.cmd_line) + + def print_options(self, opt): + """Print and save options + + It will print both current options and default values(if different). 
+ It will save options into a text file / [checkpoints_dir] / opt.txt + """ + message = '' + message += '----------------- Options ---------------\n' + for k, v in sorted(vars(opt).items()): + comment = '' + default = self.parser.get_default(k) + if v != default: + comment = '\t[default: %s]' % str(default) + message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment) + message += '----------------- End -------------------' + print(message) + + # save to the disk + expr_dir = os.path.join(opt.checkpoints_dir, opt.name) + util.mkdirs(expr_dir) + file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase)) + try: + with open(file_name, 'wt') as opt_file: + opt_file.write(message) + opt_file.write('\n') + except PermissionError as error: + print("permission error {}".format(error)) + pass + + def parse(self): + """Parse our options, create checkpoints directory suffix, and set up gpu device.""" + opt = self.gather_options() + opt.isTrain = self.isTrain # train or test + + # process opt.suffix + if opt.suffix: + suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else '' + opt.name = opt.name + suffix + + + # set gpu ids + str_ids = opt.gpu_ids.split(',') + gpu_ids = [] + for str_id in str_ids: + id = int(str_id) + if id >= 0: + gpu_ids.append(id) + opt.world_size = len(gpu_ids) + # if len(opt.gpu_ids) > 0: + # torch.cuda.set_device(gpu_ids[0]) + if opt.world_size == 1: + opt.use_ddp = False + + if opt.phase != 'test': + # set continue_train automatically + if opt.pretrained_name is None: + model_dir = os.path.join(opt.checkpoints_dir, opt.name) + else: + model_dir = os.path.join(opt.checkpoints_dir, opt.pretrained_name) + if os.path.isdir(model_dir): + model_pths = [i for i in os.listdir(model_dir) if i.endswith('pth')] + if os.path.isdir(model_dir) and len(model_pths) != 0: + opt.continue_train= True + + # update the latest epoch count + if opt.continue_train: + if opt.epoch == 'latest': + epoch_counts = [int(i.split('.')[0].split('_')[-1]) 
for i in model_pths if 'latest' not in i] + if len(epoch_counts) != 0: + opt.epoch_count = max(epoch_counts) + 1 + else: + opt.epoch_count = int(opt.epoch) + 1 + + + self.print_options(opt) + self.opt = opt + return self.opt diff --git a/MuseTalk_project/3DMM/deep_3drecon/options/test_options.py b/MuseTalk_project/3DMM/deep_3drecon/options/test_options.py new file mode 100644 index 00000000..4ff3ad14 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/options/test_options.py @@ -0,0 +1,21 @@ +"""This script contains the test options for Deep3DFaceRecon_pytorch +""" + +from .base_options import BaseOptions + + +class TestOptions(BaseOptions): + """This class includes test options. + + It also includes shared options defined in BaseOptions. + """ + + def initialize(self, parser): + parser = BaseOptions.initialize(self, parser) # define shared options + parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc') + parser.add_argument('--dataset_mode', type=str, default=None, help='chooses how datasets are loaded. [None | flist]') + parser.add_argument('--img_folder', type=str, default='examples', help='folder for test images.') + + # Dropout and Batchnorm has different behavior during training and test. + self.isTrain = False + return parser diff --git a/MuseTalk_project/3DMM/deep_3drecon/options/train_options.py b/MuseTalk_project/3DMM/deep_3drecon/options/train_options.py new file mode 100644 index 00000000..1337bfdd --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/options/train_options.py @@ -0,0 +1,53 @@ +"""This script contains the training options for Deep3DFaceRecon_pytorch +""" + +from .base_options import BaseOptions +from util import util + +class TrainOptions(BaseOptions): + """This class includes training options. + + It also includes shared options defined in BaseOptions. 
+ """ + + def initialize(self, parser): + parser = BaseOptions.initialize(self, parser) + # dataset parameters + # for train + parser.add_argument('--data_root', type=str, default='./', help='dataset root') + parser.add_argument('--flist', type=str, default='datalist/train/masks.txt', help='list of mask names of training set') + parser.add_argument('--batch_size', type=int, default=32) + parser.add_argument('--dataset_mode', type=str, default='flist', help='chooses how datasets are loaded. [None | flist]') + parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly') + parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data') + parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.') + parser.add_argument('--preprocess', type=str, default='shift_scale_rot_flip', help='scaling and cropping of images at load time [shift_scale_rot_flip | shift_scale | shift | shift_rot_flip ]') + parser.add_argument('--use_aug', type=util.str2bool, nargs='?', const=True, default=True, help='whether use data augmentation') + + # for val + parser.add_argument('--flist_val', type=str, default='datalist/val/masks.txt', help='list of mask names of val set') + parser.add_argument('--batch_size_val', type=int, default=32) + + + # visualization parameters + parser.add_argument('--display_freq', type=int, default=1000, help='frequency of showing training results on screen') + parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console') + + # network saving and loading parameters + parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results') + parser.add_argument('--save_epoch_freq', type=int, default=1, 
help='frequency of saving checkpoints at the end of epochs') + parser.add_argument('--evaluation_freq', type=int, default=5000, help='evaluation freq') + parser.add_argument('--save_by_iter', action='store_true', help='whether saves model by iteration') + parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model') + parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by , +, ...') + parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc') + parser.add_argument('--pretrained_name', type=str, default=None, help='resume training from another checkpoint') + + # training parameters + parser.add_argument('--n_epochs', type=int, default=20, help='number of epochs with the initial learning rate') + parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate for adam') + parser.add_argument('--lr_policy', type=str, default='step', help='learning rate policy. 
[linear | step | plateau | cosine]') + parser.add_argument('--lr_decay_epochs', type=int, default=10, help='multiply by a gamma every lr_decay_epochs epoches') + + self.isTrain = True + return parser diff --git a/MuseTalk_project/3DMM/deep_3drecon/reconstructor.py b/MuseTalk_project/3DMM/deep_3drecon/reconstructor.py new file mode 100644 index 00000000..a6f8be41 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/reconstructor.py @@ -0,0 +1,90 @@ +"""This script is the test script for Deep3DFaceRecon_pytorch +Pytorch Deep3D_Recon is 8x faster than TF-based, 16s/iter ==> 2s/iter +""" + +import os +# os.environ['PYTHONPATH'] = os.environ['PYTHONPATH'] + ":" + os.path.abspath("deep_3drecon") +import torch +import torch.nn as nn +from .deep_3drecon_models.facerecon_model import FaceReconModel +from .util.preprocess import align_img +from PIL import Image +import numpy as np +from .util.load_mats import load_lm3d +import torch +import pickle as pkl +from PIL import Image + +from utils.commons.tensor_utils import convert_to_tensor, convert_to_np + +with open("deep_3drecon/reconstructor_opt.pkl", "rb") as f: + opt = pkl.load(f) + +class Reconstructor(nn.Module): + def __init__(self): + super().__init__() + self.model = FaceReconModel(opt) + self.model.setup(opt) + self.model.device = 'cuda:0' + self.model.parallelize() + # self.model.to(self.model.device) + self.model.eval() + self.lm3d_std = load_lm3d(opt.bfm_folder) + + def preprocess_data(self, im, lm, lm3d_std): + # to RGB + H,W,_ = im.shape + lm = lm.reshape([-1, 2]) + lm[:, -1] = H - 1 - lm[:, -1] + + _, im, lm, _ = align_img(Image.fromarray(convert_to_np(im)), convert_to_np(lm), convert_to_np(lm3d_std)) + im = torch.tensor(np.array(im)/255., dtype=torch.float32).permute(2, 0, 1).unsqueeze(0) + lm = torch.tensor(lm).unsqueeze(0) + return im, lm + + @torch.no_grad() + def recon_coeff(self, batched_images, batched_lm5, return_image=True, batch_mode=True): + bs = batched_images.shape[0] + data_lst = [] + for i in 
range(bs): + img = batched_images[i] + lm5 = batched_lm5[i] + align_im, lm = self.preprocess_data(img, lm5, self.lm3d_std) + data = { + 'imgs': align_im, + 'lms': lm + } + data_lst.append(data) + if not batch_mode: + coeff_lst = [] + align_lst = [] + for i in range(bs): + data = data_lst + self.model.set_input(data) # unpack data from data loader + self.model.forward() + pred_coeff = self.model.output_coeff.cpu().numpy() + align_im = (align_im.squeeze().permute(1,2,0)*255).int().numpy().astype(np.uint8) + coeff_lst.append(pred_coeff) + align_lst.append(align_im) + batch_coeff = np.concatenate(coeff_lst) + batch_align_img = np.stack(align_lst) # [B, 257] + else: + imgs = torch.cat([d['imgs'] for d in data_lst]) + lms = torch.cat([d['lms'] for d in data_lst]) + data = { + 'imgs': imgs, + 'lms': lms + } + self.model.set_input(data) # unpack data from data loader + self.model.forward() + batch_coeff = self.model.output_coeff.cpu().numpy() + batch_align_img = (imgs.permute(0,2,3,1)*255).int().numpy().astype(np.uint8) + return batch_coeff, batch_align_img + + # todo: batch-wise recon! 
+ + def forward(self, batched_images, batched_lm5, return_image=True): + return self.recon_coeff(batched_images, batched_lm5, return_image) + + + diff --git a/MuseTalk_project/3DMM/deep_3drecon/reconstructor_opt.pkl b/MuseTalk_project/3DMM/deep_3drecon/reconstructor_opt.pkl new file mode 100644 index 00000000..b6a7af7a Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/reconstructor_opt.pkl differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/test.py b/MuseTalk_project/3DMM/deep_3drecon/test.py new file mode 100644 index 00000000..c5207844 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/test.py @@ -0,0 +1,69 @@ +"""This script is the test script for Deep3DFaceRecon_pytorch +""" + +import os +from options.test_options import TestOptions +from deep_3drecon_models import create_model +from util.visualizer import MyVisualizer +from util.preprocess import align_img +from PIL import Image +import numpy as np +from util.load_mats import load_lm3d +import torch + +def get_data_path(root='examples'): + im_path = [os.path.join(root, i) for i in sorted(os.listdir(root)) if i.endswith('png') or i.endswith('jpg')] + lm_path = [i.replace('png', 'txt').replace('jpg', 'txt') for i in im_path] + lm_path = [os.path.join(i.replace(i.split(os.path.sep)[-1],''),'detections',i.split(os.path.sep)[-1]) for i in lm_path] + return im_path, lm_path + +def read_data(im_path, lm_path, lm3d_std, to_tensor=True): + # to RGB + im = Image.open(im_path).convert('RGB') + W,H = im.size + lm = np.loadtxt(lm_path).astype(np.float32) + lm = lm.reshape([-1, 2]) + lm[:, -1] = H - 1 - lm[:, -1] + _, im, lm, _ = align_img(im, lm, lm3d_std) + if to_tensor: + im = torch.tensor(np.array(im)/255., dtype=torch.float32).permute(2, 0, 1).unsqueeze(0) + lm = torch.tensor(lm).unsqueeze(0) + return im, lm + +def main(rank, opt, name='examples'): + device = torch.device(rank) + torch.cuda.set_device(device) + model = create_model(opt) + model.setup(opt) + model.device = device + 
model.parallelize() + model.eval() + visualizer = MyVisualizer(opt) + + im_path, lm_path = get_data_path(name) + lm3d_std = load_lm3d(opt.bfm_folder) + + for i in range(len(im_path)): + print(i, im_path[i]) + img_name = im_path[i].split(os.path.sep)[-1].replace('.png','').replace('.jpg','') + if not os.path.isfile(lm_path[i]): + print("%s is not found !!!"%lm_path[i]) + continue + im_tensor, lm_tensor = read_data(im_path[i], lm_path[i], lm3d_std) + data = { + 'imgs': im_tensor, + 'lms': lm_tensor + } + model.set_input(data) # unpack data from data loader + model.test() # run inference + visuals = model.get_current_visuals() # get image results + visualizer.display_current_results(visuals, 0, opt.epoch, dataset=name.split(os.path.sep)[-1], + save_results=True, count=i, name=img_name, add_image=False) + + model.save_mesh(os.path.join(visualizer.img_dir, name.split(os.path.sep)[-1], 'epoch_%s_%06d'%(opt.epoch, 0),img_name+'.obj')) # save reconstruction meshes + model.save_coeff(os.path.join(visualizer.img_dir, name.split(os.path.sep)[-1], 'epoch_%s_%06d'%(opt.epoch, 0),img_name+'.mat')) # save predicted coefficients + +if __name__ == '__main__': + opt = TestOptions().parse() # get test options + main(0, opt, 'deep_3drecon/datasets/examples') + print(f"results saved at deep_3drecon/checkpoints/facerecon/results/") diff --git a/MuseTalk_project/3DMM/deep_3drecon/train.py b/MuseTalk_project/3DMM/deep_3drecon/train.py new file mode 100644 index 00000000..cbdda882 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/train.py @@ -0,0 +1,166 @@ +"""This script is the training script for Deep3DFaceRecon_pytorch +""" + +import os +import time +import numpy as np +import torch +from options.train_options import TrainOptions +from data import create_dataset +from deep_3drecon_models import create_model +from util.visualizer import MyVisualizer +from util.util import genvalconf +import torch.multiprocessing as mp +import torch.distributed as dist + + +def setup(rank, 
world_size, port): + os.environ['MASTER_ADDR'] = 'localhost' + os.environ['MASTER_PORT'] = port + + # initialize the process group + dist.init_process_group("gloo", rank=rank, world_size=world_size) + +def cleanup(): + dist.destroy_process_group() + +def main(rank, world_size, train_opt): + val_opt = genvalconf(train_opt, isTrain=False) + + device = torch.device(rank) + torch.cuda.set_device(device) + use_ddp = train_opt.use_ddp + + if use_ddp: + setup(rank, world_size, train_opt.ddp_port) + + train_dataset, val_dataset = create_dataset(train_opt, rank=rank), create_dataset(val_opt, rank=rank) + train_dataset_batches, val_dataset_batches = \ + len(train_dataset) // train_opt.batch_size, len(val_dataset) // val_opt.batch_size + + model = create_model(train_opt) # create a model given train_opt.model and other options + model.setup(train_opt) + model.device = device + model.parallelize() + + if rank == 0: + print('The batch number of training images = %d\n, \ + the batch number of validation images = %d'% (train_dataset_batches, val_dataset_batches)) + model.print_networks(train_opt.verbose) + visualizer = MyVisualizer(train_opt) # create a visualizer that display/save images and plots + + total_iters = train_dataset_batches * (train_opt.epoch_count - 1) # the total number of training iterations + t_data = 0 + t_val = 0 + optimize_time = 0.1 + batch_size = 1 if train_opt.display_per_batch else train_opt.batch_size + + if use_ddp: + dist.barrier() + + times = [] + for epoch in range(train_opt.epoch_count, train_opt.n_epochs + 1): # outer loop for different epochs; we save the model by , + + epoch_start_time = time.time() # timer for entire epoch + iter_data_time = time.time() # timer for train_data loading per iteration + epoch_iter = 0 # the number of training iterations in current epoch, reset to 0 every epoch + + train_dataset.set_epoch(epoch) + for i, train_data in enumerate(train_dataset): # inner loop within one epoch + iter_start_time = time.time() # timer for 
computation per iteration + if total_iters % train_opt.print_freq == 0: + t_data = iter_start_time - iter_data_time + total_iters += batch_size + epoch_iter += batch_size + + torch.cuda.synchronize() + optimize_start_time = time.time() + + model.set_input(train_data) # unpack train_data from dataset and apply preprocessing + model.optimize_parameters() # calculate loss functions, get gradients, update network weights + + torch.cuda.synchronize() + optimize_time = (time.time() - optimize_start_time) / batch_size * 0.005 + 0.995 * optimize_time + + if use_ddp: + dist.barrier() + + if rank == 0 and (total_iters == batch_size or total_iters % train_opt.display_freq == 0): # display images on visdom and save images to a HTML file + model.compute_visuals() + visualizer.display_current_results(model.get_current_visuals(), total_iters, epoch, + save_results=True, + add_image=train_opt.add_image) + # (total_iters == batch_size or total_iters % train_opt.evaluation_freq == 0) + + if rank == 0 and (total_iters == batch_size or total_iters % train_opt.print_freq == 0): # print training losses and save logging information to the disk + losses = model.get_current_losses() + visualizer.print_current_losses(epoch, epoch_iter, losses, optimize_time, t_data) + visualizer.plot_current_losses(total_iters, losses) + + if total_iters == batch_size or total_iters % train_opt.evaluation_freq == 0: + with torch.no_grad(): + torch.cuda.synchronize() + val_start_time = time.time() + losses_avg = {} + model.eval() + for j, val_data in enumerate(val_dataset): + model.set_input(val_data) + model.optimize_parameters(isTrain=False) + if rank == 0 and j < train_opt.vis_batch_nums: + model.compute_visuals() + visualizer.display_current_results(model.get_current_visuals(), total_iters, epoch, + dataset='val', save_results=True, count=j * val_opt.batch_size, + add_image=train_opt.add_image) + + if j < train_opt.eval_batch_nums: + losses = model.get_current_losses() + for key, value in losses.items(): 
+ losses_avg[key] = losses_avg.get(key, 0) + value + + for key, value in losses_avg.items(): + losses_avg[key] = value / min(train_opt.eval_batch_nums, val_dataset_batches) + + torch.cuda.synchronize() + eval_time = time.time() - val_start_time + + if rank == 0: + visualizer.print_current_losses(epoch, epoch_iter, losses_avg, eval_time, t_data, dataset='val') # visualize training results + visualizer.plot_current_losses(total_iters, losses_avg, dataset='val') + model.train() + + if use_ddp: + dist.barrier() + + if rank == 0 and (total_iters == batch_size or total_iters % train_opt.save_latest_freq == 0): # cache our latest model every iterations + print('saving the latest model (epoch %d, total_iters %d)' % (epoch, total_iters)) + print(train_opt.name) # it's useful to occasionally show the experiment name on console + save_suffix = 'iter_%d' % total_iters if train_opt.save_by_iter else 'latest' + model.save_networks(save_suffix) + + if use_ddp: + dist.barrier() + + iter_data_time = time.time() + + print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, train_opt.n_epochs, time.time() - epoch_start_time)) + model.update_learning_rate() # update learning rates at the end of every epoch. 
+ + if rank == 0 and epoch % train_opt.save_epoch_freq == 0: # cache our model every epochs + print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters)) + model.save_networks('latest') + model.save_networks(epoch) + + if use_ddp: + dist.barrier() + +if __name__ == '__main__': + + import warnings + warnings.filterwarnings("ignore") + + train_opt = TrainOptions().parse() # get training options + world_size = train_opt.world_size + + if train_opt.use_ddp: + mp.spawn(main, args=(world_size, train_opt), nprocs=world_size, join=True) + else: + main(0, world_size, train_opt) diff --git a/MuseTalk_project/3DMM/deep_3drecon/util/BBRegressorParam_r.mat b/MuseTalk_project/3DMM/deep_3drecon/util/BBRegressorParam_r.mat new file mode 100644 index 00000000..1430a94e Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/util/BBRegressorParam_r.mat differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/util/__init__.py b/MuseTalk_project/3DMM/deep_3drecon/util/__init__.py new file mode 100644 index 00000000..45cbc84b --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/util/__init__.py @@ -0,0 +1,2 @@ +"""This package includes a miscellaneous collection of useful helper functions.""" +from .util import * diff --git a/MuseTalk_project/3DMM/deep_3drecon/util/__pycache__/__init__.cpython-39.pyc b/MuseTalk_project/3DMM/deep_3drecon/util/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 00000000..923ae93f Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/util/__pycache__/__init__.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/util/__pycache__/html.cpython-39.pyc b/MuseTalk_project/3DMM/deep_3drecon/util/__pycache__/html.cpython-39.pyc new file mode 100644 index 00000000..fa754e12 Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/util/__pycache__/html.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/util/__pycache__/load_mats.cpython-39.pyc 
b/MuseTalk_project/3DMM/deep_3drecon/util/__pycache__/load_mats.cpython-39.pyc new file mode 100644 index 00000000..cdd38834 Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/util/__pycache__/load_mats.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/util/__pycache__/mesh_renderer.cpython-39.pyc b/MuseTalk_project/3DMM/deep_3drecon/util/__pycache__/mesh_renderer.cpython-39.pyc new file mode 100644 index 00000000..6f28bf01 Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/util/__pycache__/mesh_renderer.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/util/__pycache__/preprocess.cpython-39.pyc b/MuseTalk_project/3DMM/deep_3drecon/util/__pycache__/preprocess.cpython-39.pyc new file mode 100644 index 00000000..fd29ea7f Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/util/__pycache__/preprocess.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/util/__pycache__/util.cpython-39.pyc b/MuseTalk_project/3DMM/deep_3drecon/util/__pycache__/util.cpython-39.pyc new file mode 100644 index 00000000..3e8c9efd Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/util/__pycache__/util.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/util/__pycache__/visualizer.cpython-39.pyc b/MuseTalk_project/3DMM/deep_3drecon/util/__pycache__/visualizer.cpython-39.pyc new file mode 100644 index 00000000..f1b79b1e Binary files /dev/null and b/MuseTalk_project/3DMM/deep_3drecon/util/__pycache__/visualizer.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/deep_3drecon/util/detect_lm68.py b/MuseTalk_project/3DMM/deep_3drecon/util/detect_lm68.py new file mode 100644 index 00000000..b7e40997 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/util/detect_lm68.py @@ -0,0 +1,106 @@ +import os +import cv2 +import numpy as np +from scipy.io import loadmat +import tensorflow as tf +from util.preprocess import align_for_lm +from shutil import move + +mean_face = 
def save_label(labels, save_path):
    """Write a landmark array to *save_path* as plain text."""
    np.savetxt(save_path, labels)


def draw_landmarks(img, landmark, save_name):
    """Save a visualisation of *landmark* drawn as small red dots (BGR
    [0, 0, 255]) over *img*.

    The y axis is flipped before drawing: landmark coordinates use a
    bottom-left origin while image rows grow downwards.
    """
    canvas = np.zeros([img.shape[0], img.shape[1], 3])
    canvas[:] = img.astype(np.float32)
    pts = np.round(landmark).astype(np.int32)

    rows, cols = img.shape[0], img.shape[1]
    for px, py in pts:
        # Paint a 2x2 neighbourhood (offsets -1 and 0) around each point,
        # skipping pixels that fall outside the image.
        for dy in (-1, 0):
            for dx in (-1, 0):
                r = rows - 1 - py + dy
                c = px + dx
                if 0 < r < rows and 0 < c < cols:
                    canvas[r, c, :] = np.array([0, 0, 255])

    cv2.imwrite(save_name, canvas.astype(np.uint8))


def load_data(img_name, txt_name):
    """Load an image (BGR, via OpenCV) together with its landmark text file."""
    return cv2.imread(img_name), np.loadtxt(txt_name)
os.path.join(img_path, name) + txt_name = '.'.join(name.split('.')[:-1]) + '.txt' + full_txt_name = os.path.join(img_path, 'detections', txt_name) # 5 facial landmark path for each image + + # if an image does not have detected 5 facial landmarks, remove it from the training list + if not os.path.isfile(full_txt_name): + move(full_image_name, os.path.join(remove_path, name)) + continue + + # load data + img, five_points = load_data(full_image_name, full_txt_name) + input_img, scale, bbox = align_for_lm(img, five_points) # align for 68 landmark detection + + # if the alignment fails, remove corresponding image from the training list + if scale == 0: + move(full_txt_name, os.path.join( + remove_path, txt_name)) + move(full_image_name, os.path.join(remove_path, name)) + continue + + # detect landmarks + input_img = np.reshape( + input_img, [1, 224, 224, 3]).astype(np.float32) + landmark = sess.run( + output_op, feed_dict={input_op: input_img}) + + # transform back to original image coordinate + landmark = landmark.reshape([68, 2]) + mean_face + landmark[:, 1] = 223 - landmark[:, 1] + landmark = landmark / scale + landmark[:, 0] = landmark[:, 0] + bbox[0] + landmark[:, 1] = landmark[:, 1] + bbox[1] + landmark[:, 1] = img.shape[0] - 1 - landmark[:, 1] + + if i % 100 == 0: + draw_landmarks(img, landmark, os.path.join(vis_path, name)) + save_label(landmark, os.path.join(save_path, txt_name)) diff --git a/MuseTalk_project/3DMM/deep_3drecon/util/generate_list.py b/MuseTalk_project/3DMM/deep_3drecon/util/generate_list.py new file mode 100644 index 00000000..943d9067 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/util/generate_list.py @@ -0,0 +1,34 @@ +"""This script is to generate training list files for Deep3DFaceRecon_pytorch +""" + +import os + +# save path to training data +def write_list(lms_list, imgs_list, msks_list, mode='train',save_folder='datalist', save_name=''): + save_path = os.path.join(save_folder, mode) + if not os.path.isdir(save_path): + 
# check if the path is valid
def check_list(rlms_list, rimgs_list, rmsks_list):
    """Filter (landmark, image, mask) path triplets, keeping only those whose
    three files all exist on disk; print a per-entry validity flag.

    Returns three parallel lists of the surviving paths.
    """
    lms_list, imgs_list, msks_list = [], [], []
    for idx, lm_path in enumerate(rlms_list):
        # Index-based access (not zip) so mismatched input lengths fail loudly.
        im_path = rimgs_list[idx]
        msk_path = rmsks_list[idx]
        all_exist = (os.path.isfile(lm_path)
                     and os.path.isfile(im_path)
                     and os.path.isfile(msk_path))
        if all_exist:
            lms_list.append(lm_path)
            imgs_list.append(im_path)
            msks_list.append(msk_path)
        print(idx, lm_path, 'true' if all_exist else 'false')
    return lms_list, imgs_list, msks_list
HTML file will be created at /index.html; images will be saved at 0: + with self.doc.head: + meta(http_equiv="refresh", content=str(refresh)) + + def get_image_dir(self): + """Return the directory that stores images""" + return self.img_dir + + def add_header(self, text): + """Insert a header to the HTML file + + Parameters: + text (str) -- the header text + """ + with self.doc: + h3(text) + + def add_images(self, ims, txts, links, width=400): + """add images to the HTML file + + Parameters: + ims (str list) -- a list of image paths + txts (str list) -- a list of image names shown on the website + links (str list) -- a list of hyperref links; when you click an image, it will redirect you to a new page + """ + self.t = table(border=1, style="table-layout: fixed;") # Insert a table + self.doc.add(self.t) + with self.t: + with tr(): + for im, txt, link in zip(ims, txts, links): + with td(style="word-wrap: break-word;", halign="center", valign="top"): + with p(): + with a(href=os.path.join('images', link)): + img(style="width:%dpx" % width, src=os.path.join('images', im)) + br() + p(txt) + + def save(self): + """save the current content to the HMTL file""" + html_file = '%s/index.html' % self.web_dir + f = open(html_file, 'wt') + f.write(self.doc.render()) + f.close() + + +if __name__ == '__main__': # we show an example usage here. 
# load expression basis
def LoadExpBasis(bfm_folder='BFM', n_vertex=53215):
    """Load the PCA expression basis shipped as a raw binary file.

    Fixes the original's unclosed file handle (the ``open``/``close`` pair
    leaked on any read error) by using a context manager, and generalizes the
    previously hard-coded vertex count into a backward-compatible parameter.

    Parameters:
        bfm_folder -- str, folder containing 'Exp_Pca.bin' and 'std_exp.txt'
        n_vertex   -- int, number of mesh vertices encoded in the binary file
                      (default 53215, the count used by Guo et al.'s basis)

    Returns:
        expPC -- numpy.array (3*n_vertex, exp_dim), expression basis (one
                 column per component)
        expEV -- numpy.array, per-component std read from 'std_exp.txt'
    """
    # File layout: int32 exp_dim | float32 mean (3*n_vertex) |
    # float32 basis (exp_dim * 3*n_vertex).
    with open(osp.join(bfm_folder, 'Exp_Pca.bin'), 'rb') as exp_bin:
        exp_dim = array('i')
        exp_dim.fromfile(exp_bin, 1)
        expMU = array('f')
        expPC = array('f')
        expMU.fromfile(exp_bin, 3*n_vertex)
        expPC.fromfile(exp_bin, 3*exp_dim[0]*n_vertex)

    expPC = np.array(expPC)
    expPC = np.reshape(expPC, [exp_dim[0], -1])
    expPC = np.transpose(expPC)

    expEV = np.loadtxt(osp.join(bfm_folder, 'std_exp.txt'))

    return expPC, expEV
basis + + exBase = expPC*np.reshape(expEV, [-1, 79]) + exBase = exBase/1e5 # unify the scale to decimeter + exBase = exBase[:, :64] # use only first 64 basis + + texBase = texPC*np.reshape(texEV, [-1, 199]) + texBase = texBase[:, :80] # use only first 80 basis + + # our face model is cropped along face landmarks and contains only 35709 vertex. + # original BFM09 contains 53490 vertex, and expression basis provided by Guo et al. contains 53215 vertex. + # thus we select corresponding vertex to get our face model. + + index_exp = loadmat(osp.join(bfm_folder, 'BFM_front_idx.mat')) + index_exp = index_exp['idx'].astype(np.int32) - 1 # starts from 0 (to 53215) + + index_shape = loadmat(osp.join(bfm_folder, 'BFM_exp_idx.mat')) + index_shape = index_shape['trimIndex'].astype( + np.int32) - 1 # starts from 0 (to 53490) + index_shape = index_shape[index_exp] + + idBase = np.reshape(idBase, [-1, 3, 80]) + idBase = idBase[index_shape, :, :] + idBase = np.reshape(idBase, [-1, 80]) + + texBase = np.reshape(texBase, [-1, 3, 80]) + texBase = texBase[index_shape, :, :] + texBase = np.reshape(texBase, [-1, 80]) + + exBase = np.reshape(exBase, [-1, 3, 64]) + exBase = exBase[index_exp, :, :] + exBase = np.reshape(exBase, [-1, 64]) + + meanshape = np.reshape(shapeMU, [-1, 3])/1e5 + meanshape = meanshape[index_shape, :] + meanshape = np.reshape(meanshape, [1, -1]) + + meantex = np.reshape(texMU, [-1, 3]) + meantex = meantex[index_shape, :] + meantex = np.reshape(meantex, [1, -1]) + + # other info contains triangles, region used for computing photometric loss, + # region used for skin texture regularization, and 68 landmarks index etc. 
# load landmarks for standard face, which is used for image preprocessing
def load_lm3d(bfm_folder):
    """Load the canonical 3-D face landmarks and reduce them to the standard
    5-point set used for alignment.

    Returns a (5, 3) array ordered [eye, eye, nose, mouth corner, mouth corner].
    """
    all_lms = loadmat(osp.join(bfm_folder, 'similarity_Lm3D_all.mat'))['lm']

    # 0-based indices (nose tip, eye corners, mouth corners) in the
    # 68-landmark convention.
    idx = np.array([31, 37, 40, 43, 46, 49, 55]) - 1
    nose = all_lms[idx[0], :]
    eye_a = np.mean(all_lms[idx[[1, 2]], :], 0)
    eye_b = np.mean(all_lms[idx[[3, 4]], :], 0)
    mouth_a = all_lms[idx[5], :]
    mouth_b = all_lms[idx[6], :]

    five = np.stack([nose, eye_a, eye_b, mouth_a, mouth_b], axis=0)
    # Reorder so the eyes come first, then the nose, then the mouth corners.
    return five[[1, 2, 0, 3, 4], :]
+""" +import pytorch3d.ops +import torch +import torch.nn.functional as F +import kornia +from kornia.geometry.camera import pixel2cam +import numpy as np +from typing import List +from scipy.io import loadmat +from torch import nn + +from pytorch3d.structures import Meshes +from pytorch3d.renderer import ( + look_at_view_transform, + FoVPerspectiveCameras, + DirectionalLights, + RasterizationSettings, + MeshRenderer, + MeshRasterizer, + SoftPhongShader, + TexturesUV, +) + +# def ndc_projection(x=0.1, n=1.0, f=50.0): +# return np.array([[n/x, 0, 0, 0], +# [ 0, n/-x, 0, 0], +# [ 0, 0, -(f+n)/(f-n), -(2*f*n)/(f-n)], +# [ 0, 0, -1, 0]]).astype(np.float32) + +class MeshRenderer(nn.Module): + def __init__(self, + rasterize_fov, + znear=0.1, + zfar=10, + rasterize_size=224,**args): + super(MeshRenderer, self).__init__() + + # x = np.tan(np.deg2rad(rasterize_fov * 0.5)) * znear + # self.ndc_proj = torch.tensor(ndc_projection(x=x, n=znear, f=zfar)).matmul( + # torch.diag(torch.tensor([1., -1, -1, 1]))) + self.rasterize_size = rasterize_size + self.fov = rasterize_fov + self.znear = znear + self.zfar = zfar + + self.rasterizer = None + + def forward(self, vertex, tri, feat=None): + """ + Return: + mask -- torch.tensor, size (B, 1, H, W) + depth -- torch.tensor, size (B, 1, H, W) + features(optional) -- torch.tensor, size (B, C, H, W) if feat is not None + + Parameters: + vertex -- torch.tensor, size (B, N, 3) + tri -- torch.tensor, size (B, M, 3) or (M, 3), triangles + feat(optional) -- torch.tensor, size (B, N ,C), features + """ + device = vertex.device + rsize = int(self.rasterize_size) + # ndc_proj = self.ndc_proj.to(device) + # trans to homogeneous coordinates of 3d vertices, the direction of y is the same as v + if vertex.shape[-1] == 3: + vertex = torch.cat([vertex, torch.ones([*vertex.shape[:2], 1]).to(device)], dim=-1) + vertex[..., 0] = -vertex[..., 0] + + + # vertex_ndc = vertex @ ndc_proj.t() + if self.rasterizer is None: + self.rasterizer = MeshRasterizer() + 
print("create rasterizer on device cuda:%d"%device.index) + + # ranges = None + # if isinstance(tri, List) or len(tri.shape) == 3: + # vum = vertex_ndc.shape[1] + # fnum = torch.tensor([f.shape[0] for f in tri]).unsqueeze(1).to(device) + # fstartidx = torch.cumsum(fnum, dim=0) - fnum + # ranges = torch.cat([fstartidx, fnum], axis=1).type(torch.int32).cpu() + # for i in range(tri.shape[0]): + # tri[i] = tri[i] + i*vum + # vertex_ndc = torch.cat(vertex_ndc, dim=0) + # tri = torch.cat(tri, dim=0) + + # for range_mode vetex: [B*N, 4], tri: [B*M, 3], for instance_mode vetex: [B, N, 4], tri: [M, 3] + tri = tri.type(torch.int32).contiguous() + + # rasterize + cameras = FoVPerspectiveCameras( + device=device, + fov=self.fov, + znear=self.znear, + zfar=self.zfar, + ) + + raster_settings = RasterizationSettings( + image_size=rsize + ) + + # print(vertex.shape, tri.shape) + mesh = Meshes(vertex.contiguous()[...,:3], tri.unsqueeze(0)) + + fragments = self.rasterizer(mesh, cameras = cameras, raster_settings = raster_settings) + rast_out = fragments.pix_to_face.squeeze(-1) + depth = fragments.zbuf + + # render depth + depth = depth.permute(0, 3, 1, 2) + mask = (rast_out > 0).float().unsqueeze(1) + depth = mask * depth + + + image = None + if feat is not None: + attributes = feat.reshape(-1,3)[mesh.faces_packed()] + image = pytorch3d.ops.interpolate_face_attributes(fragments.pix_to_face, + fragments.bary_coords, + attributes) + # print(image.shape) + image = image.squeeze(-2).permute(0, 3, 1, 2) + image = mask * image + + return mask, depth, image + diff --git a/MuseTalk_project/3DMM/deep_3drecon/util/preprocess.py b/MuseTalk_project/3DMM/deep_3drecon/util/preprocess.py new file mode 100644 index 00000000..6c4a913e --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/util/preprocess.py @@ -0,0 +1,230 @@ +"""This script contains the image preprocessing code for Deep3DFaceRecon_pytorch +""" + +import numpy as np +from scipy.io import loadmat +from PIL import Image +import cv2 
# calculating least square problem for image alignment
def POS(xp, x):
    """Estimate the 2-D translation and isotropic scale of a scaled
    orthographic projection mapping 3-D points onto 2-D landmarks.

    Parameters:
        xp -- numpy.array (2, N), target 2-D landmark coordinates
        x  -- numpy.array (3, N), corresponding 3-D model coordinates

    Returns:
        t -- numpy.array (2, 1), translation (tx, ty)
        s -- float, isotropic scale factor
    """
    npts = xp.shape[1]

    # Build A @ k = b for the 8 unknowns k = [s*R1 | s*tx | s*R2 | s*ty]:
    # even rows constrain the x coordinates, odd rows the y coordinates.
    A = np.zeros([2*npts, 8])
    A[0:2*npts:2, 0:3] = x.transpose()
    A[0:2*npts:2, 3] = 1
    A[1:2*npts:2, 4:7] = x.transpose()
    A[1:2*npts:2, 7] = 1

    b = np.reshape(xp.transpose(), [2*npts, 1])

    # rcond=None selects the machine-precision cutoff and avoids the
    # deprecated legacy default (which raises a FutureWarning that the
    # module currently blanket-suppresses).
    k, _, _, _ = np.linalg.lstsq(A, b, rcond=None)

    R1 = k[0:3]
    R2 = k[4:7]
    sTx = k[3]
    sTy = k[7]
    # The scale is the mean length of the two scaled rotation rows.
    s = (np.linalg.norm(R1) + np.linalg.norm(R2))/2
    t = np.stack([sTx, sTy], axis=0)

    return t, s
# utils for landmark detection
def align_for_lm(img, five_points):
    """Crop *img* to the 224x224 region predicted from 5 facial landmarks,
    as a preprocessing step for 68-landmark detection.

    Returns (crop_img, scale, bbox); scale is 0 when the crop failed.
    """
    pts = np.array(five_points).reshape([1, 10])
    # NOTE(review): the regressor weights load from a path relative to the
    # current working directory on every call — confirm callers run from the
    # expected root.
    params = loadmat('util/BBRegressorParam_r.mat')
    bbox = BBRegression(pts, params)
    assert(bbox[2] != 0)
    bbox = np.round(bbox).astype(np.int32)
    crop_img, scale = crop(img, bbox)
    return crop_img, scale, bbox
# utils for face reconstruction
def extract_5p(lm):
    """Reduce a 68-point landmark array (68, 2) to the standard 5-point set,
    ordered [eye, eye, nose, mouth corner, mouth corner]."""
    idx = np.array([31, 37, 40, 43, 46, 49, 55]) - 1
    nose = lm[idx[0], :]
    eye_a = np.mean(lm[idx[[1, 2]], :], 0)
    eye_b = np.mean(lm[idx[[3, 4]], :], 0)
    mouth_a = lm[idx[5], :]
    mouth_b = lm[idx[6], :]
    lm5p = np.stack([nose, eye_a, eye_b, mouth_a, mouth_b], axis=0)
    # Eyes first, then nose, then mouth corners.
    return lm5p[[1, 2, 0, 3, 4], :]
def estimate_norm_torch(lm_68p, H):
    """Batched wrapper around estimate_norm: compute a (B, 2, 3) stack of
    similarity-transform matrices for a batch of 68-point landmark tensors.

    Parameters:
        lm_68p -- torch.Tensor (B, 68, 2)
        H      -- int/float, image height
    """
    lm_np = lm_68p.detach().cpu().numpy()
    mats = [estimate_norm(single_lm, H) for single_lm in lm_np]
    return torch.tensor(np.array(mats), dtype=torch.float32).to(lm_68p.device)
np.matmul(data_,self.cov_inv[i]) * data_ + tmp = np.sum(tmp,axis=1) + power = -0.5 * tmp + + p = np.array([math.exp(power[j]) for j in range(N)]) + p = p/self.factor[i] + lh += p*self.w[i] + + return lh + + +def _rgb2ycbcr(rgb): + m = np.array([[65.481, 128.553, 24.966], + [-37.797, -74.203, 112], + [112, -93.786, -18.214]]) + shape = rgb.shape + rgb = rgb.reshape((shape[0] * shape[1], 3)) + ycbcr = np.dot(rgb, m.transpose() / 255.) + ycbcr[:, 0] += 16. + ycbcr[:, 1:] += 128. + return ycbcr.reshape(shape) + + +def _bgr2ycbcr(bgr): + rgb = bgr[..., ::-1] + return _rgb2ycbcr(rgb) + + +gmm_skin_w = [0.24063933, 0.16365987, 0.26034665, 0.33535415] +gmm_skin_mu = [np.array([113.71862, 103.39613, 164.08226]), + np.array([150.19858, 105.18467, 155.51428]), + np.array([183.92976, 107.62468, 152.71820]), + np.array([114.90524, 113.59782, 151.38217])] +gmm_skin_cov_det = [5692842.5, 5851930.5, 2329131., 1585971.] +gmm_skin_cov_inv = [np.array([[0.0019472069, 0.0020450759, -0.00060243998],[0.0020450759, 0.017700525, 0.0051420014],[-0.00060243998, 0.0051420014, 0.0081308950]]), + np.array([[0.0027110141, 0.0011036990, 0.0023122299],[0.0011036990, 0.010707724, 0.010742856],[0.0023122299, 0.010742856, 0.017481629]]), + np.array([[0.0048026871, 0.00022935172, 0.0077668377],[0.00022935172, 0.011729696, 0.0081661865],[0.0077668377, 0.0081661865, 0.025374353]]), + np.array([[0.0011989699, 0.0022453172, -0.0010748957],[0.0022453172, 0.047758564, 0.020332102],[-0.0010748957, 0.020332102, 0.024502251]])] + +gmm_skin = GMM(3, 4, gmm_skin_w, gmm_skin_mu, [], gmm_skin_cov_det, gmm_skin_cov_inv) + +gmm_nonskin_w = [0.12791070, 0.31130761, 0.34245777, 0.21832393] +gmm_nonskin_mu = [np.array([99.200851, 112.07533, 140.20602]), + np.array([110.91392, 125.52969, 130.19237]), + np.array([129.75864, 129.96107, 126.96808]), + np.array([112.29587, 128.85121, 129.05431])] +gmm_nonskin_cov_det = [458703648., 6466488., 90611376., 133097.63] +gmm_nonskin_cov_inv = [np.array([[0.00085371657, 
# calculate skin attention mask
def skinmask(imbgr):
    """Compute a per-pixel skin-probability mask for a BGR image using the two
    pre-trained GMM colour models, returned as an H*W*3 uint8 array (0-255)."""
    ycbcr = _bgr2ycbcr(imbgr)
    pixels = ycbcr.reshape((-1, 3))

    skin_lh = gmm_skin.likelihood(pixels)
    nonskin_lh = gmm_nonskin.likelihood(pixels)

    # Bayes rule with the fixed skin / non-skin priors.
    weighted_skin = prior_skin * skin_lh
    weighted_nonskin = prior_nonskin * nonskin_lh
    posterior = weighted_skin / (weighted_skin + weighted_nonskin)

    posterior = posterior.reshape((ycbcr.shape[0], ycbcr.shape[1]))
    mask = np.round(posterior * 255)
    mask = mask.astype(np.uint8)
    # Replicate to 3 channels so the mask matches the image layout (H*W*3).
    return np.tile(np.expand_dims(mask, 2), [1, 1, 3])
mode 100644 index 00000000..3a46d4db --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/util/test_mean_face.txt @@ -0,0 +1,136 @@ +-5.228591537475585938e+01 +2.078247070312500000e-01 +-5.064269638061523438e+01 +-1.315765380859375000e+01 +-4.952939224243164062e+01 +-2.592591094970703125e+01 +-4.793047332763671875e+01 +-3.832135772705078125e+01 +-4.512159729003906250e+01 +-5.059623336791992188e+01 +-3.917720794677734375e+01 +-6.043736648559570312e+01 +-2.929953765869140625e+01 +-6.861183166503906250e+01 +-1.719801330566406250e+01 +-7.572736358642578125e+01 +-1.961936950683593750e+00 +-7.862001037597656250e+01 +1.467941284179687500e+01 +-7.607844543457031250e+01 +2.744073486328125000e+01 +-6.915261840820312500e+01 +3.855677795410156250e+01 +-5.950350570678710938e+01 +4.478240966796875000e+01 +-4.867547225952148438e+01 +4.714337158203125000e+01 +-3.800830078125000000e+01 +4.940315246582031250e+01 +-2.496297454833984375e+01 +5.117234802246093750e+01 +-1.241538238525390625e+01 +5.190507507324218750e+01 +8.244247436523437500e-01 +-4.150688934326171875e+01 +2.386329650878906250e+01 +-3.570307159423828125e+01 +3.017010498046875000e+01 +-2.790358734130859375e+01 +3.212951660156250000e+01 +-1.941773223876953125e+01 +3.156523132324218750e+01 +-1.138106536865234375e+01 +2.841992187500000000e+01 +5.993263244628906250e+00 +2.895182800292968750e+01 +1.343590545654296875e+01 +3.189880371093750000e+01 +2.203153991699218750e+01 +3.302221679687500000e+01 +2.992478942871093750e+01 +3.099150085449218750e+01 +3.628388977050781250e+01 +2.765748596191406250e+01 +-1.933914184570312500e+00 +1.405374145507812500e+01 +-2.153038024902343750e+00 +5.772636413574218750e+00 +-2.270050048828125000e+00 +-2.121643066406250000e+00 +-2.218330383300781250e+00 +-1.068978118896484375e+01 +-1.187252044677734375e+01 +-1.997912597656250000e+01 +-6.879402160644531250e+00 +-2.143579864501953125e+01 +-1.227821350097656250e+00 +-2.193494415283203125e+01 +4.623237609863281250e+00 +-2.152721405029296875e+01 
+9.721397399902343750e+00 +-1.953671264648437500e+01 +-3.648714447021484375e+01 +9.811126708984375000e+00 +-3.130242919921875000e+01 +1.422447967529296875e+01 +-2.212834930419921875e+01 +1.493019866943359375e+01 +-1.500880432128906250e+01 +1.073588562011718750e+01 +-2.095037078857421875e+01 +9.054298400878906250e+00 +-3.050099182128906250e+01 +8.704177856445312500e+00 +1.173237609863281250e+01 +1.054329681396484375e+01 +1.856353759765625000e+01 +1.535009765625000000e+01 +2.893331909179687500e+01 +1.451992797851562500e+01 +3.452944946289062500e+01 +1.065280151367187500e+01 +2.875990295410156250e+01 +8.654792785644531250e+00 +1.942100524902343750e+01 +9.422447204589843750e+00 +-2.204488372802734375e+01 +-3.983994293212890625e+01 +-1.324458312988281250e+01 +-3.467377471923828125e+01 +-6.749649047851562500e+00 +-3.092894744873046875e+01 +-9.183349609375000000e-01 +-3.196458435058593750e+01 +4.220649719238281250e+00 +-3.090406036376953125e+01 +1.089889526367187500e+01 +-3.497008514404296875e+01 +1.874589538574218750e+01 +-4.065438079833984375e+01 +1.124106597900390625e+01 +-4.438417816162109375e+01 +5.181709289550781250e+00 +-4.649170684814453125e+01 +-1.158607482910156250e+00 +-4.680406951904296875e+01 +-7.918922424316406250e+00 +-4.671575164794921875e+01 +-1.452505493164062500e+01 +-4.416526031494140625e+01 +-2.005007171630859375e+01 +-3.997841644287109375e+01 +-1.054919433593750000e+01 +-3.849683380126953125e+01 +-1.051826477050781250e+00 +-3.794863128662109375e+01 +6.412681579589843750e+00 +-3.804645538330078125e+01 +1.627674865722656250e+01 +-4.039697265625000000e+01 +6.373878479003906250e+00 +-4.087213897705078125e+01 +-8.551712036132812500e-01 +-4.157129669189453125e+01 +-1.014953613281250000e+01 +-4.128469085693359375e+01 diff --git a/MuseTalk_project/3DMM/deep_3drecon/util/util.py b/MuseTalk_project/3DMM/deep_3drecon/util/util.py new file mode 100644 index 00000000..0d689ca1 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/util/util.py @@ -0,0 +1,208 @@ 
+"""This script contains basic utilities for Deep3DFaceRecon_pytorch +""" +from __future__ import print_function +import numpy as np +import torch +from PIL import Image +import os +import importlib +import argparse +from argparse import Namespace +import torchvision + + +def str2bool(v): + if isinstance(v, bool): + return v + if v.lower() in ('yes', 'true', 't', 'y', '1'): + return True + elif v.lower() in ('no', 'false', 'f', 'n', '0'): + return False + else: + raise argparse.ArgumentTypeError('Boolean value expected.') + + +def copyconf(default_opt, **kwargs): + conf = Namespace(**vars(default_opt)) + for key in kwargs: + setattr(conf, key, kwargs[key]) + return conf + +def genvalconf(train_opt, **kwargs): + conf = Namespace(**vars(train_opt)) + attr_dict = train_opt.__dict__ + for key, value in attr_dict.items(): + if 'val' in key and key.split('_')[0] in attr_dict: + setattr(conf, key.split('_')[0], value) + + for key in kwargs: + setattr(conf, key, kwargs[key]) + + return conf + +def find_class_in_module(target_cls_name, module): + target_cls_name = target_cls_name.replace('_', '').lower() + clslib = importlib.import_module(module) + cls = None + for name, clsobj in clslib.__dict__.items(): + if name.lower() == target_cls_name: + cls = clsobj + + assert cls is not None, "In %s, there should be a class whose name matches %s in lowercase without underscore(_)" % (module, target_cls_name) + + return cls + + +def tensor2im(input_image, imtype=np.uint8): + """"Converts a Tensor array into a numpy image array. 
+ + Parameters: + input_image (tensor) -- the input image tensor array, range(0, 1) + imtype (type) -- the desired type of the converted numpy array + """ + if not isinstance(input_image, np.ndarray): + if isinstance(input_image, torch.Tensor): # get the data from a variable + image_tensor = input_image.data + else: + return input_image + image_numpy = image_tensor.clamp(0.0, 1.0).cpu().float().numpy() # convert it into a numpy array + if image_numpy.shape[0] == 1: # grayscale to RGB + image_numpy = np.tile(image_numpy, (3, 1, 1)) + image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0 # post-processing: tranpose and scaling + else: # if it is a numpy array, do nothing + image_numpy = input_image + return image_numpy.astype(imtype) + + +def diagnose_network(net, name='network'): + """Calculate and print the mean of average absolute(gradients) + + Parameters: + net (torch network) -- Torch network + name (str) -- the name of the network + """ + mean = 0.0 + count = 0 + for param in net.parameters(): + if param.grad is not None: + mean += torch.mean(torch.abs(param.grad.data)) + count += 1 + if count > 0: + mean = mean / count + print(name) + print(mean) + + +def save_image(image_numpy, image_path, aspect_ratio=1.0): + """Save a numpy image to the disk + + Parameters: + image_numpy (numpy array) -- input numpy array + image_path (str) -- the path of the image + """ + + image_pil = Image.fromarray(image_numpy) + h, w, _ = image_numpy.shape + + if aspect_ratio is None: + pass + elif aspect_ratio > 1.0: + image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC) + elif aspect_ratio < 1.0: + image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC) + image_pil.save(image_path) + + +def print_numpy(x, val=True, shp=False): + """Print the mean, min, max, median, std, and size of a numpy array + + Parameters: + val (bool) -- if print the values of the numpy array + shp (bool) -- if print the shape of the numpy array + """ + x = 
x.astype(np.float64) + if shp: + print('shape,', x.shape) + if val: + x = x.flatten() + print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % ( + np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x))) + + +def mkdirs(paths): + """create empty directories if they don't exist + + Parameters: + paths (str list) -- a list of directory paths + """ + if isinstance(paths, list) and not isinstance(paths, str): + for path in paths: + mkdir(path) + else: + mkdir(paths) + + +def mkdir(path): + """create a single empty directory if it didn't exist + + Parameters: + path (str) -- a single directory path + """ + if not os.path.exists(path): + os.makedirs(path) + + +def correct_resize_label(t, size): + device = t.device + t = t.detach().cpu() + resized = [] + for i in range(t.size(0)): + one_t = t[i, :1] + one_np = np.transpose(one_t.numpy().astype(np.uint8), (1, 2, 0)) + one_np = one_np[:, :, 0] + one_image = Image.fromarray(one_np).resize(size, Image.NEAREST) + resized_t = torch.from_numpy(np.array(one_image)).long() + resized.append(resized_t) + return torch.stack(resized, dim=0).to(device) + + +def correct_resize(t, size, mode=Image.BICUBIC): + device = t.device + t = t.detach().cpu() + resized = [] + for i in range(t.size(0)): + one_t = t[i:i + 1] + one_image = Image.fromarray(tensor2im(one_t)).resize(size, Image.BICUBIC) + resized_t = torchvision.transforms.functional.to_tensor(one_image) * 2 - 1.0 + resized.append(resized_t) + return torch.stack(resized, dim=0).to(device) + +def draw_landmarks(img, landmark, color='r', step=2): + """ + Return: + img -- numpy.array, (B, H, W, 3) img with landmark, RGB order, range (0, 255) + + + Parameters: + img -- numpy.array, (B, H, W, 3), RGB order, range (0, 255) + landmark -- numpy.array, (B, 68, 2), y direction is opposite to v direction + color -- str, 'r' or 'b' (red or blue) + """ + if color =='r': + c = np.array([255., 0, 0]) + else: + c = np.array([0, 0, 255.]) + + _, H, W, _ = img.shape + img, 
landmark = img.copy(), landmark.copy() + landmark[..., 1] = H - 1 - landmark[..., 1] + landmark = np.round(landmark).astype(np.int32) + for i in range(landmark.shape[1]): + x, y = landmark[:, i, 0], landmark[:, i, 1] + for j in range(-step, step): + for k in range(-step, step): + u = np.clip(x + j, 0, W - 1) + v = np.clip(y + k, 0, H - 1) + for m in range(landmark.shape[0]): + img[m, v[m], u[m]] = c + return img diff --git a/MuseTalk_project/3DMM/deep_3drecon/util/visualizer.py b/MuseTalk_project/3DMM/deep_3drecon/util/visualizer.py new file mode 100644 index 00000000..4023a6d4 --- /dev/null +++ b/MuseTalk_project/3DMM/deep_3drecon/util/visualizer.py @@ -0,0 +1,227 @@ +"""This script defines the visualizer for Deep3DFaceRecon_pytorch +""" + +import numpy as np +import os +import sys +import ntpath +import time +from . import util, html +from subprocess import Popen, PIPE +from torch.utils.tensorboard import SummaryWriter + +def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256): + """Save images to the disk. + + Parameters: + webpage (the HTML class) -- the HTML webpage class that stores these imaegs (see html.py for more details) + visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs + image_path (str) -- the string is used to create image paths + aspect_ratio (float) -- the aspect ratio of saved images + width (int) -- the images will be resized to width x width + + This function will save images stored in 'visuals' to the HTML file specified by 'webpage'. 
+ """ + image_dir = webpage.get_image_dir() + short_path = ntpath.basename(image_path[0]) + name = os.path.splitext(short_path)[0] + + webpage.add_header(name) + ims, txts, links = [], [], [] + + for label, im_data in visuals.items(): + im = util.tensor2im(im_data) + image_name = '%s/%s.png' % (label, name) + os.makedirs(os.path.join(image_dir, label), exist_ok=True) + save_path = os.path.join(image_dir, image_name) + util.save_image(im, save_path, aspect_ratio=aspect_ratio) + ims.append(image_name) + txts.append(label) + links.append(image_name) + webpage.add_images(ims, txts, links, width=width) + + +class Visualizer(): + """This class includes several functions that can display/save images and print/save logging information. + + It uses a Python library tensprboardX for display, and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images. + """ + + def __init__(self, opt): + """Initialize the Visualizer class + + Parameters: + opt -- stores all the experiment flags; needs to be a subclass of BaseOptions + Step 1: Cache the training/test options + Step 2: create a tensorboard writer + Step 3: create an HTML object for saveing HTML filters + Step 4: create a logging file to store training losses + """ + self.opt = opt # cache the option + self.use_html = opt.isTrain and not opt.no_html + self.writer = SummaryWriter(os.path.join(opt.checkpoints_dir, 'logs', opt.name)) + self.win_size = opt.display_winsize + self.name = opt.name + self.saved = False + if self.use_html: # create an HTML object at /web/; images will be saved under /web/images/ + self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web') + self.img_dir = os.path.join(self.web_dir, 'images') + print('create web directory %s...' 
% self.web_dir) + util.mkdirs([self.web_dir, self.img_dir]) + # create a logging file to store training losses + self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt') + with open(self.log_name, "a") as log_file: + now = time.strftime("%c") + log_file.write('================ Training Loss (%s) ================\n' % now) + + def reset(self): + """Reset the self.saved status""" + self.saved = False + + + def display_current_results(self, visuals, total_iters, epoch, save_result): + """Display current results on tensorboad; save current results to an HTML file. + + Parameters: + visuals (OrderedDict) - - dictionary of images to display or save + total_iters (int) -- total iterations + epoch (int) - - the current epoch + save_result (bool) - - if save the current results to an HTML file + """ + for label, image in visuals.items(): + self.writer.add_image(label, util.tensor2im(image), total_iters, dataformats='HWC') + + if self.use_html and (save_result or not self.saved): # save images to an HTML file if they haven't been saved. 
+ self.saved = True + # save images to the disk + for label, image in visuals.items(): + image_numpy = util.tensor2im(image) + img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label)) + util.save_image(image_numpy, img_path) + + # update website + webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=0) + for n in range(epoch, 0, -1): + webpage.add_header('epoch [%d]' % n) + ims, txts, links = [], [], [] + + for label, image_numpy in visuals.items(): + image_numpy = util.tensor2im(image) + img_path = 'epoch%.3d_%s.png' % (n, label) + ims.append(img_path) + txts.append(label) + links.append(img_path) + webpage.add_images(ims, txts, links, width=self.win_size) + webpage.save() + + def plot_current_losses(self, total_iters, losses): + # G_loss_collection = {} + # D_loss_collection = {} + # for name, value in losses.items(): + # if 'G' in name or 'NCE' in name or 'idt' in name: + # G_loss_collection[name] = value + # else: + # D_loss_collection[name] = value + # self.writer.add_scalars('G_collec', G_loss_collection, total_iters) + # self.writer.add_scalars('D_collec', D_loss_collection, total_iters) + for name, value in losses.items(): + self.writer.add_scalar(name, value, total_iters) + + # losses: same format as |losses| of plot_current_losses + def print_current_losses(self, epoch, iters, losses, t_comp, t_data): + """print current losses on console; also save the losses to the disk + + Parameters: + epoch (int) -- current epoch + iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch) + losses (OrderedDict) -- training losses stored in the format of (name, float) pairs + t_comp (float) -- computational time per data point (normalized by batch_size) + t_data (float) -- data loading time per data point (normalized by batch_size) + """ + message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data) + for k, v in losses.items(): + message += '%s: %.3f ' % 
(k, v) + + print(message) # print the message + with open(self.log_name, "a") as log_file: + log_file.write('%s\n' % message) # save the message + + +class MyVisualizer: + def __init__(self, opt): + """Initialize the Visualizer class + + Parameters: + opt -- stores all the experiment flags; needs to be a subclass of BaseOptions + Step 1: Cache the training/test options + Step 2: create a tensorboard writer + Step 3: create an HTML object for saveing HTML filters + Step 4: create a logging file to store training losses + """ + self.opt = opt # cache the optio + self.name = opt.name + self.img_dir = os.path.join(opt.checkpoints_dir, opt.name, 'results') + + if opt.phase != 'test': + self.writer = SummaryWriter(os.path.join(opt.checkpoints_dir, opt.name, 'logs')) + # create a logging file to store training losses + self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt') + with open(self.log_name, "a") as log_file: + now = time.strftime("%c") + log_file.write('================ Training Loss (%s) ================\n' % now) + + + def display_current_results(self, visuals, total_iters, epoch, dataset='train', save_results=False, count=0, name=None, + add_image=True): + """Display current results on tensorboad; save current results to an HTML file. 
+ + Parameters: + visuals (OrderedDict) - - dictionary of images to display or save + total_iters (int) -- total iterations + epoch (int) - - the current epoch + dataset (str) - - 'train' or 'val' or 'test' + """ + # if (not add_image) and (not save_results): return + + for label, image in visuals.items(): + for i in range(image.shape[0]): + image_numpy = util.tensor2im(image[i]) + if add_image: + self.writer.add_image(label + '%s_%02d'%(dataset, i + count), + image_numpy, total_iters, dataformats='HWC') + + if save_results: + save_path = os.path.join(self.img_dir, dataset, 'epoch_%s_%06d'%(epoch, total_iters)) + if not os.path.isdir(save_path): + os.makedirs(save_path) + + if name is not None: + img_path = os.path.join(save_path, '%s.png' % name) + else: + img_path = os.path.join(save_path, '%s_%03d.png' % (label, i + count)) + util.save_image(image_numpy, img_path) + + + def plot_current_losses(self, total_iters, losses, dataset='train'): + for name, value in losses.items(): + self.writer.add_scalar(name + '/%s'%dataset, value, total_iters) + + # losses: same format as |losses| of plot_current_losses + def print_current_losses(self, epoch, iters, losses, t_comp, t_data, dataset='train'): + """print current losses on console; also save the losses to the disk + + Parameters: + epoch (int) -- current epoch + iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch) + losses (OrderedDict) -- training losses stored in the format of (name, float) pairs + t_comp (float) -- computational time per data point (normalized by batch_size) + t_data (float) -- data loading time per data point (normalized by batch_size) + """ + message = '(dataset: %s, epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % ( + dataset, epoch, iters, t_comp, t_data) + for k, v in losses.items(): + message += '%s: %.3f ' % (k, v) + + print(message) # print the message + with open(self.log_name, "a") as log_file: + log_file.write('%s\n' % message) # save the 
message diff --git a/MuseTalk_project/3DMM/eval.py b/MuseTalk_project/3DMM/eval.py new file mode 100644 index 00000000..fb2d8a52 --- /dev/null +++ b/MuseTalk_project/3DMM/eval.py @@ -0,0 +1,160 @@ +import os, sys +import cv2 +import numpy as np +from time import time +from scipy.io import savemat +import argparse +from tqdm import tqdm, trange +import torch +import face_alignment +import deep_3drecon +from moviepy.editor import VideoFileClip +import copy +import psutil +import math + +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) + +fa = face_alignment.FaceAlignment(face_alignment.LandmarksType.TWO_D, network_size=4, device='cuda') +face_reconstructor = deep_3drecon.Reconstructor() + +# landmark detection in Deep3DRecon +def lm68_2_lm5(in_lm): + # in_lm: shape=[68,2] + lm_idx = np.array([31,37,40,43,46,49,55]) - 1 + # 将上述特殊角点的数据取出,得到5个新的角点数据,拼接起来。 + lm = np.stack([in_lm[lm_idx[0],:],np.mean(in_lm[lm_idx[[1,2]],:],0),np.mean(in_lm[lm_idx[[3,4]],:],0),in_lm[lm_idx[5],:],in_lm[lm_idx[6],:]], axis = 0) + # 将第一个角点放在了第三个位置 + lm = lm[[1,2,0,3,4],:2] + return lm + +def process_video(fname, out_name=None): + assert fname.endswith(".mp4") + print(fname) + if out_name is None: + out_name = fname[:-4] + '.npy' + tmp_name = out_name[:-4] + '.doi' + if os.path.exists(out_name): + print("out exisit, skip") + return + os.system(f"touch {tmp_name}") + cap = cv2.VideoCapture(fname) + print(f"loading video ...") + + # cap.subclip() + + # 获取视频相关参数 + num_frames = int(cap.get(7)) + h = int(cap.get(4)) + w = int(cap.get(3)) + # 检测系统资源是否充足 + mem = psutil.virtual_memory() + a_mem = mem.available + min_mem=num_frames*68*2 + num_frames*5*2 + num_frames*h*w*3 + if a_mem < min_mem: + print(f"WARNING: The physical memory is insufficient, which may result in memory swapping. 
Available Memory: {a_mem/1000000:.3f}M, the minimum memory required is:{min_mem/1000000:.3f}M.") + # 初始化矩阵 + lm68_arr=np.empty((num_frames, 68, 2),dtype=np.float32) + lm5_arr=np.empty((num_frames, 5, 2),dtype=np.float32) + video_rgb=np.empty((num_frames, h, w, 3),dtype=np.uint8) + cnt=0 + i=0 + while cap.isOpened(): + i += 1 + ret, frame_bgr = cap.read() + if frame_bgr is None: + break + frame_rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB) + # if fa.get_landmarks(frame_rgb) is None: + # continue + video_rgb[cnt]=frame_rgb + cnt += 1 + + # print("rua") + # num_frames = cnt + for i in trange(num_frames, desc="extracting 2D facial landmarks ..."): + + try: + lm68 = fa.get_landmarks(video_rgb[i])[0] # 识别图片中的人脸,获得角点, shape=[68,2] + except: + print(f"WARNING: Caught errors when fa.get_landmarks, maybe No face detected at frame {i} in {fname}!") + raise ValueError("") + # lm68 = fa.get_landmarks(video_rgb[i]) # 识别图片中的人脸,获得角点, shape=[68,2] + + # if lm68 is None: + # lm68_arr[i]=lm68 + # lm5_arr[i]=lm5 + # continue + + # lm68 = lm68[0] + lm5 = lm68_2_lm5(lm68) + lm68_arr[i]=lm68 + lm5_arr[i]=lm5 + batch_size = 32 + iter_times = num_frames // batch_size + last_bs = num_frames % batch_size + coeff_lst = [] + for i_iter in range(iter_times): + start_idx = i_iter * batch_size + batched_images = video_rgb[start_idx: start_idx + batch_size] + batched_lm5 = lm5_arr[start_idx: start_idx + batch_size] + coeff, align_img = face_reconstructor.recon_coeff(batched_images, batched_lm5, return_image = True) + coeff_lst.append(coeff) + # print(last_bs) + if last_bs != 0: + batched_images = video_rgb[-last_bs:] + batched_lm5 = lm5_arr[-last_bs:] + coeff, align_img = face_reconstructor.recon_coeff(batched_images, batched_lm5, return_image = True) + coeff_lst.append(coeff) + return lm68_arr + + +def split_wav(mp4_name): + wav_name = mp4_name[:-4] + '.wav' + if os.path.exists(wav_name): + return + video = VideoFileClip(mp4_name,verbose=False) + dur = video.duration + audio = video.audio + 
assert audio is not None + audio.write_audiofile(wav_name,fps=16000,verbose=False,logger=None) + +if __name__ == '__main__': + ### Process Single Long video for NeRF dataset + # video_id = 'May' + # video_fname = f"data/raw/videos/{video_id}.mp4" + # out_fname = f"data/processed/videos/{video_id}/coeff.npy" + # process_video(video_fname, out_fname) + + ### Process short video clips for LRS3 dataset + from argparse import ArgumentParser + parser = ArgumentParser() + parser.add_argument('--gt_path', type=str, default='Shaheen.mp4', help='') + parser.add_argument('--infer_path', type=str, default='Shaheen_30_Shaheen.mp4', help='') + parser.add_argument('--process_id', type=int, default=0, help='') + parser.add_argument('--total_process', type=int, default=1, help='') + args = parser.parse_args() + + import os, glob + mp4_name1 = args.gt_path + mp4_name2 = args.infer_path + lm_68_1 = process_video(mp4_name1) + lm_68_2 = process_video(mp4_name2) + if len(lm_68_1) != len(lm_68_2): + print("Warning: the frame of videos is not equal.") + Len = min(len(lm_68_1),len(lm_68_2)) + loss = 0 + for i in range(Len): + frame1 = lm_68_1[i] + frame2 = lm_68_2[i] + sum = 0 + for j in range(68): + l = 1.0 + if j >= 48: + l = 1.2 + sum += l*((frame1[j][0]-frame2[j][0])**2 + (frame1[j][1]-frame2[j][1])**2) + sum += 5 + loss += math.log10(sum) + + loss /= Len + print("Loss:",loss) diff --git a/MuseTalk_project/3DMM/requirements.txt b/MuseTalk_project/3DMM/requirements.txt new file mode 100644 index 00000000..48c2f111 --- /dev/null +++ b/MuseTalk_project/3DMM/requirements.txt @@ -0,0 +1,29 @@ +numpy +pandas +transformers +scipy +scikit-learn +scikit-image +tensorflow==2.12.0 # you can flexible it, this is gpu version +tensorboard +tensorboardX +python_speech_features +resampy +opencv_python +face_alignment +matplotlib +configargparse +librosa==0.9.2 +praat-parselmouth==0.4.3 +trimesh +kornia==0.5.0 +PyMCubes +lpips +setuptools # ==59.5.0 +ffmpeg-python +moviepy +dearpygui +ninja +pyaudio # 
for extract esperanto +mediapipe +psutil \ No newline at end of file diff --git a/MuseTalk_project/3DMM/utils/commons/__pycache__/base_task.cpython-39.pyc b/MuseTalk_project/3DMM/utils/commons/__pycache__/base_task.cpython-39.pyc new file mode 100644 index 00000000..b26eeb3c Binary files /dev/null and b/MuseTalk_project/3DMM/utils/commons/__pycache__/base_task.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/utils/commons/__pycache__/ckpt_utils.cpython-39.pyc b/MuseTalk_project/3DMM/utils/commons/__pycache__/ckpt_utils.cpython-39.pyc new file mode 100644 index 00000000..791e9f5f Binary files /dev/null and b/MuseTalk_project/3DMM/utils/commons/__pycache__/ckpt_utils.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/utils/commons/__pycache__/dataset_utils.cpython-39.pyc b/MuseTalk_project/3DMM/utils/commons/__pycache__/dataset_utils.cpython-39.pyc new file mode 100644 index 00000000..91e38f2c Binary files /dev/null and b/MuseTalk_project/3DMM/utils/commons/__pycache__/dataset_utils.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/utils/commons/__pycache__/ddp_utils.cpython-39.pyc b/MuseTalk_project/3DMM/utils/commons/__pycache__/ddp_utils.cpython-39.pyc new file mode 100644 index 00000000..7a84ad89 Binary files /dev/null and b/MuseTalk_project/3DMM/utils/commons/__pycache__/ddp_utils.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/utils/commons/__pycache__/euler2rot.cpython-39.pyc b/MuseTalk_project/3DMM/utils/commons/__pycache__/euler2rot.cpython-39.pyc new file mode 100644 index 00000000..25727019 Binary files /dev/null and b/MuseTalk_project/3DMM/utils/commons/__pycache__/euler2rot.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/utils/commons/__pycache__/face_alignment_utils.cpython-39.pyc b/MuseTalk_project/3DMM/utils/commons/__pycache__/face_alignment_utils.cpython-39.pyc new file mode 100644 index 00000000..020e133f Binary files /dev/null and 
b/MuseTalk_project/3DMM/utils/commons/__pycache__/face_alignment_utils.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/utils/commons/__pycache__/hparams.cpython-38.pyc b/MuseTalk_project/3DMM/utils/commons/__pycache__/hparams.cpython-38.pyc new file mode 100644 index 00000000..a860f1a9 Binary files /dev/null and b/MuseTalk_project/3DMM/utils/commons/__pycache__/hparams.cpython-38.pyc differ diff --git a/MuseTalk_project/3DMM/utils/commons/__pycache__/hparams.cpython-39.pyc b/MuseTalk_project/3DMM/utils/commons/__pycache__/hparams.cpython-39.pyc new file mode 100644 index 00000000..8a7e0d0e Binary files /dev/null and b/MuseTalk_project/3DMM/utils/commons/__pycache__/hparams.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/utils/commons/__pycache__/image_utils.cpython-39.pyc b/MuseTalk_project/3DMM/utils/commons/__pycache__/image_utils.cpython-39.pyc new file mode 100644 index 00000000..890644ef Binary files /dev/null and b/MuseTalk_project/3DMM/utils/commons/__pycache__/image_utils.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/utils/commons/__pycache__/indexed_datasets.cpython-39.pyc b/MuseTalk_project/3DMM/utils/commons/__pycache__/indexed_datasets.cpython-39.pyc new file mode 100644 index 00000000..4d2c3273 Binary files /dev/null and b/MuseTalk_project/3DMM/utils/commons/__pycache__/indexed_datasets.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/utils/commons/__pycache__/meters.cpython-39.pyc b/MuseTalk_project/3DMM/utils/commons/__pycache__/meters.cpython-39.pyc new file mode 100644 index 00000000..f4ba3c75 Binary files /dev/null and b/MuseTalk_project/3DMM/utils/commons/__pycache__/meters.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/utils/commons/__pycache__/multiprocess_utils.cpython-39.pyc b/MuseTalk_project/3DMM/utils/commons/__pycache__/multiprocess_utils.cpython-39.pyc new file mode 100644 index 00000000..e31a1edc Binary files /dev/null and 
b/MuseTalk_project/3DMM/utils/commons/__pycache__/multiprocess_utils.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/utils/commons/__pycache__/os_utils.cpython-38.pyc b/MuseTalk_project/3DMM/utils/commons/__pycache__/os_utils.cpython-38.pyc new file mode 100644 index 00000000..ec1b225f Binary files /dev/null and b/MuseTalk_project/3DMM/utils/commons/__pycache__/os_utils.cpython-38.pyc differ diff --git a/MuseTalk_project/3DMM/utils/commons/__pycache__/os_utils.cpython-39.pyc b/MuseTalk_project/3DMM/utils/commons/__pycache__/os_utils.cpython-39.pyc new file mode 100644 index 00000000..609fb9a4 Binary files /dev/null and b/MuseTalk_project/3DMM/utils/commons/__pycache__/os_utils.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/utils/commons/__pycache__/pitch_utils.cpython-39.pyc b/MuseTalk_project/3DMM/utils/commons/__pycache__/pitch_utils.cpython-39.pyc new file mode 100644 index 00000000..7ae3f0da Binary files /dev/null and b/MuseTalk_project/3DMM/utils/commons/__pycache__/pitch_utils.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/utils/commons/__pycache__/tensor_utils.cpython-39.pyc b/MuseTalk_project/3DMM/utils/commons/__pycache__/tensor_utils.cpython-39.pyc new file mode 100644 index 00000000..b28d2b58 Binary files /dev/null and b/MuseTalk_project/3DMM/utils/commons/__pycache__/tensor_utils.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/utils/commons/__pycache__/trainer.cpython-39.pyc b/MuseTalk_project/3DMM/utils/commons/__pycache__/trainer.cpython-39.pyc new file mode 100644 index 00000000..6da77864 Binary files /dev/null and b/MuseTalk_project/3DMM/utils/commons/__pycache__/trainer.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/utils/commons/base_task.py b/MuseTalk_project/3DMM/utils/commons/base_task.py new file mode 100644 index 00000000..528e1777 --- /dev/null +++ b/MuseTalk_project/3DMM/utils/commons/base_task.py @@ -0,0 +1,256 @@ +import logging +import os +import random +import subprocess +import sys +from 
datetime import datetime +import numpy as np +import torch.utils.data +from torch import nn +from torch.utils.tensorboard import SummaryWriter +from utils.commons.dataset_utils import data_loader +from utils.commons.hparams import hparams +from utils.commons.meters import AvgrageMeter +from utils.commons.tensor_utils import tensors_to_scalars +from utils.commons.trainer import Trainer + +torch.multiprocessing.set_sharing_strategy(os.getenv('TORCH_SHARE_STRATEGY', 'file_system')) + +log_format = '%(asctime)s %(message)s' +logging.basicConfig(stream=sys.stdout, level=logging.INFO, + format=log_format, datefmt='%m/%d %I:%M:%S %p') + + +class BaseTask(nn.Module): + def __init__(self, *args, **kwargs): + super(BaseTask, self).__init__() + self.current_epoch = 0 + self.global_step = 0 + self.trainer = None + self.use_ddp = True + self.gradient_clip_norm = hparams['clip_grad_norm'] + self.gradient_clip_val = hparams.get('clip_grad_value', 0) + self.model = None + self.training_losses_meter = None + self.logger: SummaryWriter = None + + ###################### + # build model, dataloaders, optimizer, scheduler and tensorboard + ###################### + def build_model(self): + raise NotImplementedError + + @data_loader + def train_dataloader(self): + raise NotImplementedError + + @data_loader + def test_dataloader(self): + raise NotImplementedError + + @data_loader + def val_dataloader(self): + raise NotImplementedError + + def build_scheduler(self, optimizer): + return None + + def build_optimizer(self, model): + raise NotImplementedError + + def configure_optimizers(self): + optm = self.build_optimizer(self.model) + self.scheduler = self.build_scheduler(optm) + if isinstance(optm, (list, tuple)): + return optm + return [optm] + + def build_tensorboard(self, save_dir, name, **kwargs): + log_dir = os.path.join(save_dir, name) + os.makedirs(log_dir, exist_ok=True) + self.logger = SummaryWriter(log_dir=log_dir, **kwargs) + + ###################### + # training + 
###################### + def on_train_start(self): + pass + + def on_train_end(self): + pass + + def on_epoch_start(self): + self.training_losses_meter = {'total_loss': AvgrageMeter()} + + def on_epoch_end(self): + loss_outputs = {k: v.avg for k, v in self.training_losses_meter.items()} + print(f"Epoch {self.current_epoch} ended. Steps: {self.global_step}. {loss_outputs}") + loss_outputs = {"epoch_mean/"+k:v for k,v in loss_outputs.items()} + return loss_outputs + + def _training_step(self, sample, batch_idx, optimizer_idx): + """ + + :param sample: + :param batch_idx: + :return: total loss: torch.Tensor, loss_log: dict + """ + raise NotImplementedError + + def training_step(self, sample, batch_idx, optimizer_idx=-1): + """ + + :param sample: + :param batch_idx: + :param optimizer_idx: + :return: {'loss': torch.Tensor, 'progress_bar': dict, 'tb_log': dict} + """ + # perform the main training step in a specific task + loss_ret = self._training_step(sample, batch_idx, optimizer_idx) + if loss_ret is None: + return {'loss': None} + total_loss, log_outputs = loss_ret + log_outputs = tensors_to_scalars(log_outputs) + + # add to epoch meter + for k, v in log_outputs.items(): + if '/' in k: + k_split = k.split("/") + assert len(k_split) == 2, "we only support one `/` in tag_name, i.e., `/`" + k = k_split[-1] + if k not in self.training_losses_meter: + self.training_losses_meter[k] = AvgrageMeter() + if not np.isnan(v): + self.training_losses_meter[k].update(v) + self.training_losses_meter['total_loss'].update(total_loss.item()) + + if optimizer_idx >= 0: + log_outputs[f'lr_{optimizer_idx}'] = self.trainer.optimizers[optimizer_idx].param_groups[0]['lr'] + + # add to progress bar + progress_bar_log = {} + for k, v in log_outputs.items(): + if '/' in k: + k_split = k.split("/") + assert len(k_split) == 2, "we only support one `/` in tag_name, i.e., `/`" + k = k_split[-1] + assert k not in progress_bar_log, f"we got duplicate tags in log_outputs, check this `{k}`" + 
progress_bar_log[k] = v + + # add to progress bar + tb_log = {} + for k, v in log_outputs.items(): + if '/' in k: + tb_log[k] = v + else: + tb_log[f'tr/{k}'] = v + return { + 'loss': total_loss, + 'progress_bar': progress_bar_log, + 'tb_log': tb_log + } + + def on_before_optimization(self, opt_idx): + if self.gradient_clip_norm > 0: + torch.nn.utils.clip_grad_norm_(self.parameters(), self.gradient_clip_norm) + if self.gradient_clip_val > 0: + torch.nn.utils.clip_grad_value_(self.parameters(), self.gradient_clip_val) + + def on_after_optimization(self, epoch, batch_idx, optimizer, optimizer_idx): + if self.scheduler is not None: + self.scheduler.step(self.global_step // hparams['accumulate_grad_batches']) + + ###################### + # validation + ###################### + def validation_start(self): + pass + + def validation_step(self, sample, batch_idx): + """ + + :param sample: + :param batch_idx: + :return: output: {"losses": {...}, "total_loss": float, ...} or (total loss: torch.Tensor, loss_log: dict) + """ + raise NotImplementedError + + def validation_end(self, outputs): + """ + + :param outputs: + :return: loss_output: dict + """ + all_losses_meter = {'total_loss': AvgrageMeter()} + for output in outputs: + if len(output) == 0 or output is None: + continue + if isinstance(output, dict): + assert 'losses' in output, 'Key "losses" should exist in validation output.' 
+ n = output.pop('nsamples', 1) + losses = tensors_to_scalars(output['losses']) + total_loss = output.get('total_loss', sum(losses.values())) + else: + assert len(output) == 2, 'Validation output should only consist of two elements: (total_loss, losses)' + n = 1 + total_loss, losses = output + losses = tensors_to_scalars(losses) + if isinstance(total_loss, torch.Tensor): + total_loss = total_loss.item() + for k, v in losses.items(): + if k not in all_losses_meter: + all_losses_meter[k] = AvgrageMeter() + all_losses_meter[k].update(v, n) + all_losses_meter['total_loss'].update(total_loss, n) + loss_output = {k: round(v.avg, 4) for k, v in all_losses_meter.items()} + print(f"| Validation results@{self.global_step}: {loss_output}") + return { + 'tb_log': {f'val/{k}': v for k, v in loss_output.items()}, + 'val_loss': loss_output['total_loss'] + } + + ###################### + # testing + ###################### + def test_start(self): + pass + + def test_step(self, sample, batch_idx): + return self.validation_step(sample, batch_idx) + + def test_end(self, outputs): + return self.validation_end(outputs) + + ###################### + # start training/testing + ###################### + @classmethod + def start(cls): + os.environ['MASTER_PORT'] = str(random.randint(15000, 30000)) + random.seed(hparams['seed']) + np.random.seed(hparams['seed']) + work_dir = hparams['work_dir'] + trainer = Trainer( + work_dir=work_dir, + val_check_interval=hparams['val_check_interval'], + tb_log_interval=hparams['tb_log_interval'], + max_updates=hparams['max_updates'], + num_sanity_val_steps=hparams['num_sanity_val_steps'] if not hparams['validate'] else 10000, + accumulate_grad_batches=hparams['accumulate_grad_batches'], + print_nan_grads=hparams['print_nan_grads'], + resume_from_checkpoint=hparams.get('resume_from_checkpoint', 0), + amp=hparams['amp'], + monitor_key=hparams['valid_monitor_key'], + monitor_mode=hparams['valid_monitor_mode'], + num_ckpt_keep=hparams['num_ckpt_keep'], + 
save_best=hparams['save_best'], + seed=hparams['seed'], + debug=hparams['debug'] + ) + if not hparams['infer']: # train + trainer.fit(cls) + else: + trainer.test(cls) + + def on_keyboard_interrupt(self): + pass diff --git a/MuseTalk_project/3DMM/utils/commons/ckpt_utils.py b/MuseTalk_project/3DMM/utils/commons/ckpt_utils.py new file mode 100644 index 00000000..3460c59a --- /dev/null +++ b/MuseTalk_project/3DMM/utils/commons/ckpt_utils.py @@ -0,0 +1,66 @@ +import glob +import os +import re +import torch + + +def get_last_checkpoint(work_dir, steps=None): + checkpoint = None + last_ckpt_path = None + ckpt_paths = get_all_ckpts(work_dir, steps) + if len(ckpt_paths) > 0: + last_ckpt_path = ckpt_paths[0] + checkpoint = torch.load(last_ckpt_path, map_location='cpu') + return checkpoint, last_ckpt_path + + +def get_all_ckpts(work_dir, steps=None): + if steps is None: + ckpt_path_pattern = f'{work_dir}/model_ckpt_steps_*.ckpt' + else: + ckpt_path_pattern = f'{work_dir}/model_ckpt_steps_{steps}.ckpt' + return sorted(glob.glob(ckpt_path_pattern), + key=lambda x: -int(re.findall('.*steps\_(\d+)\.ckpt', x)[0])) + + +def load_ckpt(cur_model, ckpt_base_dir, model_name='model', force=True, strict=True, steps=None): + if os.path.isfile(ckpt_base_dir): + base_dir = os.path.dirname(ckpt_base_dir) + ckpt_path = ckpt_base_dir + checkpoint = torch.load(ckpt_base_dir, map_location='cpu') + else: + base_dir = ckpt_base_dir + checkpoint, ckpt_path = get_last_checkpoint(ckpt_base_dir, steps) + if checkpoint is not None: + state_dict = checkpoint["state_dict"] + if len([k for k in state_dict.keys() if '.' in k]) > 0: + state_dict = {k[len(model_name) + 1:]: v for k, v in state_dict.items() + if k.startswith(f'{model_name}.')} + else: + if '.' 
not in model_name: + state_dict = state_dict[model_name] + else: + base_model_name = model_name.split('.')[0] + rest_model_name = model_name[len(base_model_name) + 1:] + state_dict = { + k[len(rest_model_name) + 1:]: v for k, v in state_dict[base_model_name].items() + if k.startswith(f'{rest_model_name}.')} + if not strict: + cur_model_state_dict = cur_model.state_dict() + unmatched_keys = [] + for key, param in state_dict.items(): + if key in cur_model_state_dict: + new_param = cur_model_state_dict[key] + if new_param.shape != param.shape: + unmatched_keys.append(key) + print("| Unmatched keys: ", key, new_param.shape, param.shape) + for key in unmatched_keys: + del state_dict[key] + cur_model.load_state_dict(state_dict, strict=strict) + print(f"| load '{model_name}' from '{ckpt_path}'.") + else: + e_msg = f"| ckpt not found in {base_dir}." + if force: + assert False, e_msg + else: + print(e_msg) diff --git a/MuseTalk_project/3DMM/utils/commons/crop_head.py b/MuseTalk_project/3DMM/utils/commons/crop_head.py new file mode 100644 index 00000000..e61cfc51 --- /dev/null +++ b/MuseTalk_project/3DMM/utils/commons/crop_head.py @@ -0,0 +1,106 @@ +import face_alignment +import os +import cv2 +import skimage.transform as trans +import argparse +import torch +import numpy as np +import tqdm + +device = 'cuda' if torch.cuda.is_available() else 'cpu' + + +def get_affine(src): + dst = np.array([[87, 59], + [137, 59], + [112, 120]], dtype=np.float32) + tform = trans.SimilarityTransform() + tform.estimate(src, dst) + M = tform.params[0:2, :] + return M + + +def affine_align_img(img, M, crop_size=224): + warped = cv2.warpAffine(img, M, (crop_size, crop_size), borderValue=0.0) + return warped + + +def affine_align_3landmarks(landmarks, M): + new_landmarks = np.concatenate([landmarks, np.ones((3, 1))], 1) + affined_landmarks = np.matmul(new_landmarks, M.transpose()) + return affined_landmarks + + +def get_eyes_mouths(landmark): + three_points = np.zeros((3, 2)) + three_points[0] = 
landmark[36:42].mean(0) + three_points[1] = landmark[42:48].mean(0) + three_points[2] = landmark[60:68].mean(0) + return three_points + + +def get_mouth_bias(three_points): + bias = np.array([112, 120]) - three_points[2] + return bias + + +def align_folder(folder_path, folder_save_path): + + fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, device=device) + preds = fa.get_landmarks_from_directory(folder_path) + + sumpoints = 0 + three_points_list = [] + + for img in tqdm.tqdm(preds.keys(), desc='preprocessing..'): + pred_points = np.array(preds[img]) + if pred_points is None or len(pred_points.shape) != 3: + print('preprocessing failed') + return False + else: + num_faces, size, _ = pred_points.shape + if num_faces == 1 and size == 68: + + three_points = get_eyes_mouths(pred_points[0]) + sumpoints += three_points + three_points_list.append(three_points) + else: + + print('preprocessing failed') + return False + avg_points = sumpoints / len(preds) + M = get_affine(avg_points) + p_bias = None + for i, img_pth in tqdm.tqdm(enumerate(preds.keys()), desc='affine and save'): + three_points = three_points_list[i] + affined_3landmarks = affine_align_3landmarks(three_points, M) + bias = get_mouth_bias(affined_3landmarks) + if p_bias is None: + bias = bias + else: + bias = p_bias * 0.2 + bias * 0.8 + p_bias = bias + M_i = M.copy() + M_i[:, 2] = M[:, 2] + bias + img = cv2.imread(img_pth) + wrapped = affine_align_img(img, M_i) + img_save_path = os.path.join(folder_save_path, img_pth.split('/')[-1]) + cv2.imwrite(img_save_path, wrapped) + print('cropped files saved at {}'.format(folder_save_path)) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('--folder_path', help='the folder which needs processing') + args = parser.parse_args() + + if os.path.isdir(args.folder_path): + home_path = '/'.join(args.folder_path.split('/')[:-1]) + save_img_path = os.path.join(home_path, args.folder_path.split('/')[-1] + '_cropped') + 
os.makedirs(save_img_path, exist_ok=True) + + align_folder(args.folder_path, save_img_path) + + +if __name__ == '__main__': + main() diff --git a/MuseTalk_project/3DMM/utils/commons/dataset_utils.py b/MuseTalk_project/3DMM/utils/commons/dataset_utils.py new file mode 100644 index 00000000..44c2ca0c --- /dev/null +++ b/MuseTalk_project/3DMM/utils/commons/dataset_utils.py @@ -0,0 +1,247 @@ +import os +import sys +import traceback +import types +from functools import wraps +from itertools import chain +import numpy as np +import torch.utils.data +from torch.utils.data import ConcatDataset +from utils.commons.hparams import hparams + + +def collate_1d_or_2d(values, pad_idx=0, left_pad=False, shift_right=False, max_len=None, shift_id=1): + if len(values[0].shape) == 1: + return collate_1d(values, pad_idx, left_pad, shift_right, max_len, shift_id) + else: + return collate_2d(values, pad_idx, left_pad, shift_right, max_len) + + +def collate_1d(values, pad_idx=0, left_pad=False, shift_right=False, max_len=None, shift_id=1): + """Convert a list of 1d tensors into a padded 2d tensor.""" + size = max(v.size(0) for v in values) if max_len is None else max_len + res = values[0].new(len(values), size).fill_(pad_idx) + + def copy_tensor(src, dst): + assert dst.numel() == src.numel() + if shift_right: + dst[1:] = src[:-1] + dst[0] = shift_id + else: + dst.copy_(src) + + for i, v in enumerate(values): + copy_tensor(v, res[i][size - len(v):] if left_pad else res[i][:len(v)]) + return res + + +def collate_2d(values, pad_idx=0, left_pad=False, shift_right=False, max_len=None): + """Convert a list of 2d tensors into a padded 3d tensor.""" + size = max(v.size(0) for v in values) if max_len is None else max_len + res = values[0].new(len(values), size, values[0].shape[1]).fill_(pad_idx) + + def copy_tensor(src, dst): + assert dst.numel() == src.numel() + if shift_right: + dst[1:] = src[:-1] + else: + dst.copy_(src) + + for i, v in enumerate(values): + copy_tensor(v, res[i][size - len(v):] 
if left_pad else res[i][:len(v)]) + return res + + +def _is_batch_full(batch, num_tokens, max_tokens, max_sentences): + if len(batch) == 0: + return 0 + if len(batch) == max_sentences: + return 1 + if num_tokens > max_tokens: + return 1 + return 0 + + +def batch_by_size( + indices, num_tokens_fn, max_tokens=None, max_sentences=None, + required_batch_size_multiple=1, distributed=False +): + """ + Yield mini-batches of indices bucketed by size. Batches may contain + sequences of different lengths. + + Args: + indices (List[int]): ordered list of dataset indices + num_tokens_fn (callable): function that returns the number of tokens at + a given index + max_tokens (int, optional): max number of tokens in each batch + (default: None). + max_sentences (int, optional): max number of sentences in each + batch (default: None). + required_batch_size_multiple (int, optional): require batch size to + be a multiple of N (default: 1). + """ + max_tokens = max_tokens if max_tokens is not None else sys.maxsize + max_sentences = max_sentences if max_sentences is not None else sys.maxsize + bsz_mult = required_batch_size_multiple + + if isinstance(indices, types.GeneratorType): + indices = np.fromiter(indices, dtype=np.int64, count=-1) + + sample_len = 0 + sample_lens = [] + batch = [] + batches = [] + for i in range(len(indices)): + idx = indices[i] + num_tokens = num_tokens_fn(idx) + sample_lens.append(num_tokens) + sample_len = max(sample_len, num_tokens) + + assert sample_len <= max_tokens, ( + "sentence at index {} of size {} exceeds max_tokens " + "limit of {}!".format(idx, sample_len, max_tokens) + ) + num_tokens = (len(batch) + 1) * sample_len + + if _is_batch_full(batch, num_tokens, max_tokens, max_sentences): + mod_len = max( + bsz_mult * (len(batch) // bsz_mult), + len(batch) % bsz_mult, + ) + batches.append(batch[:mod_len]) + batch = batch[mod_len:] + sample_lens = sample_lens[mod_len:] + sample_len = max(sample_lens) if len(sample_lens) > 0 else 0 + batch.append(idx) + 
if len(batch) > 0: + batches.append(batch) + return batches + + +def unpack_dict_to_list(samples): + samples_ = [] + bsz = samples.get('outputs').size(0) + for i in range(bsz): + res = {} + for k, v in samples.items(): + try: + res[k] = v[i] + except: + pass + samples_.append(res) + return samples_ + + +def remove_padding(x, padding_idx=0): + if x is None: + return None + assert len(x.shape) in [1, 2] + if len(x.shape) == 2: # [T, H] + return x[np.abs(x).sum(-1) != padding_idx] + elif len(x.shape) == 1: # [T] + return x[x != padding_idx] + + +def data_loader(fn): + """ + Decorator to make any fx with this use the lazy property + :param fn: + :return: + """ + + wraps(fn) + attr_name = '_lazy_' + fn.__name__ + + def _get_data_loader(self): + try: + value = getattr(self, attr_name) + except AttributeError: + try: + value = fn(self) # Lazy evaluation, done only once. + except AttributeError as e: + # Guard against AttributeError suppression. (Issue #142) + traceback.print_exc() + error = f'{fn.__name__}: An AttributeError was encountered: ' + str(e) + raise RuntimeError(error) from e + setattr(self, attr_name, value) # Memoize evaluation. + return value + + return _get_data_loader + + +class BaseDataset(torch.utils.data.Dataset): + def __init__(self, shuffle): + super().__init__() + self.hparams = hparams + self.shuffle = shuffle + self.sort_by_len = hparams['sort_by_len'] + self.sizes = None + + @property + def _sizes(self): + return self.sizes + + def __getitem__(self, index): + raise NotImplementedError + + def collater(self, samples): + raise NotImplementedError + + def __len__(self): + return len(self._sizes) + + def num_tokens(self, index): + return self.size(index) + + def size(self, index): + """Return an example's size as a float or tuple. This value is used when + filtering a dataset with ``--max-positions``.""" + return min(self._sizes[index], hparams['max_frames']) + + def ordered_indices(self): + """Return an ordered list of indices. 
Batches will be constructed based + on this order.""" + if self.shuffle: + indices = np.random.permutation(len(self)) + if self.sort_by_len: + indices = indices[np.argsort(np.array(self._sizes)[indices], kind='mergesort')] + else: + indices = np.arange(len(self)) + return indices + + @property + def num_workers(self): + return int(os.getenv('NUM_WORKERS', hparams['ds_workers'])) + + +class BaseConcatDataset(ConcatDataset): + def collater(self, samples): + return self.datasets[0].collater(samples) + + @property + def _sizes(self): + if not hasattr(self, 'sizes'): + self.sizes = list(chain.from_iterable([d._sizes for d in self.datasets])) + return self.sizes + + def size(self, index): + return min(self._sizes[index], hparams['max_frames']) + + def num_tokens(self, index): + return self.size(index) + + def ordered_indices(self): + """Return an ordered list of indices. Batches will be constructed based + on this order.""" + if self.datasets[0].shuffle: + indices = np.random.permutation(len(self)) + if self.datasets[0].sort_by_len: + indices = indices[np.argsort(np.array(self._sizes)[indices], kind='mergesort')] + else: + indices = np.arange(len(self)) + return indices + + @property + def num_workers(self): + return self.datasets[0].num_workers diff --git a/MuseTalk_project/3DMM/utils/commons/ddp_utils.py b/MuseTalk_project/3DMM/utils/commons/ddp_utils.py new file mode 100644 index 00000000..4b529198 --- /dev/null +++ b/MuseTalk_project/3DMM/utils/commons/ddp_utils.py @@ -0,0 +1,137 @@ +from torch.nn.parallel import DistributedDataParallel +from torch.nn.parallel.distributed import _find_tensors +import torch.optim +import torch.utils.data +import torch +from packaging import version + +class DDP(DistributedDataParallel): + """ + Override the forward call in lightning so it goes to training and validation step respectively + """ + + def forward(self, *inputs, **kwargs): # pragma: no cover + if version.parse(torch.__version__[:6]) < version.parse("1.11"): + 
self._sync_params() + inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids) + assert len(self.device_ids) == 1 + if self.module.training: + output = self.module.training_step(*inputs[0], **kwargs[0]) + elif self.module.testing: + output = self.module.test_step(*inputs[0], **kwargs[0]) + else: + output = self.module.validation_step(*inputs[0], **kwargs[0]) + if torch.is_grad_enabled(): + # We'll return the output object verbatim since it is a freeform + # object. We need to find any tensors in this object, though, + # because we need to figure out which parameters were used during + # this forward pass, to ensure we short circuit reduction for any + # unused parameters. Only if `find_unused_parameters` is set. + if self.find_unused_parameters: + self.reducer.prepare_for_backward(list(_find_tensors(output))) + else: + self.reducer.prepare_for_backward([]) + else: + from torch.nn.parallel.distributed import \ + logging, Join, _DDPSink, _tree_flatten_with_rref, _tree_unflatten_with_rref + with torch.autograd.profiler.record_function("DistributedDataParallel.forward"): + if torch.is_grad_enabled() and self.require_backward_grad_sync: + self.logger.set_runtime_stats_and_log() + self.num_iterations += 1 + self.reducer.prepare_for_forward() + + # Notify the join context that this process has not joined, if + # needed + work = Join.notify_join_context(self) + if work: + self.reducer._set_forward_pass_work_handle( + work, self._divide_by_initial_world_size + ) + + # Calling _rebuild_buckets before forward compuation, + # It may allocate new buckets before deallocating old buckets + # inside _rebuild_buckets. To save peak memory usage, + # call _rebuild_buckets before the peak memory usage increases + # during forward computation. + # This should be called only once during whole training period. 
+ if torch.is_grad_enabled() and self.reducer._rebuild_buckets(): + logging.info("Reducer buckets have been rebuilt in this iteration.") + self._has_rebuilt_buckets = True + + # sync params according to location (before/after forward) user + # specified as part of hook, if hook was specified. + buffer_hook_registered = hasattr(self, 'buffer_hook') + if self._check_sync_bufs_pre_fwd(): + self._sync_buffers() + + if self._join_config.enable: + # Notify joined ranks whether they should sync in backwards pass or not. + self._check_global_requires_backward_grad_sync(is_joined_rank=False) + + inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids) + if self.module.training: + output = self.module.training_step(*inputs[0], **kwargs[0]) + elif self.module.testing: + output = self.module.test_step(*inputs[0], **kwargs[0]) + else: + output = self.module.validation_step(*inputs[0], **kwargs[0]) + + # sync params according to location (before/after forward) user + # specified as part of hook, if hook was specified. + if self._check_sync_bufs_post_fwd(): + self._sync_buffers() + + if torch.is_grad_enabled() and self.require_backward_grad_sync: + self.require_forward_param_sync = True + # We'll return the output object verbatim since it is a freeform + # object. We need to find any tensors in this object, though, + # because we need to figure out which parameters were used during + # this forward pass, to ensure we short circuit reduction for any + # unused parameters. Only if `find_unused_parameters` is set. + if self.find_unused_parameters and not self.static_graph: + # Do not need to populate this for static graph. + self.reducer.prepare_for_backward(list(_find_tensors(output))) + else: + self.reducer.prepare_for_backward([]) + else: + self.require_forward_param_sync = False + + # TODO: DDPSink is currently enabled for unused parameter detection and + # static graph training for first iteration. 
+ if (self.find_unused_parameters and not self.static_graph) or ( + self.static_graph and self.num_iterations == 1 + ): + state_dict = { + 'static_graph': self.static_graph, + 'num_iterations': self.num_iterations, + } + + output_tensor_list, treespec, output_is_rref = _tree_flatten_with_rref( + output + ) + output_placeholders = [None for _ in range(len(output_tensor_list))] + # Do not touch tensors that have no grad_fn, which can cause issues + # such as https://github.com/pytorch/pytorch/issues/60733 + for i, output in enumerate(output_tensor_list): + if torch.is_tensor(output) and output.grad_fn is None: + output_placeholders[i] = output + + # When find_unused_parameters=True, makes tensors which require grad + # run through the DDPSink backward pass. When not all outputs are + # used in loss, this makes those corresponding tensors receive + # undefined gradient which the reducer then handles to ensure + # param.grad field is not touched and we don't error out. + passthrough_tensor_list = _DDPSink.apply( + self.reducer, + state_dict, + *output_tensor_list, + ) + for i in range(len(output_placeholders)): + if output_placeholders[i] is None: + output_placeholders[i] = passthrough_tensor_list[i] + + # Reconstruct output data structure. 
+ output = _tree_unflatten_with_rref( + output_placeholders, treespec, output_is_rref + ) + return output diff --git a/MuseTalk_project/3DMM/utils/commons/euler2rot.py b/MuseTalk_project/3DMM/utils/commons/euler2rot.py new file mode 100644 index 00000000..9a9202d6 --- /dev/null +++ b/MuseTalk_project/3DMM/utils/commons/euler2rot.py @@ -0,0 +1,37 @@ +import torch +from scipy.spatial.transform import Rotation as R +from utils.commons.tensor_utils import convert_to_tensor + + +def rot2euler(rot, use_radian=True): + r = R.from_matrix(rot) + return r.as_euler('xyz', degrees=not use_radian) + +def euler2rot(euler, use_radian=True): + r = R.from_euler('xyz',euler, degrees=not use_radian) + return r.as_matrix() + +def c2w_to_euler_trans(c2w): + if c2w.ndim == 3: + e = rot2euler(c2w[:, :3, :3]) # [B, 3] + t = c2w[:, :3, 3].reshape([-1, 3]) + else: + e = rot2euler(c2w[:3, :3]) # [B, 3] + t = c2w[:3, 3].reshape([3]) + return e, t # [3+3] + +def euler_trans_2_c2w(euler, trans): + if euler.ndim == 2: + rot = euler2rot(euler) # [b, 3, 3] + bs = trans.shape[0] + trans = trans.reshape([bs, 3, 1]) + rot = convert_to_tensor(rot).float() + trans = convert_to_tensor(trans).float() + c2w = torch.cat([rot, trans], dim=-1) # [b, 3, 4] + else: + rot = euler2rot(euler) # [3, 3] + trans = trans.reshape([3, 1]) + rot = convert_to_tensor(rot).float() + trans = convert_to_tensor(trans).float() + c2w = torch.cat([rot, trans], dim=-1) # [3, 4] + return c2w \ No newline at end of file diff --git a/MuseTalk_project/3DMM/utils/commons/face_alignment_utils.py b/MuseTalk_project/3DMM/utils/commons/face_alignment_utils.py new file mode 100644 index 00000000..04e0b6b8 --- /dev/null +++ b/MuseTalk_project/3DMM/utils/commons/face_alignment_utils.py @@ -0,0 +1,22 @@ +import numpy as np + +yaw_idx_in_mediapipe_mesh = [356, 454, 361, 288, 397, 379, 378, 377, 152, 148, 149, 150, 172,58, 132, 234, 127] +brow_idx_in_mediapipe_mesh = [70, 63, 105, 66, 107, 336, 296, 334, 293, 300] +nose_idx_in_mediapipe_mesh = 
[6, 5, 1, 2, 129, 240, 2, 460, 358] +eye_idx_in_mediapipe_mesh = [33, 160, 158, 133, 153, 144, 362, 385, 387, 263, 373, 380] +mouth_idx_in_mediapipe_mesh = [61, 40, 37, 0, 267, 270, 291, 321, 314, 17, 84, 91, 78, 81, 13, 311, 308, 402, 14, 178] +lm68_idx_in_mediapipe_mesh = yaw_idx_in_mediapipe_mesh + brow_idx_in_mediapipe_mesh + nose_idx_in_mediapipe_mesh + eye_idx_in_mediapipe_mesh + mouth_idx_in_mediapipe_mesh + +def mediapipe_lm478_to_face_alignment_lm68(lm478, H, W, return_2d=True): + """ + lm478: [B, 478, 3] or [478,3] + """ + lm478[..., 0] *= W + lm478[..., 1] *= H + n_dim = 2 if return_2d else 3 + if lm478.ndim == 2: + return lm478[lm68_idx_in_mediapipe_mesh, :n_dim].astype(np.int16) + elif lm478.ndim == 3: + return lm478[:, lm68_idx_in_mediapipe_mesh, :n_dim].astype(np.int16) + else: + raise ValueError("input lm478 ndim should in 2 or 3!") \ No newline at end of file diff --git a/MuseTalk_project/3DMM/utils/commons/hparams.py b/MuseTalk_project/3DMM/utils/commons/hparams.py new file mode 100644 index 00000000..49e066f7 --- /dev/null +++ b/MuseTalk_project/3DMM/utils/commons/hparams.py @@ -0,0 +1,132 @@ +import argparse +import os +import yaml + +from utils.commons.os_utils import remove_file + +global_print_hparams = True +hparams = {} + + +class Args: + def __init__(self, **kwargs): + for k, v in kwargs.items(): + self.__setattr__(k, v) + + +def override_config(old_config: dict, new_config: dict): + for k, v in new_config.items(): + if isinstance(v, dict) and k in old_config: + override_config(old_config[k], new_config[k]) + else: + old_config[k] = v + + +def set_hparams(config='', exp_name='', hparams_str='', print_hparams=True, global_hparams=True): + if config == '' and exp_name == '': + parser = argparse.ArgumentParser(description='') + parser.add_argument('--config', type=str, default='', + help='location of the data corpus') + parser.add_argument('--exp_name', type=str, default='', help='exp_name') + parser.add_argument('-hp', '--hparams', type=str, 
default='', + help='location of the data corpus') + parser.add_argument('--infer', action='store_true', help='infer') + parser.add_argument('--validate', action='store_true', help='validate') + parser.add_argument('--reset', action='store_true', help='reset hparams') + parser.add_argument('--remove', action='store_true', help='remove old ckpt') + parser.add_argument('--debug', action='store_true', help='debug') + args, unknown = parser.parse_known_args() + print("| Unknow hparams: ", unknown) + else: + args = Args(config=config, exp_name=exp_name, hparams=hparams_str, + infer=False, validate=False, reset=False, debug=False, remove=False) + global hparams + assert args.config != '' or args.exp_name != '' + if args.config != '': + assert os.path.exists(args.config) + + config_chains = [] + loaded_config = set() + + def load_config(config_fn): + # deep first inheritance and avoid the second visit of one node + if not os.path.exists(config_fn): + return {} + with open(config_fn) as f: + hparams_ = yaml.safe_load(f) + loaded_config.add(config_fn) + if 'base_config' in hparams_: + ret_hparams = {} + if not isinstance(hparams_['base_config'], list): + hparams_['base_config'] = [hparams_['base_config']] + for c in hparams_['base_config']: + if c.startswith('.'): + c = f'{os.path.dirname(config_fn)}/{c}' + c = os.path.normpath(c) + if c not in loaded_config: + override_config(ret_hparams, load_config(c)) + override_config(ret_hparams, hparams_) + else: + ret_hparams = hparams_ + config_chains.append(config_fn) + return ret_hparams + + saved_hparams = {} + args_work_dir = '' + if args.exp_name != '': + args_work_dir = f'checkpoints/{args.exp_name}' + ckpt_config_path = f'{args_work_dir}/config.yaml' + if os.path.exists(ckpt_config_path): + with open(ckpt_config_path) as f: + saved_hparams_ = yaml.safe_load(f) + if saved_hparams_ is not None: + saved_hparams.update(saved_hparams_) + hparams_ = {} + if args.config != '': + hparams_.update(load_config(args.config)) + if not 
args.reset: + hparams_.update(saved_hparams) + if args.exp_name != '': + hparams_['work_dir'] = args_work_dir + + # Support config overriding in command line. Support list type config overriding. + # Examples: --hparams="a=1,b.c=2,d=[1 1 1]" + if args.hparams != "": + for new_hparam in args.hparams.split(","): + k, v = new_hparam.split("=") + v = v.strip("\'\" ") + config_node = hparams_ + for k_ in k.split(".")[:-1]: + config_node = config_node[k_] + k = k.split(".")[-1] + if v in ['True', 'False'] or type(config_node[k]) in [bool, list, dict]: + if type(config_node[k]) == list: + v = v.replace(" ", ",") + config_node[k] = eval(v) + else: + config_node[k] = type(config_node[k])(v) + if args_work_dir != '' and args.remove: + answer = input("REMOVE old checkpoint? Y/N [Default: N]: ") + if answer.lower() == "y": + remove_file(args_work_dir) + if args_work_dir != '' and (not os.path.exists(ckpt_config_path) or args.reset) and not args.infer: + os.makedirs(hparams_['work_dir'], exist_ok=True) + with open(ckpt_config_path, 'w') as f: + yaml.safe_dump(hparams_, f) + + hparams_['infer'] = args.infer + hparams_['debug'] = args.debug + hparams_['validate'] = args.validate + hparams_['exp_name'] = args.exp_name + global global_print_hparams + if global_hparams: + hparams.clear() + hparams.update(hparams_) + if print_hparams and global_print_hparams and global_hparams: + print('| Hparams chains: ', config_chains) + print('| Hparams: ') + for i, (k, v) in enumerate(sorted(hparams_.items())): + print(f"\033[;33;m{k}\033[0m: {v}, ", end="\n" if i % 5 == 4 else "") + print("") + global_print_hparams = False + return hparams_ diff --git a/MuseTalk_project/3DMM/utils/commons/image_utils.py b/MuseTalk_project/3DMM/utils/commons/image_utils.py new file mode 100644 index 00000000..6f836246 --- /dev/null +++ b/MuseTalk_project/3DMM/utils/commons/image_utils.py @@ -0,0 +1,39 @@ +import numpy as np +import torch +import cv2 +import os +import imageio + + +def to8b(x): + return 
def video2images(video_name, out_dir):
    """Dump every frame of a video into ``out_dir`` as sequentially numbered JPEGs.

    Frames are written as ``0.jpg``, ``1.jpg``, ... in decode order.

    :param video_name: path to a video file readable by OpenCV.
    :param out_dir: output directory; created if it does not exist.
    :return: number of frames written.
    """
    # Fix: cv2.imwrite fails silently when the target directory is missing.
    os.makedirs(out_dir, exist_ok=True)
    cap = cv2.VideoCapture(video_name)
    frame_num = 0
    try:
        while True:
            ok, frame = cap.read()
            if not ok or frame is None:
                break
            cv2.imwrite(os.path.join(out_dir, f'{frame_num}.jpg'), frame)
            frame_num += 1
    finally:
        # Always release the capture handle, even if imwrite raises.
        cap.release()
    return frame_num
def __getitem__(self, i):
    """Read and deserialize item ``i`` (or the item keyed ``i`` via ``id2pos``).

    Opens the data file per call, so one instance can be shared across
    DataLoader worker processes without sharing a file handle.
    """
    if self.id2pos is not None and len(self.id2pos) > 0:
        i = self.id2pos[i]  # translate an external key into a positional index
    self.check_index(i)

    # Find which chunk the byte offset of item i falls into.
    chunk_id = bisect(self.meta['chunk_begin'][1:], self.byte_offsets[i])
    # NOTE(review): always opens the root ".data" file even when chunk_id > 0;
    # a multi-chunk dataset would seem to need f"{path}.{chunk_id}.data" here
    # (cf. the commented-out variant below) - confirm against the builder.
    data_file = open(f"{self.path}.data", 'rb', buffering=-1)
    data_file.seek(self.byte_offsets[i] - self.meta['chunk_begin'][chunk_id])
    b = data_file.read(self.byte_offsets[i + 1] - self.byte_offsets[i])
    data_file.close()

    # chunk_id = bisect(self.meta['chunk_begin'][1:], self.byte_offsets[i])
    # data_file = self.data_files[chunk_id]
    # data_file.seek(self.byte_offsets[i] - self.meta['chunk_begin'][chunk_id])
    # b = data_file.read(self.byte_offsets[i + 1] - self.byte_offsets[i])

    unpickle = self.unpickle
    if unpickle:
        if self.gzip:
            b = gzip.decompress(b)  # stored compressed when built with gzip=True
        item = pickle.loads(b)
    else:
        item = b  # caller asked for the raw serialized bytes
    return item
def add_item(self, item, id=None, use_pickle=True):
    """Append one item to the dataset, rolling over to a new chunk file when full.

    :param item: object to store (or pre-serialized ``bytes`` when
        ``use_pickle`` is False).
    :param id: optional key recorded in ``id2pos`` for keyed lookup.
    :param use_pickle: pickle ``item`` before writing.
    """
    # Start a new chunk file once the current chunk exceeds max_size.
    if self.byte_offsets[-1] > self.meta['chunk_begin'][-1] + self.max_size:
        if self.data_file != self.root_data_file:
            self.data_file.close()
        self.data_chunk_id += 1
        self.data_file = open(f"{self.path}.{self.data_chunk_id}.data", 'wb')
        self.data_file.seek(0)
        self.meta['chunk_begin'].append(self.byte_offsets[-1])
    if not use_pickle:
        s = item
    else:
        s = pickle.dumps(item)
    if self.gzip:
        s = gzip.compress(s, 1)  # level 1: favor speed over ratio
    # Renamed from `bytes`: don't shadow the builtin type.
    n_written = self.data_file.write(s)
    if id is not None:
        self.id2pos[id] = len(self.byte_offsets) - 1
    self.byte_offsets.append(self.byte_offsets[-1] + n_written)
self.root_data_file.write(s) + self.root_data_file.close() + try: + self.data_file.close() + except: + pass + + +if __name__ == "__main__": + import random + from tqdm import tqdm + + # builder = IndexedDatasetBuilder(ds_path, append=True) + # for i in tqdm(range(size)): + # builder.add_item(items[i], i + size) + # builder.finalize() + # ds = IndexedDataset(ds_path) + # for i in tqdm(range(1000)): + # idx = random.randint(size, 2 * size - 1) + # assert (ds[idx]['a'] == items[idx - size]['a']).all() + # idx = random.randint(0, size - 1) + # assert (ds[idx]['a'] == items[idx]['a']).all() + + ds_path = '/tmp/indexed_ds_example' + size = 100 + items = [{"a": np.random.normal(size=[10000, 10]), + "b": np.random.normal(size=[10000, 10])} for i in range(size)] + builder = IndexedDatasetBuilder(ds_path, max_size=1024 * 1024 * 40) + builder.meta['lengths'] = [1, 2, 3] + for i in tqdm(range(size)): + builder.add_item(pickle.dumps(items[i]), i, use_pickle=False) + builder.finalize() + ds = IndexedDataset(ds_path) + assert ds.meta['lengths'] == [1, 2, 3] + for i in tqdm(range(1000)): + idx = random.randint(0, size - 1) + assert (ds[idx]['a'] == items[idx]['a']).all() + + # builder = IndexedDataset2Builder(ds_path, append=True) + # builder.meta['lengths'] = [1, 2, 3, 5, 6, 7] + # for i in tqdm(range(size)): + # builder.add_item(items[i], i + size) + # builder.finalize() + # ds = IndexedDataset2(ds_path) + # assert ds.meta['lengths'] == [1, 2, 3, 5, 6, 7] + # for i in tqdm(range(1000)): + # idx = random.randint(size, 2 * size - 1) + # assert (ds[idx]['a'] == items[idx - size]['a']).all() + # idx = random.randint(0, size - 1) + # assert (ds[idx]['a'] == items[idx]['a']).all() diff --git a/MuseTalk_project/3DMM/utils/commons/meters.py b/MuseTalk_project/3DMM/utils/commons/meters.py new file mode 100644 index 00000000..e38790e9 --- /dev/null +++ b/MuseTalk_project/3DMM/utils/commons/meters.py @@ -0,0 +1,42 @@ +import time +import torch + + +class AvgrageMeter(object): + + def 
class Timer:
    """Context manager accumulating wall-clock time per named timer.

    Totals live in the class-level ``timer_map`` keyed by timer name, so
    repeated ``with Timer(name, enable=True):`` blocks add up across uses and
    instances. When CUDA is available the device is synchronized before each
    reading so outstanding GPU work is included. A disabled timer is a no-op.
    """

    timer_map = {}

    def __init__(self, name, enable=False):
        Timer.timer_map.setdefault(name, 0)
        self.name = name
        self.enable = enable

    @staticmethod
    def _now():
        # Drain pending CUDA work so the reading covers GPU kernels too.
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        return time.time()

    def __enter__(self):
        if not self.enable:
            return
        self.t = self._now()

    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self.enable:
            return
        Timer.timer_map[self.name] += self._now() - self.t
        print(f'[Timer] {self.name}: {Timer.timer_map[self.name]}')
def get_results(self):
    """Yield ``(job_idx, result)`` pairs until every submitted job finishes.

    Results arrive in completion order, not submission order. While draining
    results, jobs that did not fit into the bounded args queue at submission
    time are fed to the workers as space frees up. After the last result an
    empty-string sentinel is sent to every worker and all workers are joined.
    """
    self.n_finished = 0
    while self.n_finished < self.total_jobs:
        # Refill the bounded args queue from the pending backlog.
        while len(self.jobs_pending) > 0 and not self.args_queue.full():
            self.args_queue.put(self.jobs_pending[0])
            self.jobs_pending = self.jobs_pending[1:]
        job_id, res = self.results_queue.get()  # blocks until some worker finishes
        yield job_id, res
        self.n_finished += 1
    # "" is the shutdown sentinel understood by chunked_worker.
    for w in range(self.num_workers):
        self.args_queue.put("")
    for w in self.workers:
        w.join()
def remove_file(*fns):
    """Recursively and forcefully delete each given file or directory.

    Missing paths are ignored (``rm -rf`` semantics). The argument vector is
    passed to subprocess as a list - not through a shell - so paths containing
    quotes, spaces or shell metacharacters cannot break the command or inject
    extra commands (the previous f-string + shell=True form could).

    :param fns: one or more file or directory paths.
    """
    for f in fns:
        subprocess.check_call(['rm', '-rf', f])
def f0_to_coarse(f0):
    """Quantize f0 (Hz) into coarse mel-scale bins in ``[1, f0_bin - 1]``.

    Accepts either a ``torch.Tensor`` or an ``np.ndarray``; unvoiced frames
    (f0 == 0) map to bin 1. ``f0_mel`` is computed on the mel scale, linearly
    rescaled between the module-level ``f0_mel_min``/``f0_mel_max``, clipped,
    and rounded to the nearest integer bin.

    :param f0: fundamental frequency values in Hz.
    :return: integer bin indices (torch ``long`` or numpy ``int64``).
    """
    is_torch = isinstance(f0, torch.Tensor)
    f0_mel = 1127 * (1 + f0 / 700).log() if is_torch else 1127 * np.log(1 + f0 / 700)
    f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * (f0_bin - 2) / (f0_mel_max - f0_mel_min) + 1

    f0_mel[f0_mel <= 1] = 1
    f0_mel[f0_mel > f0_bin - 1] = f0_bin - 1
    # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24; use the
    # explicit fixed-width dtype (matches torch's .long() on the other branch).
    f0_coarse = (f0_mel + 0.5).long() if is_torch else np.rint(f0_mel).astype(np.int64)
    assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (f0_coarse.max(), f0_coarse.min(), f0.min(), f0.max())
    return f0_coarse
def convert_to_np(tensors):
    """Recursively convert torch tensors inside ``tensors`` to numpy arrays.

    Supported containers are dicts and lists (one level of recursion per
    nested dict); non-tensor leaves are passed through untouched. A top-level
    ``np.ndarray`` is returned as-is; any other top-level type raises.
    """
    def _leaf(v):
        # Convert a single value: tensor -> ndarray, dict -> recurse, else keep.
        if isinstance(v, torch.Tensor):
            v = v.cpu().numpy()
        if type(v) is dict:
            v = convert_to_np(v)
        return v

    if isinstance(tensors, np.ndarray):
        return tensors
    if isinstance(tensors, dict):
        return {k: _leaf(v) for k, v in tensors.items()}
    if isinstance(tensors, list):
        return [_leaf(v) for v in tensors]
    if isinstance(tensors, torch.Tensor):
        return _leaf(tensors)
    raise Exception(f'tensors_to_np does not support type {type(tensors)}.')
class Tee(object):
    """Tee ``sys.stdout`` into a log file.

    Constructing an instance opens ``name`` and globally replaces
    ``sys.stdout`` with this object; every ``write`` then goes to both the
    log file and the original stdout.
    """

    def __init__(self, name, mode):
        # name: log file path; mode: passed straight to open() ('w', 'a', ...).
        self.file = open(name, mode)
        self.stdout = sys.stdout
        sys.stdout = self  # global side effect: redirection starts immediately

    def __del__(self):
        # NOTE(review): restores whatever stdout was at construction time;
        # two Tee instances destroyed out of order would mis-restore - confirm
        # only one is ever live (see Trainer.save_terminal_logs).
        sys.stdout = self.stdout
        self.file.close()

    def write(self, data):
        # Mirror the payload to both sinks.
        self.file.write(data)
        self.stdout.write(data)

    def flush(self):
        self.file.flush()
def fit(self, task_cls):
    """Entry point: run training (or testing, via ``test()``) for ``task_cls``.

    With more than one visible GPU this spawns one DDP worker process per GPU
    through ``mp.spawn``; otherwise the task is built and run in-process.

    :param task_cls: task class (not an instance) to instantiate and run.
    :return: 1 (legacy success flag).
    """
    if len(self.all_gpu_ids) > 1:
        # deepcopy hparams so each spawned process gets its own mutable copy
        mp.spawn(self.ddp_run, nprocs=self.num_gpus, args=(task_cls, copy.deepcopy(hparams)))
    else:
        self.task = task_cls()
        self.task.trainer = self
        self.run_single_process(self.task)
    return 1
def run_single_process(self, task):
    """Build the task's model/optimizers, restore any checkpoint, then run
    training or evaluation in the current process.

    :param task: instantiated task whose hooks (build_model,
        configure_optimizers, train/val/test steps) drive the run.
    """
    # build model, optm and load checkpoint
    if self.proc_rank == 0:
        self.save_terminal_logs()
        if not self.testing:
            self.save_codes()

    model = task.build_model()
    if model is not None:
        task.model = model
    checkpoint, _ = get_last_checkpoint(self.work_dir, self.resume_from_checkpoint)
    if checkpoint is not None:
        self.restore_weights(checkpoint)
    elif self.on_gpu:
        # no checkpoint: just move the freshly built model to the GPU
        task.cuda(self.root_gpu)
    if not self.testing:
        self.optimizers = task.configure_optimizers()
        # (sic) "fisrt" typo is load-bearing: train() reads self.fisrt_epoch
        self.fisrt_epoch = True
    if checkpoint is not None:
        self.restore_opt_state(checkpoint)
    del checkpoint
    # clear cache after restore
    if self.on_gpu:
        torch.cuda.empty_cache()

    if self.use_ddp:
        self.task = self.configure_ddp(self.task)
        dist.barrier()  # all ranks wait until DDP wrapping is done everywhere

    task_ref = self.get_task_ref()
    task_ref.trainer = self
    task_ref.testing = self.testing
    # link up experiment object: only rank 0 logs to the real tb dir,
    # other ranks write to a throwaway location
    if self.proc_rank == 0:
        task_ref.build_tensorboard(save_dir=self.work_dir, name='tb_logs')
    else:
        os.makedirs('tmp', exist_ok=True)
        task_ref.build_tensorboard(save_dir='tmp', name='tb_tmp')
    self.logger = task_ref.logger
    try:
        if self.testing:
            self.run_evaluation(test=True)
        else:
            self.train()
    except KeyboardInterrupt as e:
        traceback.print_exc()
        task_ref.on_keyboard_interrupt()
def evaluate(self, task, test=False, tqdm_desc='Valid', max_batches=None):
    """Run one full validation (or test) pass and return aggregated results.

    :param task: the task (possibly DDP-wrapped) to evaluate.
    :param test: use the ``test_*`` hooks instead of the ``validation_*`` hooks.
    :param tqdm_desc: label for the progress bar.
    :param max_batches: stop after this many batches; ``-1``/``None`` = no limit.
    :return: dict from ``validation_end``/``test_end``, or ``None`` on early exit.
    """
    if max_batches == -1:
        max_batches = None
    # enable eval mode
    task.zero_grad()
    task.eval()
    torch.set_grad_enabled(False)

    task_ref = self.get_task_ref()
    if test:
        ret = task_ref.test_start()
        if ret == 'EXIT':  # a task may opt out of testing entirely
            return
    else:
        task_ref.validation_start()
    outputs = []
    dataloader = task_ref.test_dataloader() if test else task_ref.val_dataloader()
    # only rank 0 (root_gpu == 0) shows a progress bar
    pbar = tqdm.tqdm(dataloader, desc=tqdm_desc, total=max_batches, dynamic_ncols=True, unit='step',
                     disable=self.root_gpu > 0)
    for batch_idx, batch in enumerate(pbar):
        if batch is None:  # pragma: no cover
            continue
        # stop short when on fast_dev_run (sets max_batch=1)
        if max_batches is not None and batch_idx >= max_batches:
            break

        # make dataloader_idx arg in validation_step optional
        if self.on_gpu:
            batch = move_to_cuda(batch, self.root_gpu)
        args = [batch, batch_idx]
        if self.use_ddp:
            output = task(*args)  # route through the DDP wrapper's forward
        else:
            if test:
                output = task_ref.test_step(*args)
            else:
                output = task_ref.validation_step(*args)
        # track outputs for collation
        outputs.append(output)
    # give model a chance to do something with the outputs (and method defined)
    if test:
        eval_results = task_ref.test_end(outputs)
    else:
        eval_results = task_ref.validation_end(outputs)
    # enable train mode again
    task.train()
    torch.set_grad_enabled(True)
    return eval_results
self.current_epoch + # run all epochs + while True: + # set seed for distributed sampler (enables shuffling for each epoch) + if self.use_ddp and hasattr(dataloader.sampler, 'set_epoch'): + dataloader.sampler.set_epoch(epoch) + # update training progress in trainer and model + task_ref.current_epoch = epoch + self.current_epoch = epoch + # total batches includes multiple val checks + self.batch_loss_value = 0 # accumulated grads + # before epoch hook + task_ref.on_epoch_start() + + # run epoch + train_pbar = tqdm.tqdm(dataloader, initial=self.global_step, total=float('inf'), + dynamic_ncols=True, unit='step', disable=self.root_gpu > 0) + for batch_idx, batch in enumerate(train_pbar): + if self.global_step % self.val_check_interval == 0 and not self.fisrt_epoch: + self.run_evaluation() + pbar_metrics, tb_metrics = self.run_training_batch(batch_idx, batch) + train_pbar.set_postfix(**pbar_metrics) + self.fisrt_epoch = False + # when metrics should be logged + if (self.global_step + 1) % self.tb_log_interval == 0: + # logs user requested information to logger + self.log_metrics_to_tb(tb_metrics) + + self.global_step += 1 + task_ref.global_step = self.global_step + if self.global_step > self.max_updates: + print("| Training end..") + break + # epoch end hook + epoch_loss_dict = task_ref.on_epoch_end() + self.log_metrics_to_tb(epoch_loss_dict) + epoch += 1 + if self.global_step > self.max_updates: + break + task_ref.on_train_end() + + def run_training_batch(self, batch_idx, batch): + if batch is None: + return {} + all_progress_bar_metrics = [] + all_log_metrics = [] + task_ref = self.get_task_ref() + for opt_idx, optimizer in enumerate(self.optimizers): + if optimizer is None: + continue + # make sure only the gradients of the current optimizer's paramaters are calculated + # in the training step to prevent dangling gradients in multiple-optimizer setup. 
+ if len(self.optimizers) > 1: + for param in task_ref.parameters(): + param.requires_grad = False + for group in optimizer.param_groups: + for param in group['params']: + param.requires_grad = True + + # forward pass + with autocast(enabled=self.amp): + if self.on_gpu: + batch = move_to_cuda(copy.copy(batch), self.root_gpu) + args = [batch, batch_idx, opt_idx] + if self.use_ddp: + output = self.task(*args) + else: + output = task_ref.training_step(*args) + loss = output['loss'] + if loss is None: + continue + progress_bar_metrics = output['progress_bar'] + log_metrics = output['tb_log'] + # accumulate loss + loss = loss / self.accumulate_grad_batches + + # backward pass + if loss.requires_grad: + if self.amp: + self.amp_scalar.scale(loss).backward() + else: + loss.backward() + + # track progress bar metrics + all_log_metrics.append(log_metrics) + all_progress_bar_metrics.append(progress_bar_metrics) + + if loss is None: + continue + + # nan grads + if self.print_nan_grads: + has_nan_grad = False + for name, param in task_ref.named_parameters(): + if (param.grad is not None) and torch.isnan(param.grad.float()).any(): + print("| NaN params: ", name, param, param.grad) + has_nan_grad = True + if has_nan_grad: + exit(0) + + # gradient update with accumulated gradients + if (self.global_step + 1) % self.accumulate_grad_batches == 0: + grad_norm_dict = task_ref.on_before_optimization(opt_idx) + if grad_norm_dict is not None: + all_log_metrics[-1].update(grad_norm_dict) + if self.amp: + self.amp_scalar.step(optimizer) + self.amp_scalar.update() + else: + optimizer.step() + optimizer.zero_grad() + task_ref.on_after_optimization(self.current_epoch, batch_idx, optimizer, opt_idx) + + # collapse all metrics into one dict + all_progress_bar_metrics = {k: v for d in all_progress_bar_metrics for k, v in d.items()} + all_log_metrics = {k: v for d in all_log_metrics for k, v in d.items()} + return all_progress_bar_metrics, all_log_metrics + + #################### + # load and 
def restore_opt_state(self, checkpoint):
    """Restore optimizer states from a checkpoint dict produced by
    ``dump_checkpoint``.

    No-op in testing mode. Optimizer state tensors are moved to the root GPU
    one at a time when running on GPU. A state-shape mismatch is reported but
    not fatal. On non-zero DDP ranks the function returns early without a
    success flag.

    :param checkpoint: dict containing an ``optimizer_states`` list.
    :return: True when restoration completed on this rank, else None.
    """
    if self.testing:
        return
    # restore the optimizers
    optimizer_states = checkpoint['optimizer_states']
    for optimizer, opt_state in zip(self.optimizers, optimizer_states):
        if optimizer is None:
            return
        try:
            optimizer.load_state_dict(opt_state)
            # move optimizer to GPU 1 weight at a time
            if self.on_gpu:
                for state in optimizer.state.values():
                    for k, v in state.items():
                        if isinstance(v, torch.Tensor):
                            state[k] = v.cuda(self.root_gpu)
        except ValueError:
            # fixed log-message typo: "WARMING" -> "WARNING"
            print("| WARNING: optimizer parameters not match !!!")
        try:
            if dist.is_initialized() and dist.get_rank() > 0:
                return
        except Exception as e:
            print(e)
            return
    did_restore = True
    return did_restore
def resolve_root_node_address(self, root_node):
    """Collapse a SLURM-style nodelist (e.g. ``node[3-7,9]``) to one hostname.

    Takes the base name plus the first number of the first range/element,
    e.g. ``host[03-06,09]`` -> ``host03``. Addresses without a bracketed
    range are returned unchanged.
    """
    if '[' not in root_node:
        return root_node
    name = root_node.partition('[')[0]
    number = root_node.partition(',')[0]
    if '-' in number:
        number = number.partition('-')[0]
    digits = re.sub('[^0-9]', '', number)
    return name + digits
isinstance(self.task, DDP) else self.task + return task + + def log_metrics_to_tb(self, metrics, step=None): + """Logs the metric dict passed in. + + :param metrics: + """ + # turn all tensors to scalars + scalar_metrics = self.metrics_to_scalars(metrics) + + step = step if step is not None else self.global_step + # log actual metrics + if self.proc_rank == 0: + self.log_metrics(self.logger, scalar_metrics, step=step) + + @staticmethod + def log_metrics(logger, metrics, step=None): + for k, v in metrics.items(): + if isinstance(v, torch.Tensor): + v = v.item() + logger.add_scalar(k, v, step) + + def metrics_to_scalars(self, metrics): + new_metrics = {} + for k, v in metrics.items(): + if isinstance(v, torch.Tensor): + v = v.item() + + if type(v) is dict: + v = self.metrics_to_scalars(v) + + new_metrics[k] = v + + return new_metrics + + def save_terminal_logs(self): + t = datetime.now().strftime('%Y%m%d%H%M%S') + os.makedirs(f'{self.work_dir}/terminal_logs', exist_ok=True) + Tee(f'{self.work_dir}/terminal_logs/log_{t}.txt', 'w') + + def save_codes(self): + if len(hparams['save_codes']) > 0: + t = datetime.now().strftime('%Y%m%d%H%M%S') + code_dir = f'{self.work_dir}/codes/{t}' + subprocess.check_call(f'mkdir -p "{code_dir}"', shell=True) + for c in hparams['save_codes']: + if os.path.exists(c): + subprocess.check_call( + f'rsync -aR ' + f'--include="*.py" ' + f'--include="*.yaml" ' + f'--exclude="__pycache__" ' + f'--include="*/" ' + f'--exclude="*" ' + f'"./{c}" "{code_dir}/"', + shell=True) + print(f"| Copied codes to {code_dir}.") diff --git a/MuseTalk_project/3DMM/utils/nn/__pycache__/grad.cpython-39.pyc b/MuseTalk_project/3DMM/utils/nn/__pycache__/grad.cpython-39.pyc new file mode 100644 index 00000000..fbff2f1c Binary files /dev/null and b/MuseTalk_project/3DMM/utils/nn/__pycache__/grad.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/utils/nn/__pycache__/model_utils.cpython-39.pyc b/MuseTalk_project/3DMM/utils/nn/__pycache__/model_utils.cpython-39.pyc 
def get_grad_norm(model, l=2):
    """Return the l1 or l2 norm of all gradients of *model*.

    Args:
        model: an ``nn.Module`` or any iterable of parameters.
        l: norm order, 1 or 2.

    Returns:
        float: accumulated gradient norm; 0.0 if no parameter has a gradient.

    Raises:
        ValueError: if ``l`` is neither 1 nor 2 (raised on the first
            parameter that has a gradient).
    """
    num_para = 0
    accu_grad = 0
    if isinstance(model, torch.nn.Module):
        params = model.parameters()
    else:
        params = model
    for p in params:
        if p.grad is None:
            continue
        num_para += p.numel()
        if l == 1:
            # BUG FIX: Tensor.abs() takes no argument; `p.grad.abs(1)` raised
            # TypeError, so the l1 branch could never run.
            accu_grad += p.grad.abs().sum()
        elif l == 2:
            accu_grad += p.grad.pow(2).sum()
        else:
            raise ValueError("Now we only implement l1/l2 norm !")
    if l == 2:
        accu_grad = accu_grad ** 0.5
    # BUG FIX: when no parameter had a gradient, accu_grad stayed a plain
    # Python number; calling `.item()` on it crashed (int has no .item()).
    if isinstance(accu_grad, (int, float)):
        return float(accu_grad)
    return accu_grad.item()
def num_params(model, print_out=True, model_name="model"):
    """Count *model*'s trainable parameters, in millions.

    Optionally prints the count (when ``print_out`` is True); always returns it.
    """
    trainable = (p for p in model.parameters() if p.requires_grad)
    millions = sum(np.prod(p.size()) for p in trainable) / 1_000_000
    if print_out:
        print(f'| {model_name} Trainable Parameters: %.3fM' % millions)
    return millions
class ExponentialSchedule(NoneSchedule):
    """Linear warmup followed by exponential decay (0.1x every 250k updates)."""

    def __init__(self, optimizer, lr, warmup_updates):
        self.optimizer = optimizer
        self.constant_lr = self.lr = lr
        self.warmup_updates = warmup_updates
        for group in optimizer.param_groups:
            group['lr'] = self.lr
        self.step(0)

    def step(self, num_updates):
        base_lr = self.constant_lr
        in_warmup = self.warmup_updates > 0 and num_updates <= self.warmup_updates
        if in_warmup:
            warmup_frac = min(num_updates / self.warmup_updates, 1.0)
            candidate = base_lr * warmup_frac
        else:
            # decay by 0.1x for every 250k steps
            candidate = base_lr * (0.1 ** (num_updates / 250_000))
        # Floor the learning rate at 1e-7 in both phases.
        self.lr = max(candidate, 1e-7)
        for group in self.optimizer.param_groups:
            group['lr'] = self.lr
        return self.lr
class ExponentialScheduleForRADNeRF(NoneSchedule):
    """Default scheduler in RAD-NeRF.

    The optimizer is expected to carry three param groups with different lr:
      - group 0: network params, lr starts at ``lr`` (e.g. 0.0005)
      - group 1: tileGrid embedding, lr starts at ``lr * 10`` (e.g. 0.005)
      - group 2: attention net, lr starts at ``lr * 5`` (e.g. 0.0025)
    """
    def __init__(self, optimizer, lr, warmup_updates=0):
        self.optimizer = optimizer
        self.constant_lr = self.lr = lr  # e.g. 0.0005
        self.warmup_updates = warmup_updates
        self.finetune_lips = hparams['finetune_lips']
        self.finetune_lips_start_iter = hparams['finetune_lips_start_iter']

        optimizer.param_groups[0]['lr'] = self.lr       # network params
        optimizer.param_groups[1]['lr'] = self.lr * 10  # tileGrid embedding
        optimizer.param_groups[2]['lr'] = self.lr * 5   # attention net
        self.step(0)

    def step(self, num_updates):
        constant_lr = self.constant_lr
        if self.warmup_updates > 0 and num_updates <= self.warmup_updates:
            # linear warmup up to the base lr
            warmup = min(num_updates / self.warmup_updates, 1.0)
            self.lr = max(constant_lr * warmup, 1e-7)
        else:
            # Decay by 0.1x for every 250k steps, floored at 1e-7.
            # NOTE(review): the original code branched on
            # `self.finetune_lips and num_updates > self.finetune_lips_start_iter`
            # here, but both branches computed the *identical* expression (despite
            # comments claiming different decay rates), so the dead conditional
            # has been collapsed. Behavior is unchanged.
            new_lrate = constant_lr * (0.1 ** (num_updates / 250_000))
            self.lr = max(new_lrate, 1e-7)

        self.optimizer.param_groups[0]['lr'] = self.lr
        self.optimizer.param_groups[1]['lr'] = self.lr * 10
        self.optimizer.param_groups[2]['lr'] = self.lr * 5
        return self.lr
def sequence_mask(lengths, maxlen, dtype=torch.bool):
    """Build a [B, maxlen] mask that is truthy for positions < lengths[b].

    Args:
        lengths: 1-D tensor of sequence lengths, shape [B].
        maxlen: maximum sequence length; if None, ``lengths.max()`` is used.
        dtype: dtype of the returned mask (default ``torch.bool``).

    Returns:
        Tensor of shape [B, maxlen] with dtype ``dtype``.
    """
    if maxlen is None:
        maxlen = lengths.max()
    mask = ~(torch.ones((len(lengths), maxlen)).to(lengths.device).cumsum(dim=1).t() > lengths).t()
    # BUG FIX: Tensor.type() is not in-place; the converted result was previously
    # discarded, so the `dtype` argument silently had no effect.
    mask = mask.type(dtype)
    return mask
def fill_with_neg_inf(t):
    """FP16-compatible helper: fill *t* with -inf via an fp32 round-trip."""
    neg_inf = float('-inf')
    filled = t.float().fill_(neg_inf)
    return filled.type_as(t)
def make_pad_mask(lengths, xs=None, length_dim=-1):
    """Make a mask tensor marking the padded positions.

    Args:
        lengths (LongTensor or List): batch of lengths (B,).
        xs (Tensor, optional): reference tensor; if given, the mask is
            broadcast/expanded to the same shape as ``xs``.
        length_dim (int, optional): which dimension of ``xs`` indexes time.

    Returns:
        Tensor (bool in PyTorch 1.2+): True at padded positions, e.g.
        ``make_pad_mask([5, 3, 2])`` ->
        ``[[0, 0, 0, 0, 0], [0, 0, 0, 1, 1], [0, 0, 1, 1, 1]]``.

    Raises:
        ValueError: if ``length_dim`` is 0 (the batch dimension).
    """
    if length_dim == 0:
        raise ValueError("length_dim cannot be 0: {}".format(length_dim))

    if not isinstance(lengths, list):
        lengths = lengths.tolist()
    batch = int(len(lengths))
    maxlen = int(max(lengths)) if xs is None else xs.size(length_dim)

    positions = torch.arange(0, maxlen, dtype=torch.int64)
    positions = positions.unsqueeze(0).expand(batch, maxlen)
    limits = positions.new(lengths).unsqueeze(-1)
    mask = positions >= limits

    if xs is not None:
        assert xs.size(0) == batch, (xs.size(0), batch)

        if length_dim < 0:
            length_dim = xs.dim() + length_dim
        # Insert singleton dims everywhere except the batch and length dims,
        # then broadcast the mask to xs's full shape:
        # index = (:, None, ..., None, :, None, ..., None)
        index = tuple(
            slice(None) if axis in (0, length_dim) else None for axis in range(xs.dim())
        )
        mask = mask[index].expand_as(xs).to(xs.device)
    return mask
+ xs (Tensor, optional): The reference tensor. + If set, masks will be the same shape as this tensor. + length_dim (int, optional): Dimension indicator of the above tensor. + See the example. + Returns: + ByteTensor: mask tensor containing indices of padded part. + dtype=torch.uint8 in PyTorch 1.2- + dtype=torch.bool in PyTorch 1.2+ (including 1.2) + Examples: + With only lengths. + >>> lengths = [5, 3, 2] + >>> make_non_pad_mask(lengths) + masks = [[1, 1, 1, 1 ,1], + [1, 1, 1, 0, 0], + [1, 1, 0, 0, 0]] + With the reference tensor. + >>> xs = torch.zeros((3, 2, 4)) + >>> make_non_pad_mask(lengths, xs) + tensor([[[1, 1, 1, 1], + [1, 1, 1, 1]], + [[1, 1, 1, 0], + [1, 1, 1, 0]], + [[1, 1, 0, 0], + [1, 1, 0, 0]]], dtype=torch.uint8) + >>> xs = torch.zeros((3, 2, 6)) + >>> make_non_pad_mask(lengths, xs) + tensor([[[1, 1, 1, 1, 1, 0], + [1, 1, 1, 1, 1, 0]], + [[1, 1, 1, 0, 0, 0], + [1, 1, 1, 0, 0, 0]], + [[1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0]]], dtype=torch.uint8) + With the reference tensor and dimension indicator. 
def get_mask_from_lengths(lengths):
    """Return a [B, max_len] bool mask, True at valid (non-padded) positions."""
    longest = torch.max(lengths).item()
    positions = torch.arange(0, longest).to(lengths.device)
    return (positions < lengths.unsqueeze(1)).bool()
index 00000000..9d69a19f Binary files /dev/null and b/MuseTalk_project/3DMM/utils/visualization/__pycache__/ffmpeg_utils.cpython-39.pyc differ diff --git a/MuseTalk_project/3DMM/utils/visualization/draw_3d_landmark.py b/MuseTalk_project/3DMM/utils/visualization/draw_3d_landmark.py new file mode 100644 index 00000000..fe0ead4c --- /dev/null +++ b/MuseTalk_project/3DMM/utils/visualization/draw_3d_landmark.py @@ -0,0 +1,364 @@ +import cv2 +import math +import numpy as np +import matplotlib.pyplot as plt +import dearpygui.dearpygui as dpg +from scipy.spatial.transform import Rotation as R +from utils.commons.hparams import set_hparams, hparams +from data_util.face3d_helper import Face3DHelper + +face3d_helper = Face3DHelper(use_gpu=False) + + +set_hparams("egs/datasets/videos/May/radnerf_torso.yaml") + +from tasks.radnerfs.dataset_utils import RADNeRFDataset +dataset = RADNeRFDataset("val") +idexp_lm3d_mean = dataset.idexp_lm3d_mean.reshape([68,3]) +lm3d_mean = idexp_lm3d_mean / 10 + face3d_helper.key_mean_shape +lm3d_mean /= 1.5 # normalize to [-1,1] + +class Landmark3D: + + def __init__(self): + + # init pose [18, 3], in [-1, 1]^3 + self.points3D = np.concatenate([lm3d_mean.numpy(), np.ones([68,1])],axis=1).reshape([68,4]) + + # lines [17, 2] + self.lines = [ + # yaw + [0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5,6], [6,7], [7,8], [8,9], [9,10], [10,11], [11,12], [12,13], [13,14], [14,15], [15,16], + # left brow + [17,18], [18,19], [19,20], [20,21], + # right brow + [22, 23], [23,24], [24,25], [25,26], + # nose + [27,28], [28,29], [29,30], [31,32], [32,33], [33,34], [34,35], + # left eye + [36,37], [37,38], [38,39], [39,40], [40,41], [41,36], + # right eye + [42,43], [43,44], [44,45], [45,46], [46,47], [47,42], + # mouth + [48, 49], [49,50], [50,51], [51,52], [52,53], [53,54], [54,55], [55,56], [56,57], [57,58], [58,59],[59,48], + [48, 60], [60,61], [61,62], [62,63], [63,64], [64,65], [65,66], [66,67], [67,60], [54,64] + ] + # # keypoint color [18, 3] + # self.colors = 
class OrbitCamera:
    """Simple orbit camera for the landmark viewer: stores pose as a rotation
    around a look-at center at a given radius, and exposes OpenGL-style
    view/projection matrices as properties."""

    def __init__(self, W, H, r=2, fovy=60, near=0.01, far=100):
        # W, H: viewport size in pixels; near/far: clip planes.
        self.W = W
        self.H = H
        self.radius = r # camera distance from center
        self.fovy = fovy # in degree
        self.near = near
        self.far = far
        self.center = np.array([0, 0, 0], dtype=np.float32) # look at this point
        self.rot = R.from_matrix(np.eye(3))
        self.up = np.array([0, 1, 0], dtype=np.float32) # need to be normalized!

    # pose
    @property
    def pose(self):
        """Camera-to-world 4x4 matrix: translate out to radius, apply the
        orbit rotation, then offset by the look-at center."""
        # first move camera to radius
        res = np.eye(4, dtype=np.float32)
        res[2, 3] = self.radius # opengl convention...
        # rotate
        rot = np.eye(4, dtype=np.float32)
        rot[:3, :3] = self.rot.as_matrix()
        res = rot @ res
        # translate
        res[:3, 3] -= self.center
        return res

    # view
    @property
    def view(self):
        # World-to-camera matrix: inverse of the pose.
        return np.linalg.inv(self.pose)

    # intrinsics
    @property
    def intrinsics(self):
        # Pinhole intrinsics [fx, fy, cx, cy]; focal derived from vertical FoV.
        focal = self.H / (2 * np.tan(np.radians(self.fovy) / 2))
        return np.array([focal, focal, self.W // 2, self.H // 2], dtype=np.float32)

    # projection (perspective)
    @property
    def perspective(self):
        # OpenGL-style perspective projection built from fovy/aspect/near/far.
        # NOTE(review): the -1/y entry flips the y axis relative to the usual
        # OpenGL matrix — presumably to match the image coordinate convention
        # used by Landmark3D.draw; confirm before reusing elsewhere.
        y = np.tan(np.radians(self.fovy) / 2)
        aspect = self.W / self.H
        return np.array([[1/(y*aspect), 0, 0, 0],
                         [ 0, -1/y, 0, 0],
                         [ 0, 0, -(self.far+self.near)/(self.far-self.near), -(2*self.far*self.near)/(self.far-self.near)],
                         [ 0, 0, -1, 0]], dtype=np.float32)


    def orbit(self, dx, dy):
        """Rotate the camera by a mouse drag (dx, dy in pixels)."""
        # rotate along camera up/side axis!
        side = self.rot.as_matrix()[:3, 0] # why this is side --> ? # already normalized.
        rotvec_x = self.up * np.radians(-0.05 * dx)
        rotvec_y = side * np.radians(-0.05 * dy)
        self.rot = R.from_rotvec(rotvec_x) * R.from_rotvec(rotvec_y) * self.rot

    def scale(self, delta):
        # Dolly in/out: each wheel notch scales the radius by 1.1x.
        self.radius *= 1.1 ** (-delta)

    def pan(self, dx, dy, dz=0):
        """Translate the look-at center in camera coordinates."""
        # pan in camera coordinate system (careful on the sensitivity!)
        self.center += 0.0005 * self.rot.as_matrix()[:3, :3] @ np.array([dx, -dy, dz])
255).astype(np.uint8) + image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) + cv2.imwrite(self.save_path, image) + print(f'[INFO] write image to {self.save_path}') + + def callback_set_save_path(sender, app_data): + self.save_path = app_data + + with dpg.group(horizontal=True): + dpg.add_button(label="save image", tag="_button_save", callback=callback_save) + dpg.bind_item_theme("_button_save", theme_button) + + dpg.add_input_text(label="", default_value=self.save_path, callback=callback_set_save_path) + + # fov slider + def callback_set_fovy(sender, app_data): + self.cam.fovy = app_data + self.need_update = True + + dpg.add_slider_int(label="FoV (vertical)", min_value=1, max_value=120, format="%d deg", default_value=self.cam.fovy, callback=callback_set_fovy) + + + ### register camera handler + + def callback_camera_drag_rotate(sender, app_data): + + if not dpg.is_item_focused("_primary_window"): + return + + # dx = app_data[1] + # dy = app_data[2] + + # self.cam.orbit(dx, dy) + self.need_update = True + + + def callback_camera_wheel_scale(sender, app_data): + + if not dpg.is_item_focused("_primary_window"): + return + + delta = app_data + + self.cam.scale(delta) + self.need_update = True + + + def callback_camera_drag_pan(sender, app_data): + + if not dpg.is_item_focused("_primary_window"): + return + + dx = app_data[1] + dy = app_data[2] + + self.cam.pan(dx, dy) + self.need_update = True + + def callback_set_mouse_loc(sender, app_data): + + if not dpg.is_item_focused("_primary_window"): + return + + # just the pixel coordinate in image + self.mouse_loc = np.array(app_data) + + def callback_skel_select(sender, app_data): + + if not dpg.is_item_focused("_primary_window"): + return + + # determine the selected keypoint from mouse_loc + if self.points2D is None: return # not prepared + + dist = np.linalg.norm(self.points2D - self.mouse_loc, axis=1) # [18] + self.point_idx = np.argmin(dist) + + + def callback_skel_drag(sender, app_data): + + if not 
dpg.is_item_focused("_primary_window"): + return + + # 2D to 3D delta + dx = app_data[1] + dy = app_data[2] + + self.skel.points3D[self.point_idx, :3] += 0.0002 * self.cam.rot.as_matrix()[:3, :3] @ np.array([dx, -dy, 0]) + self.need_update = True + + + with dpg.handler_registry(): + dpg.add_mouse_drag_handler(button=dpg.mvMouseButton_Left, callback=callback_camera_drag_rotate) + dpg.add_mouse_wheel_handler(callback=callback_camera_wheel_scale) + dpg.add_mouse_drag_handler(button=dpg.mvMouseButton_Middle, callback=callback_camera_drag_pan) + + # for skeleton editing + dpg.add_mouse_move_handler(callback=callback_set_mouse_loc) + dpg.add_mouse_click_handler(button=dpg.mvMouseButton_Right, callback=callback_skel_select) + dpg.add_mouse_drag_handler(button=dpg.mvMouseButton_Right, callback=callback_skel_drag) + + + dpg.create_viewport(title='pose viewer', resizable=False, width=self.W, height=self.H) + + ### global theme + with dpg.theme() as theme_no_padding: + with dpg.theme_component(dpg.mvAll): + # set all padding to 0 to avoid scroll bar + dpg.add_theme_style(dpg.mvStyleVar_WindowPadding, 0, 0, category=dpg.mvThemeCat_Core) + dpg.add_theme_style(dpg.mvStyleVar_FramePadding, 0, 0, category=dpg.mvThemeCat_Core) + dpg.add_theme_style(dpg.mvStyleVar_CellPadding, 0, 0, category=dpg.mvThemeCat_Core) + + dpg.bind_item_theme("_primary_window", theme_no_padding) + dpg.focus_item("_primary_window") + + dpg.setup_dearpygui() + + #dpg.show_metrics() + + dpg.show_viewport() + + + def render(self): + + while dpg.is_dearpygui_running(): + self.step() + dpg.render_dearpygui_frame() + + +if __name__ == '__main__': + + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument('--W', type=int, default=512, help="GUI width") + parser.add_argument('--H', type=int, default=512, help="GUI height") + parser.add_argument('--radius', type=float, default=3, help="default GUI camera radius from center") + parser.add_argument('--fovy', type=float, default=25, help="default 
GUI camera fovy") + + opt = parser.parse_args() + + gui = GUI(opt) + gui.render() \ No newline at end of file diff --git a/MuseTalk_project/3DMM/utils/visualization/ffmpeg_utils.py b/MuseTalk_project/3DMM/utils/visualization/ffmpeg_utils.py new file mode 100644 index 00000000..8cc8a715 --- /dev/null +++ b/MuseTalk_project/3DMM/utils/visualization/ffmpeg_utils.py @@ -0,0 +1,18 @@ +import os + +def imgs_to_video(img_dir, video_path, audio_path=None, verbose=False): + cmd = f"ffmpeg -i {img_dir}/%5d.png " + if audio_path is not None: + cmd += f"-i {audio_path} " + cmd += "-strict -2 " + cmd += "-c:v libx264 -pix_fmt yuv420p -b:v 2000k -y " + if verbose is False: + cmd += " -v quiet " + cmd += f"{video_path} " + + os.system(cmd) + + +if __name__ == '__main__': + imgs_to_video('infer_out/tmp_imgs', 'infer_out/tmp_imgs/out.mp4', 'data/raw/val_wavs/zozo.wav') + imgs_to_video('infer_out/tmp_imgs', 'infer_out/tmp_imgs/out2.mp4', 'data/raw/val_wavs/zozo.wav') \ No newline at end of file diff --git a/MuseTalk_project/3DMM/utils/visualization/lm_visualizer.py b/MuseTalk_project/3DMM/utils/visualization/lm_visualizer.py new file mode 100644 index 00000000..3c905573 --- /dev/null +++ b/MuseTalk_project/3DMM/utils/visualization/lm_visualizer.py @@ -0,0 +1,57 @@ +import numpy as np +import cv2 +from data_util.face3d_helper import Face3DHelper +from utils.visualization.ffmpeg_utils import imgs_to_video +import os + +face3d_helper = Face3DHelper('deep_3drecon/BFM') +# lrs3_stats = np.load('data/binary/lrs3/stats.npy',allow_pickle=True).tolist() +# lrs3_idexp_mean = lrs3_stats['idexp_lm3d_mean'].reshape([1,204]) +# lrs3_idexp_std = lrs3_stats['idexp_lm3d_std'].reshape([1,204]) + + +def render_idexp_npy_to_lm_video(npy_name, out_video_name, audio_name=None): + idexp_lm3d = np.load(npy_name) + lm3d = idexp_lm3d / 10 + face3d_helper.key_mean_shape.squeeze().reshape([1, -1]).cpu().numpy() + lm3d = lm3d.reshape([-1, 68, 3]) + + tmp_img_dir = os.path.join(os.path.dirname(out_video_name), 
"tmp_lm3d_imgs") + os.makedirs(tmp_img_dir, exist_ok=True) + + WH = 512 + lm3d = (lm3d * WH/2 + WH/2).astype(int) + eye_idx = list(range(36,48)) + mouth_idx = list(range(48,68)) + for i_img in range(len(lm3d)): + lm2d = lm3d[i_img ,:, :2] # [68, 2] + img = np.ones([WH+50, WH+50, 3], dtype=np.uint8) * 255 + + for i in range(len(lm2d)): + x, y = lm2d[i] + if i in eye_idx: + color = (0,0,255) + elif i in mouth_idx: + color = (0,255,0) + else: + color = (255,0,0) + img = cv2.circle(img, center=(x,y), radius=3, color=color, thickness=-1) + font = cv2.FONT_HERSHEY_SIMPLEX + img = cv2.flip(img, 0) + for i in range(len(lm2d)): + x, y = lm2d[i] + y = WH - y + img = cv2.putText(img, f"{i}", org=(x,y), fontFace=font, fontScale=0.3, color=(255,0,0)) + + out_name = os.path.join(tmp_img_dir, f'{format(i_img, "05d")}.png') + cv2.imwrite(out_name, img) + imgs_to_video(tmp_img_dir, out_video_name, audio_name) + os.system(f"rm -r {tmp_img_dir}") + +if __name__ == '__main__': + import argparse + argparser = argparse.ArgumentParser() + argparser.add_argument('--npy_name', type=str, default="infer_out/Obama/pred_lm3d/origin.npy", help='the path of landmark .npy') + argparser.add_argument('--audio_name', type=str, default="data/raw/val_wavs/origin.wav", help='the path of audio file') + argparser.add_argument('--out_path', type=str, default="visualized_lm3d/origin.mp4", help='the path to save visualization results') + args = argparser.parse_args() + render_idexp_npy_to_lm_video(args.npy_name, args.out_path, audio_name=args.audio_name) \ No newline at end of file diff --git a/MuseTalk_project/3DMM/utils/visualization/t-sne.py b/MuseTalk_project/3DMM/utils/visualization/t-sne.py new file mode 100644 index 00000000..a0322650 --- /dev/null +++ b/MuseTalk_project/3DMM/utils/visualization/t-sne.py @@ -0,0 +1,132 @@ +from openTSNE import TSNE +import numpy as np +import matplotlib +import matplotlib.pyplot as plt +import random + +def visualize( + x, + y, + ax=None, + title=None, + 
draw_legend=True, + draw_centers=False, + draw_cluster_labels=False, + colors=None, + legend_kwargs=None, + label_order=None, + **kwargs +): + + if ax is None: + _, ax = matplotlib.pyplot.subplots(figsize=(10, 8)) + + if title is not None: + ax.set_title(title) + + plot_params = {"alpha": kwargs.get("alpha", 0.6), "s": kwargs.get("s", 1)} + + # Create main plot + if label_order is not None: + assert all(np.isin(np.unique(y), label_order)) + classes = [l for l in label_order if l in np.unique(y)] + else: + classes = np.unique(y) + if colors is None: + default_colors = matplotlib.rcParams["axes.prop_cycle"] + colors = {k: v["color"] for k, v in zip(classes, default_colors())} + + point_colors = list(map(colors.get, y)) + + ax.scatter(x[:, 0], x[:, 1], c=point_colors, rasterized=True, **plot_params) + + # Plot mediods + if draw_centers: + centers = [] + for yi in classes: + mask = yi == y + centers.append(np.median(x[mask, :2], axis=0)) + centers = np.array(centers) + + center_colors = list(map(colors.get, classes)) + ax.scatter( + centers[:, 0], centers[:, 1], c=center_colors, s=48, alpha=1, edgecolor="k" + ) + + # Draw mediod labels + if draw_cluster_labels: + for idx, label in enumerate(classes): + ax.text( + centers[idx, 0], + centers[idx, 1] + 2.2, + label, + fontsize=kwargs.get("fontsize", 6), + horizontalalignment="center", + ) + + # Hide ticks and axis + ax.set_xticks([]), ax.set_yticks([]), ax.axis("off") + + if draw_legend: + legend_handles = [ + matplotlib.lines.Line2D( + [], + [], + marker="s", + color="w", + markerfacecolor=colors[yi], + ms=10, + alpha=1, + linewidth=0, + label=yi, + markeredgecolor="k", + ) + for yi in classes + ] + legend_kwargs_ = dict(loc="best", bbox_to_anchor=(0.05, 0.5), frameon=False, ) + if legend_kwargs is not None: + legend_kwargs_.update(legend_kwargs) + ax.legend(handles=legend_handles, **legend_kwargs_) + + +tsne = TSNE( + perplexity=30, + metric="euclidean", + n_jobs=8, + random_state=42, + verbose=True, +) + 
+idexp_lm3d_pred_lrs3 = np.load("infer_out/tmp_npys/lrs3_pred_all.npy") +idx = np.random.choice(np.arange(len(idexp_lm3d_pred_lrs3)), 10000) +idexp_lm3d_pred_lrs3 = idexp_lm3d_pred_lrs3[idx] + +person_ds = np.load("data/binary/videos/May/trainval_dataset.npy", allow_pickle=True).tolist() +person_idexp_mean = person_ds['idexp_lm3d_mean'].reshape([1,204]) +person_idexp_std = person_ds['idexp_lm3d_std'].reshape([1,204]) +person_idexp_lm3d_train = np.stack([s['idexp_lm3d_normalized'].reshape([204,]) for s in person_ds['train_samples']]) +person_idexp_lm3d_val = np.stack([s['idexp_lm3d_normalized'].reshape([204,]) for s in person_ds['val_samples']]) + +lrs3_stats = np.load('/home/yezhenhui/datasets/binary/lrs3_0702/stats.npy',allow_pickle=True).tolist() +lrs3_idexp_mean = lrs3_stats['idexp_lm3d_mean'].reshape([1,204]) +lrs3_idexp_std = lrs3_stats['idexp_lm3d_std'].reshape([1,204]) +person_idexp_lm3d_train = person_idexp_lm3d_train * person_idexp_std + person_idexp_mean +# person_idexp_lm3d_train = (person_idexp_lm3d_train - lrs3_idexp_mean) / lrs3_idexp_std +person_idexp_lm3d_val = person_idexp_lm3d_val * person_idexp_std + person_idexp_mean +# person_idexp_lm3d_val = (person_idexp_lm3d_val - lrs3_idexp_mean) / lrs3_idexp_std +idexp_lm3d_pred_lrs3 = idexp_lm3d_pred_lrs3 * lrs3_idexp_std + lrs3_idexp_mean + + +idexp_lm3d_pred_vae = np.load("infer_out/tmp_npys/pred_exp_0_vae.npy").reshape([-1,204]) +idexp_lm3d_pred_postnet = np.load("infer_out/tmp_npys/pred_exp_0_postnet_hubert.npy").reshape([-1,204]) +# idexp_lm3d_pred_postnet = idexp_lm3d_pred_postnet * lrs3_idexp_std + lrs3_idexp_mean + +idexp_lm3d_all = np.concatenate([idexp_lm3d_pred_lrs3, person_idexp_lm3d_train,idexp_lm3d_pred_vae, idexp_lm3d_pred_postnet]) +idexp_lm3d_all_emb = tsne.fit(idexp_lm3d_all) # array(float64) [B,50]==>[B, 2] +# z_p_emb = tsne.fit(z_p) # array(float64) [B,50]==>[B, 2] +y1 = ["pred_lrs3" for _ in range(len(idexp_lm3d_pred_lrs3))] +y2 = ["person_train" for _ in 
range(len(person_idexp_lm3d_train))] +y3 = ["vae" for _ in range(len(idexp_lm3d_pred_vae))] +y4 = ["postnet" for _ in range(len(idexp_lm3d_pred_postnet))] +visualize(idexp_lm3d_all_emb, y1+y2+y3+y4) +plt.savefig("infer_out/tmp_npys/lrs3_pred_all_0k.png") \ No newline at end of file diff --git a/MuseTalk_project/MuseTalk/Dockerfile b/MuseTalk_project/MuseTalk/Dockerfile new file mode 100644 index 00000000..17825f57 --- /dev/null +++ b/MuseTalk_project/MuseTalk/Dockerfile @@ -0,0 +1,56 @@ +FROM nvidia/cuda:11.8.0-cudnn8-runtime-ubuntu22.04 + +ENV DEBIAN_FRONTEND=noninteractive + +COPY ./Miniconda3-latest-Linux-x86_64.sh /workspace/Miniconda3-latest-Linux-x86_64.sh + +RUN apt-get update && apt-get install -y wget bzip2 && apt-get clean && \ + bash /workspace/Miniconda3-latest-Linux-x86_64.sh -b -p /opt/conda && \ + rm /workspace/Miniconda3-latest-Linux-x86_64.sh && \ + /opt/conda/bin/conda clean -a + +ENV PATH="/opt/conda/bin:$PATH" + +WORKDIR /workspace + +COPY ./MuseTalk /workspace + +COPY environments.yaml /workspace/environments.yaml + +COPY requirements.txt /workspace/requirements.txt + +RUN conda config --add channels nvidia && \ + conda config --set show_channel_urls yes && \ + conda config --set channel_priority false && \ + conda config --set remote_connect_timeout_secs 60 && \ + conda config --set remote_read_timeout_secs 120 && \ + conda config --set remote_max_retries 5 + + +RUN conda env create -f /workspace/environments.yaml + +RUN conda clean -a && \ + echo "conda activate muse" > ~/.bashrc + +RUN conda init bash + +SHELL ["conda", "run", "-n", "muse", "/bin/bash", "-c"] + +ENV FFMPEG_PATH=/workspace/ffmpeg-4.4-amd64-static +ENV PATH="${FFMPEG_PATH}:$PATH" + +RUN pip install omegaconf==2.3.0 +RUN pip install numpy==1.23.5 + +RUN apt-get update && apt-get install -y libgl1-mesa-glx +RUN apt-get update && apt-get install -y libglib2.0-0 +RUN pip install huggingface-hub==0.25.1 +RUN mim install "mmcv==2.1.0" + +COPY ./inference.py 
/workspace/scripts/inference.py + +RUN pip install scikit-image==0.24.0 +RUN pip install scenedetect +RUN pip install python_speech_features + +CMD ["bash"] \ No newline at end of file diff --git a/MuseTalk_project/MuseTalk/README.md b/MuseTalk_project/MuseTalk/README.md new file mode 100644 index 00000000..70579ca5 --- /dev/null +++ b/MuseTalk_project/MuseTalk/README.md @@ -0,0 +1,40 @@ +# MuseTalk +>https://github.com/TMElyralab/MuseTalk/tree/main + +## 登录synapse账户 + +``` +docker login docker.synapse.org +``` + +## 拉取镜像 +``` +docker pull docker.synapse.org/syn64432387/musetalk:latest +``` + +## 运行docker容器 + +``` +docker run --shm-size=16g -it --gpus all docker.synapse.org/syn64432387/musetalk +``` + +其中 `--shm-size=16g`意味着为docker容器分配16G内存,可以根据本地情况调整,但注意若分配内存较小可能导致因运行时内存不足报错 + +## 激活环境 + +``` +conda activate muse +``` + +## 1.运行inference(指令和github仓库相同) +``` +python -m scripts.inference --input_image 输入的视频/图片路径 --input_audio 输入的音频路径 --output_dir 结果保存路径 +``` +若不设置output_dir,则结果保存在/workspace/result + +## 2.运行评测 +首先准备数据放在docker容器的根目录下,在/workspace下运行指令 +``` +python demo.py --input_image 输入的视频/图片路径 --input_audio 输入的音频路径 --ground_truth 参考视频路径 +``` +若不设置参考视频,将以输入视频作为参考视频。评测结果保存在/workspace/evaluation \ No newline at end of file diff --git a/MuseTalk_project/MuseTalk/__init__.py b/MuseTalk_project/MuseTalk/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/MuseTalk_project/MuseTalk/app.py b/MuseTalk_project/MuseTalk/app.py new file mode 100644 index 00000000..feec2393 --- /dev/null +++ b/MuseTalk_project/MuseTalk/app.py @@ -0,0 +1,426 @@ +import os +import time +import pdb +import re + +import gradio as gr +import spaces +import numpy as np +import sys +import subprocess + +from huggingface_hub import snapshot_download +import requests + +import argparse +import os +from omegaconf import OmegaConf +import numpy as np +import cv2 +import torch +import glob +import pickle +from tqdm import tqdm +import copy +from argparse import Namespace +import shutil 
+import gdown +import imageio +import ffmpeg +from moviepy.editor import * + + +ProjectDir = os.path.abspath(os.path.dirname(__file__)) +CheckpointsDir = os.path.join(ProjectDir, "models") + +def print_directory_contents(path): + for child in os.listdir(path): + child_path = os.path.join(path, child) + if os.path.isdir(child_path): + print(child_path) + +def download_model(): + if not os.path.exists(CheckpointsDir): + os.makedirs(CheckpointsDir) + print("Checkpoint Not Downloaded, start downloading...") + tic = time.time() + snapshot_download( + repo_id="TMElyralab/MuseTalk", + local_dir=CheckpointsDir, + max_workers=8, + local_dir_use_symlinks=True, + force_download=True, resume_download=False + ) + # weight + os.makedirs(f"{CheckpointsDir}/sd-vae-ft-mse/") + snapshot_download( + repo_id="stabilityai/sd-vae-ft-mse", + local_dir=CheckpointsDir+'/sd-vae-ft-mse', + max_workers=8, + local_dir_use_symlinks=True, + force_download=True, resume_download=False + ) + #dwpose + os.makedirs(f"{CheckpointsDir}/dwpose/") + snapshot_download( + repo_id="yzd-v/DWPose", + local_dir=CheckpointsDir+'/dwpose', + max_workers=8, + local_dir_use_symlinks=True, + force_download=True, resume_download=False + ) + #vae + url = "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt" + response = requests.get(url) + # 确保请求成功 + if response.status_code == 200: + # 指定文件保存的位置 + file_path = f"{CheckpointsDir}/whisper/tiny.pt" + os.makedirs(f"{CheckpointsDir}/whisper/") + # 将文件内容写入指定位置 + with open(file_path, "wb") as f: + f.write(response.content) + else: + print(f"请求失败,状态码:{response.status_code}") + #gdown face parse + url = "https://drive.google.com/uc?id=154JgKpzCPW82qINcVieuPH3fZ2e0P812" + os.makedirs(f"{CheckpointsDir}/face-parse-bisent/") + file_path = f"{CheckpointsDir}/face-parse-bisent/79999_iter.pth" + gdown.download(url, file_path, quiet=False) + #resnet + url = 
"https://download.pytorch.org/models/resnet18-5c106cde.pth" + response = requests.get(url) + # 确保请求成功 + if response.status_code == 200: + # 指定文件保存的位置 + file_path = f"{CheckpointsDir}/face-parse-bisent/resnet18-5c106cde.pth" + # 将文件内容写入指定位置 + with open(file_path, "wb") as f: + f.write(response.content) + else: + print(f"请求失败,状态码:{response.status_code}") + + + toc = time.time() + + print(f"download cost {toc-tic} seconds") + print_directory_contents(CheckpointsDir) + + else: + print("Already download the model.") + + + + + +download_model() # for huggingface deployment. + + +from musetalk.utils.utils import get_file_type,get_video_fps,datagen +from musetalk.utils.preprocessing import get_landmark_and_bbox,read_imgs,coord_placeholder,get_bbox_range +from musetalk.utils.blending import get_image +from musetalk.utils.utils import load_all_model + + + + + + +@spaces.GPU(duration=600) +@torch.no_grad() +def inference(audio_path,video_path,bbox_shift,progress=gr.Progress(track_tqdm=True)): + args_dict={"result_dir":'./results/output', "fps":25, "batch_size":8, "output_vid_name":'', "use_saved_coord":False}#same with inferenece script + args = Namespace(**args_dict) + + input_basename = os.path.basename(video_path).split('.')[0] + audio_basename = os.path.basename(audio_path).split('.')[0] + output_basename = f"{input_basename}_{audio_basename}" + result_img_save_path = os.path.join(args.result_dir, output_basename) # related to video & audio inputs + crop_coord_save_path = os.path.join(result_img_save_path, input_basename+".pkl") # only related to video input + os.makedirs(result_img_save_path,exist_ok =True) + + if args.output_vid_name=="": + output_vid_name = os.path.join(args.result_dir, output_basename+".mp4") + else: + output_vid_name = os.path.join(args.result_dir, args.output_vid_name) + ############################################## extract frames from source video ############################################## + if get_file_type(video_path)=="video": + 
save_dir_full = os.path.join(args.result_dir, input_basename) + os.makedirs(save_dir_full,exist_ok = True) + # cmd = f"ffmpeg -v fatal -i {video_path} -start_number 0 {save_dir_full}/%08d.png" + # os.system(cmd) + # 读取视频 + reader = imageio.get_reader(video_path) + + # 保存图片 + for i, im in enumerate(reader): + imageio.imwrite(f"{save_dir_full}/{i:08d}.png", im) + input_img_list = sorted(glob.glob(os.path.join(save_dir_full, '*.[jpJP][pnPN]*[gG]'))) + fps = get_video_fps(video_path) + else: # input img folder + input_img_list = glob.glob(os.path.join(video_path, '*.[jpJP][pnPN]*[gG]')) + input_img_list = sorted(input_img_list, key=lambda x: int(os.path.splitext(os.path.basename(x))[0])) + fps = args.fps + #print(input_img_list) + ############################################## extract audio feature ############################################## + whisper_feature = audio_processor.audio2feat(audio_path) + whisper_chunks = audio_processor.feature2chunks(feature_array=whisper_feature,fps=fps) + ############################################## preprocess input image ############################################## + if os.path.exists(crop_coord_save_path) and args.use_saved_coord: + print("using extracted coordinates") + with open(crop_coord_save_path,'rb') as f: + coord_list = pickle.load(f) + frame_list = read_imgs(input_img_list) + else: + print("extracting landmarks...time consuming") + coord_list, frame_list = get_landmark_and_bbox(input_img_list, bbox_shift) + with open(crop_coord_save_path, 'wb') as f: + pickle.dump(coord_list, f) + bbox_shift_text=get_bbox_range(input_img_list, bbox_shift) + i = 0 + input_latent_list = [] + for bbox, frame in zip(coord_list, frame_list): + if bbox == coord_placeholder: + continue + x1, y1, x2, y2 = bbox + crop_frame = frame[y1:y2, x1:x2] + crop_frame = cv2.resize(crop_frame,(256,256),interpolation = cv2.INTER_LANCZOS4) + latents = vae.get_latents_for_unet(crop_frame) + input_latent_list.append(latents) + + # to smooth the first and the 
last frame + frame_list_cycle = frame_list + frame_list[::-1] + coord_list_cycle = coord_list + coord_list[::-1] + input_latent_list_cycle = input_latent_list + input_latent_list[::-1] + ############################################## inference batch by batch ############################################## + print("start inference") + video_num = len(whisper_chunks) + batch_size = args.batch_size + gen = datagen(whisper_chunks,input_latent_list_cycle,batch_size) + res_frame_list = [] + for i, (whisper_batch,latent_batch) in enumerate(tqdm(gen,total=int(np.ceil(float(video_num)/batch_size)))): + + tensor_list = [torch.FloatTensor(arr) for arr in whisper_batch] + audio_feature_batch = torch.stack(tensor_list).to(unet.device) # torch, B, 5*N,384 + audio_feature_batch = pe(audio_feature_batch) + + pred_latents = unet.model(latent_batch, timesteps, encoder_hidden_states=audio_feature_batch).sample + recon = vae.decode_latents(pred_latents) + for res_frame in recon: + res_frame_list.append(res_frame) + + ############################################## pad to full image ############################################## + print("pad talking image to original video") + for i, res_frame in enumerate(tqdm(res_frame_list)): + bbox = coord_list_cycle[i%(len(coord_list_cycle))] + ori_frame = copy.deepcopy(frame_list_cycle[i%(len(frame_list_cycle))]) + x1, y1, x2, y2 = bbox + try: + res_frame = cv2.resize(res_frame.astype(np.uint8),(x2-x1,y2-y1)) + except: + # print(bbox) + continue + + combine_frame = get_image(ori_frame,res_frame,bbox) + cv2.imwrite(f"{result_img_save_path}/{str(i).zfill(8)}.png",combine_frame) + + # cmd_img2video = f"ffmpeg -y -v fatal -r {fps} -f image2 -i {result_img_save_path}/%08d.png -vcodec libx264 -vf format=rgb24,scale=out_color_matrix=bt709,format=yuv420p temp.mp4" + # print(cmd_img2video) + # os.system(cmd_img2video) + # 帧率 + fps = 25 + # 图片路径 + # 输出视频路径 + output_video = 'temp.mp4' + + # 读取图片 + def is_valid_image(file): + pattern = 
re.compile(r'\d{8}\.png') + return pattern.match(file) + + images = [] + files = [file for file in os.listdir(result_img_save_path) if is_valid_image(file)] + files.sort(key=lambda x: int(x.split('.')[0])) + + for file in files: + filename = os.path.join(result_img_save_path, file) + images.append(imageio.imread(filename)) + + + # 保存视频 + imageio.mimwrite(output_video, images, 'FFMPEG', fps=fps, codec='libx264', pixelformat='yuv420p') + + # cmd_combine_audio = f"ffmpeg -y -v fatal -i {audio_path} -i temp.mp4 {output_vid_name}" + # print(cmd_combine_audio) + # os.system(cmd_combine_audio) + + input_video = './temp.mp4' + # Check if the input_video and audio_path exist + if not os.path.exists(input_video): + raise FileNotFoundError(f"Input video file not found: {input_video}") + if not os.path.exists(audio_path): + raise FileNotFoundError(f"Audio file not found: {audio_path}") + + # 读取视频 + reader = imageio.get_reader(input_video) + fps = reader.get_meta_data()['fps'] # 获取原视频的帧率 + reader.close() # 否则在win11上会报错:PermissionError: [WinError 32] 另一个程序正在使用此文件,进程无法访问。: 'temp.mp4' + # 将帧存储在列表中 + frames = images + + # 保存视频并添加音频 + # imageio.mimwrite(output_vid_name, frames, 'FFMPEG', fps=fps, codec='libx264', audio_codec='aac', input_params=['-i', audio_path]) + + # input_video = ffmpeg.input(input_video) + + # input_audio = ffmpeg.input(audio_path) + + print(len(frames)) + + # imageio.mimwrite( + # output_video, + # frames, + # 'FFMPEG', + # fps=25, + # codec='libx264', + # audio_codec='aac', + # input_params=['-i', audio_path], + # output_params=['-y'], # Add the '-y' flag to overwrite the output file if it exists + # ) + # writer = imageio.get_writer(output_vid_name, fps = 25, codec='libx264', quality=10, pixelformat='yuvj444p') + # for im in frames: + # writer.append_data(im) + # writer.close() + + + + + # Load the video + video_clip = VideoFileClip(input_video) + + # Load the audio + audio_clip = AudioFileClip(audio_path) + + # Set the audio to the video + video_clip = 
video_clip.set_audio(audio_clip) + + # Write the output video + video_clip.write_videofile(output_vid_name, codec='libx264', audio_codec='aac',fps=25) + + os.remove("temp.mp4") + #shutil.rmtree(result_img_save_path) + print(f"result is save to {output_vid_name}") + return output_vid_name,bbox_shift_text + + + +# load model weights +audio_processor,vae,unet,pe = load_all_model() +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +timesteps = torch.tensor([0], device=device) + + + + +def check_video(video): + if not isinstance(video, str): + return video # in case of none type + # Define the output video file name + dir_path, file_name = os.path.split(video) + if file_name.startswith("outputxxx_"): + return video + # Add the output prefix to the file name + output_file_name = "outputxxx_" + file_name + + os.makedirs('./results',exist_ok=True) + os.makedirs('./results/output',exist_ok=True) + os.makedirs('./results/input',exist_ok=True) + + # Combine the directory path and the new file name + output_video = os.path.join('./results/input', output_file_name) + + + # # Run the ffmpeg command to change the frame rate to 25fps + # command = f"ffmpeg -i {video} -r 25 -vcodec libx264 -vtag hvc1 -pix_fmt yuv420p crf 18 {output_video} -y" + + # read video + reader = imageio.get_reader(video) + fps = reader.get_meta_data()['fps'] # get fps from original video + + # conver fps to 25 + frames = [im for im in reader] + target_fps = 25 + + L = len(frames) + L_target = int(L / fps * target_fps) + original_t = [x / fps for x in range(1, L+1)] + t_idx = 0 + target_frames = [] + for target_t in range(1, L_target+1): + while target_t / target_fps > original_t[t_idx]: + t_idx += 1 # find the first t_idx so that target_t / target_fps <= original_t[t_idx] + if t_idx >= L: + break + target_frames.append(frames[t_idx]) + + # save video + imageio.mimwrite(output_video, target_frames, 'FFMPEG', fps=25, codec='libx264', quality=9, pixelformat='yuv420p') + return output_video 
+ + + + +css = """#input_img {max-width: 1024px !important} #output_vid {max-width: 1024px; max-height: 576px}""" + +with gr.Blocks(css=css) as demo: + gr.Markdown( + "

MuseTalk: Real-Time High Quality Lip Synchronization with Latent Space Inpainting

\ +

\ +
\ + Yue Zhang \*,\ + Minhao Liu\*,\ + Zhaokang Chen,\ + Bin Wu,\ + Yingjie He,\ + Chao Zhan,\ + Wenjiang Zhou\ + (*Equal Contribution, Corresponding Author, benbinwu@tencent.com)\ + Lyra Lab, Tencent Music Entertainment\ +

\ + [Github Repo]\ + [Huggingface]\ + [Technical report(Coming Soon)] \ + [Project Page(Coming Soon)]
" + ) + + with gr.Row(): + with gr.Column(): + audio = gr.Audio(label="Driven Audio",type="filepath") + video = gr.Video(label="Reference Video",sources=['upload']) + bbox_shift = gr.Number(label="BBox_shift value, px", value=0) + bbox_shift_scale = gr.Textbox(label="BBox_shift recommend value lower bound,The corresponding bbox range is generated after the initial result is generated. \n If the result is not good, it can be adjusted according to this reference value", value="",interactive=False) + + btn = gr.Button("Generate") + out1 = gr.Video() + + video.change( + fn=check_video, inputs=[video], outputs=[video] + ) + btn.click( + fn=inference, + inputs=[ + audio, + video, + bbox_shift, + ], + outputs=[out1,bbox_shift_scale] + ) + +# Set the IP and port +ip_address = "0.0.0.0" # Replace with your desired IP address +port_number = 7860 # Replace with your desired port number + + +demo.queue().launch( + share=False , debug=True, server_name=ip_address, server_port=port_number +) diff --git a/MuseTalk_project/MuseTalk/assets/BBOX_SHIFT.md b/MuseTalk_project/MuseTalk/assets/BBOX_SHIFT.md new file mode 100644 index 00000000..b164f4a2 --- /dev/null +++ b/MuseTalk_project/MuseTalk/assets/BBOX_SHIFT.md @@ -0,0 +1,26 @@ +## Why is there a "bbox_shift" parameter? +When processing training data, we utilize the combination of face detection results (bbox) and facial landmarks to determine the region of the head segmentation box. Specifically, we use the upper bound of the bbox as the upper boundary of the segmentation box, the maximum y value of the facial landmarks coordinates as the lower boundary of the segmentation box, and the minimum and maximum x values of the landmarks coordinates as the left and right boundaries of the segmentation box. By processing the dataset in this way, we can ensure the integrity of the face. + +However, we have observed that the masked ratio on the face varies across different images due to the varying face shapes of subjects. 
Furthermore, we found that the upper-bound of the mask mainly lies close to the landmark28, landmark29 and landmark30 points (as shown in Fig.1), which correspond to proportions of 15%, 63%, and 22% in the dataset, respectively. + +During the inference process, we discovered that as the upper-bound of the mask gets closer to the mouth (near landmark30), the audio features contribute more to lip movements. Conversely, as the upper-bound of the mask moves away from the mouth (near landmark28), the audio features contribute more to generating details of facial appearance. Hence, we define this characteristic as a parameter that can adjust the contribution of audio features to generating lip movements, which users can modify according to their specific needs in practical scenarios. + +![landmark](figs/landmark_ref.png) + +Fig.1. Facial landmarks +### Step 0. +Running with the default configuration to obtain the adjustable value range. +``` +python -m scripts.inference --inference_config configs/inference/test.yaml +``` +``` +********************************************bbox_shift parameter adjustment********************************************************** +Total frame:「838」 Manually adjust range : [ -9~9 ] , the current value: 0 +************************************************************************************************************************************* +``` +### Step 1. +Re-run the script within the above range. +``` +python -m scripts.inference --inference_config configs/inference/test.yaml --bbox_shift xx # where xx is in [-9, 9]. +``` +In our experimental observations, we found that positive values (moving towards the lower half) generally increase mouth openness, while negative values (moving towards the upper half) generally decrease mouth openness. However, it's important to note that this is not an absolute rule, and users may need to adjust the parameter according to their specific needs and the desired effect. 
\ No newline at end of file diff --git a/MuseTalk_project/MuseTalk/assets/demo/man/man.png b/MuseTalk_project/MuseTalk/assets/demo/man/man.png new file mode 100644 index 00000000..06a85a28 Binary files /dev/null and b/MuseTalk_project/MuseTalk/assets/demo/man/man.png differ diff --git a/MuseTalk_project/MuseTalk/assets/demo/monalisa/monalisa.png b/MuseTalk_project/MuseTalk/assets/demo/monalisa/monalisa.png new file mode 100644 index 00000000..6157ea42 Binary files /dev/null and b/MuseTalk_project/MuseTalk/assets/demo/monalisa/monalisa.png differ diff --git a/MuseTalk_project/MuseTalk/assets/demo/musk/musk.png b/MuseTalk_project/MuseTalk/assets/demo/musk/musk.png new file mode 100644 index 00000000..06522be6 Binary files /dev/null and b/MuseTalk_project/MuseTalk/assets/demo/musk/musk.png differ diff --git a/MuseTalk_project/MuseTalk/assets/demo/sit/sit.jpeg b/MuseTalk_project/MuseTalk/assets/demo/sit/sit.jpeg new file mode 100644 index 00000000..7178a6be Binary files /dev/null and b/MuseTalk_project/MuseTalk/assets/demo/sit/sit.jpeg differ diff --git a/MuseTalk_project/MuseTalk/assets/demo/sun1/sun.png b/MuseTalk_project/MuseTalk/assets/demo/sun1/sun.png new file mode 100644 index 00000000..11f7b85c Binary files /dev/null and b/MuseTalk_project/MuseTalk/assets/demo/sun1/sun.png differ diff --git a/MuseTalk_project/MuseTalk/assets/demo/sun2/sun.png b/MuseTalk_project/MuseTalk/assets/demo/sun2/sun.png new file mode 100644 index 00000000..11f7b85c Binary files /dev/null and b/MuseTalk_project/MuseTalk/assets/demo/sun2/sun.png differ diff --git a/MuseTalk_project/MuseTalk/assets/demo/video1/video1.png b/MuseTalk_project/MuseTalk/assets/demo/video1/video1.png new file mode 100644 index 00000000..f9288c9d Binary files /dev/null and b/MuseTalk_project/MuseTalk/assets/demo/video1/video1.png differ diff --git a/MuseTalk_project/MuseTalk/assets/demo/yongen/yongen.jpeg b/MuseTalk_project/MuseTalk/assets/demo/yongen/yongen.jpeg new file mode 100644 index 00000000..953e8fa9 
Binary files /dev/null and b/MuseTalk_project/MuseTalk/assets/demo/yongen/yongen.jpeg differ diff --git a/MuseTalk_project/MuseTalk/assets/figs/landmark_ref.png b/MuseTalk_project/MuseTalk/assets/figs/landmark_ref.png new file mode 100644 index 00000000..2e166ac9 Binary files /dev/null and b/MuseTalk_project/MuseTalk/assets/figs/landmark_ref.png differ diff --git a/MuseTalk_project/MuseTalk/assets/figs/musetalk_arc.jpg b/MuseTalk_project/MuseTalk/assets/figs/musetalk_arc.jpg new file mode 100644 index 00000000..6b3356a9 Binary files /dev/null and b/MuseTalk_project/MuseTalk/assets/figs/musetalk_arc.jpg differ diff --git a/MuseTalk_project/MuseTalk/configs/inference/realtime.yaml b/MuseTalk_project/MuseTalk/configs/inference/realtime.yaml new file mode 100644 index 00000000..d4092ac2 --- /dev/null +++ b/MuseTalk_project/MuseTalk/configs/inference/realtime.yaml @@ -0,0 +1,10 @@ +avator_1: + preparation: False + bbox_shift: 5 + video_path: "data/video/sun.mp4" + audio_clips: + audio_0: "data/audio/yongen.wav" + audio_1: "data/audio/sun.wav" + + + diff --git a/MuseTalk_project/MuseTalk/configs/inference/test.yaml b/MuseTalk_project/MuseTalk/configs/inference/test.yaml new file mode 100644 index 00000000..4ec8aca9 --- /dev/null +++ b/MuseTalk_project/MuseTalk/configs/inference/test.yaml @@ -0,0 +1,10 @@ +task_0: + video_path: "data/video/yongen.mp4" + audio_path: "data/audio/yongen.wav" + +task_1: + video_path: "data/video/sun.mp4" + audio_path: "data/audio/sun.wav" + bbox_shift: -7 + + diff --git a/MuseTalk_project/MuseTalk/data/audio/sun.wav b/MuseTalk_project/MuseTalk/data/audio/sun.wav new file mode 100644 index 00000000..d431e2b9 Binary files /dev/null and b/MuseTalk_project/MuseTalk/data/audio/sun.wav differ diff --git a/MuseTalk_project/MuseTalk/data/audio/yongen.wav b/MuseTalk_project/MuseTalk/data/audio/yongen.wav new file mode 100644 index 00000000..ec389e3e Binary files /dev/null and b/MuseTalk_project/MuseTalk/data/audio/yongen.wav differ diff --git 
a/MuseTalk_project/MuseTalk/data/video/sun.mp4 b/MuseTalk_project/MuseTalk/data/video/sun.mp4 new file mode 100644 index 00000000..30221a34 Binary files /dev/null and b/MuseTalk_project/MuseTalk/data/video/sun.mp4 differ diff --git a/MuseTalk_project/MuseTalk/data/video/yongen.mp4 b/MuseTalk_project/MuseTalk/data/video/yongen.mp4 new file mode 100644 index 00000000..3253cbc2 Binary files /dev/null and b/MuseTalk_project/MuseTalk/data/video/yongen.mp4 differ diff --git a/MuseTalk_project/MuseTalk/demo.py b/MuseTalk_project/MuseTalk/demo.py new file mode 100644 index 00000000..81c218e8 --- /dev/null +++ b/MuseTalk_project/MuseTalk/demo.py @@ -0,0 +1,57 @@ +import argparse +import subprocess +import re +import os + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--input_image", default="data/video/yongen.mp4") + parser.add_argument("--input_audio", default="data/audio/yongen.wav") + parser.add_argument("--output_dir", default="./result") + parser.add_argument("--ground_truth", default=None) + args = parser.parse_args() + + ffmpeg_dir = '/workspace/ffmpeg-4.4-amd64-static' + ffmpeg_executable = os.path.join(ffmpeg_dir, 'ffmpeg') + + # ffmpeg Ŀ¼ PATH + os.environ['PATH'] = ffmpeg_dir + ":" + os.environ.get('PATH', '') + + # ѡ֤ ffmpeg Ƿִ + if not (os.path.isfile(ffmpeg_executable) and os.access(ffmpeg_executable, os.X_OK)): + raise FileNotFoundError(f"ffmpeg not found or not executable at {ffmpeg_executable}") + + os.makedirs('evaluation', exist_ok=True) + input_basename = os.path.basename(args.input_image).split('.')[0] + audio_basename = os.path.basename(args.input_audio).split('.')[0] + output_basename = f"{input_basename}_{audio_basename}" + video_path = os.path.join(args.output_dir, output_basename + '.mp4') + frame_dir = os.path.join(args.output_dir, output_basename) + truth_dir = os.path.join(args.output_dir, output_basename + '_truth') + + # Ƶǰ PATH + env = os.environ.copy() + + command = ['python', 'evaluate.py', + 
'--input_image', args.input_image, + '--input_audio', args.input_audio, + '--output_dir', args.output_dir] + if args.ground_truth is not None: + command += ['--ground_truth', args.ground_truth] + + # ݻӽ + subprocess.call(command, env=env) + + def extract_floats(text): + return [float(s) for s in re.findall(r'-?\d+\.\d+', text)] + + subprocess.call(['python', 'syncnet/run_pipeline.py', '--videofile', video_path, "--reference", 'wav2lip', '--data_dir', 'tmp_dir'], env=env) + result = subprocess.check_output(['python', 'syncnet/calculate_scores_real_videos.py', '--videofile', video_path, '--reference', 'wav2lip', '--data_dir', 'tmp_dir'], env=env) + result = result.decode('utf-8') + floats = extract_floats(result) + LSE_D, LSE_C = floats[0], floats[1] + with open(f'evaluation/{output_basename}.txt', 'a') as file: + file.write(f'LSE-C: {LSE_C}\n') + file.write(f'LSE-D: {LSE_D}\n') + + print(f'Result has been saved to evaluation/{output_basename}.txt') diff --git a/MuseTalk_project/MuseTalk/evaluate.py b/MuseTalk_project/MuseTalk/evaluate.py new file mode 100644 index 00000000..10fa18df --- /dev/null +++ b/MuseTalk_project/MuseTalk/evaluate.py @@ -0,0 +1,104 @@ +from scripts import inference +import argparse +import os +import cv2 +import skimage +from PIL import Image +import numpy as np +from metrics.niqe import niqe +from skimage.metrics import peak_signal_noise_ratio +from metrics.fid import fid +from skimage.metrics import structural_similarity +import subprocess +import re +from tqdm import tqdm + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--inference_config", type=str, default="configs/inference/test_img.yaml") + parser.add_argument("--bbox_shift", type=int, default=0) + parser.add_argument("--result_dir", default='./results', help="path to output") + + parser.add_argument("--fps", type=int, default=25) + parser.add_argument("--batch_size", type=int, default=8) + parser.add_argument("--output_vid_name", type=str, 
default=None) + parser.add_argument("--use_saved_coord", + action="store_true", + help='use saved coordinate to save time') + parser.add_argument("--use_float16", + action="store_true", + help="Whether use float16 to speed up inference", + ) + parser.add_argument("--input_image",default="data/video/yongen.mp4") + parser.add_argument("--input_audio",default="data/audio/yongen.wav") + parser.add_argument("--output_dir",default="./result") + parser.add_argument("--ground_truth",default=None) + + args = parser.parse_args() + result=args.output_dir + input_basename = os.path.basename(args.input_image).split('.')[0] + audio_basename = os.path.basename(args.input_audio).split('.')[0] + output_basename = f"{input_basename}_{audio_basename}" + video_path=os.path.join(args.output_dir,output_basename+'.mp4') + frame_dir=os.path.join(args.output_dir,output_basename) + truth_dir=os.path.join(args.output_dir,output_basename+'_truth') + inference.main(args) + + os.makedirs(frame_dir,exist_ok=True) + os.makedirs(truth_dir,exist_ok=True) + capture=cv2.VideoCapture(video_path) + if args.ground_truth==None: + capture_t=cv2.VideoCapture(args.input_image) + else: + print("ground_truth") + capture_t=cv2.VideoCapture(args.ground_truth) + real,gen=[],[] + cnt=0 + while True: + ret,img=capture.read() + ret_t,img_t=capture_t.read() + + if (not ret) or (not ret_t): + break + + cv2.imwrite(f"{frame_dir}/{str(cnt).zfill(8)}.png",img) + cv2.imwrite(f"{truth_dir}/{str(cnt).zfill(8)}.png",img_t) + cnt=cnt+1 + + img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB) + img_t=cv2.cvtColor(img_t,cv2.COLOR_BGR2RGB) + gen.append(img) + real.append(img_t) + + FID=fid(truth_dir,frame_dir) + print(FID) + # def extract_floats(text): + # return [float(s) for s in re.findall(r'-?\d+\.\d+', text)] + # subprocess.call(['python','syncnet/run_pipeline.py','--videofile',video_path,"--reference",'wav2lip','--data_dir','tmp_dir']) + # 
result=subprocess.check_output(['python','syncnet/calculate_scores_real_videos.py','--videofile',video_path,'--reference','wav2lip','--data_dir','tmp_dir']) + # result=result.decode('utf-8') + # floats=extract_floats(result) + # LSE_D,LSE_C=floats[0],floats[1] + # print(LSE_D,LSE_C) + + NIQE,PSNR,SSIM=0,0,0 + for i in tqdm(range(len(gen))): + img,img_t=gen[i],real[i] + image = Image.fromarray(img) + image_t=Image.fromarray(img_t) + image_g = np.array(image.convert('LA'))[:, :, 0] + N,P,S=niqe(image_g),peak_signal_noise_ratio(img_t/255.0,img/255.0),structural_similarity(img,img_t,channel_axis=2) + #print(f'{i}: {N} {P} {S}') + NIQE=NIQE+N + PSNR=PSNR+P #DB + SSIM=SSIM+S + cnt=len(gen) + NIQE,PSNR,SSIM=NIQE/cnt,PSNR/cnt,SSIM/cnt + + result_file=f'evaluation/{output_basename}.txt' + with open(result_file,'w') as file: + file.write(f'NIQE: {NIQE}\n') + file.write(f'PSNR: {PSNR}\n') + file.write(f'FID: {FID}\n') + file.write(f'SSIM: {SSIM}\n') + diff --git a/MuseTalk_project/MuseTalk/ffmpeg-4.4-amd64-static/GPLv3.txt b/MuseTalk_project/MuseTalk/ffmpeg-4.4-amd64-static/GPLv3.txt new file mode 100644 index 00000000..94a9ed02 --- /dev/null +++ b/MuseTalk_project/MuseTalk/ffmpeg-4.4-amd64-static/GPLv3.txt @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. 
We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. 
The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. 
Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. 
A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/philosophy/why-not-lgpl.html>. 
diff --git a/MuseTalk_project/MuseTalk/ffmpeg-4.4-amd64-static/ffmpeg b/MuseTalk_project/MuseTalk/ffmpeg-4.4-amd64-static/ffmpeg new file mode 100644 index 00000000..aa22e7e7 Binary files /dev/null and b/MuseTalk_project/MuseTalk/ffmpeg-4.4-amd64-static/ffmpeg differ diff --git a/MuseTalk_project/MuseTalk/ffmpeg-4.4-amd64-static/ffprobe b/MuseTalk_project/MuseTalk/ffmpeg-4.4-amd64-static/ffprobe new file mode 100644 index 00000000..aa752ed6 Binary files /dev/null and b/MuseTalk_project/MuseTalk/ffmpeg-4.4-amd64-static/ffprobe differ diff --git a/MuseTalk_project/MuseTalk/ffmpeg-4.4-amd64-static/manpages/ffmpeg-all.txt b/MuseTalk_project/MuseTalk/ffmpeg-4.4-amd64-static/manpages/ffmpeg-all.txt new file mode 100644 index 00000000..be5281d6 --- /dev/null +++ b/MuseTalk_project/MuseTalk/ffmpeg-4.4-amd64-static/manpages/ffmpeg-all.txt @@ -0,0 +1,42496 @@ +FFMPEG-ALL(1) FFMPEG-ALL(1) + +NAME + ffmpeg - ffmpeg video converter + +SYNOPSIS + ffmpeg [global_options] {[input_file_options] -i input_url} ... + {[output_file_options] output_url} ... + +DESCRIPTION + ffmpeg is a very fast video and audio converter that can also grab from + a live audio/video source. It can also convert between arbitrary sample + rates and resize video on the fly with a high quality polyphase filter. + + ffmpeg reads from an arbitrary number of input "files" (which can be + regular files, pipes, network streams, grabbing devices, etc.), + specified by the "-i" option, and writes to an arbitrary number of + output "files", which are specified by a plain output url. Anything + found on the command line which cannot be interpreted as an option is + considered to be an output url. + + Each input or output url can, in principle, contain any number of + streams of different types (video/audio/subtitle/attachment/data). The + allowed number and/or types of streams may be limited by the container + format. 
Selecting which streams from which inputs will go into which + output is either done automatically or with the "-map" option (see the + Stream selection chapter). + + To refer to input files in options, you must use their indices + (0-based). E.g. the first input file is 0, the second is 1, etc. + Similarly, streams within a file are referred to by their indices. E.g. + "2:3" refers to the fourth stream in the third input file. Also see the + Stream specifiers chapter. + + As a general rule, options are applied to the next specified file. + Therefore, order is important, and you can have the same option on the + command line multiple times. Each occurrence is then applied to the + next input or output file. Exceptions from this rule are the global + options (e.g. verbosity level), which should be specified first. + + Do not mix input and output files -- first specify all input files, + then all output files. Also do not mix options which belong to + different files. All options apply ONLY to the next input or output + file and are reset between files. + + o To set the video bitrate of the output file to 64 kbit/s: + + ffmpeg -i input.avi -b:v 64k -bufsize 64k output.avi + + o To force the frame rate of the output file to 24 fps: + + ffmpeg -i input.avi -r 24 output.avi + + o To force the frame rate of the input file (valid for raw formats + only) to 1 fps and the frame rate of the output file to 24 fps: + + ffmpeg -r 1 -i input.m2v -r 24 output.avi + + The format option may be needed for raw input files. 
+ +DETAILED DESCRIPTION + The transcoding process in ffmpeg for each output can be described by + the following diagram: + + _______ ______________ + | | | | + | input | demuxer | encoded data | decoder + | file | ---------> | packets | -----+ + |_______| |______________| | + v + _________ + | | + | decoded | + | frames | + |_________| + ________ ______________ | + | | | | | + | output | <-------- | encoded data | <----+ + | file | muxer | packets | encoder + |________| |______________| + + ffmpeg calls the libavformat library (containing demuxers) to read + input files and get packets containing encoded data from them. When + there are multiple input files, ffmpeg tries to keep them synchronized + by tracking lowest timestamp on any active input stream. + + Encoded packets are then passed to the decoder (unless streamcopy is + selected for the stream, see further for a description). The decoder + produces uncompressed frames (raw video/PCM audio/...) which can be + processed further by filtering (see next section). After filtering, the + frames are passed to the encoder, which encodes them and outputs + encoded packets. Finally those are passed to the muxer, which writes + the encoded packets to the output file. + + Filtering + Before encoding, ffmpeg can process raw audio and video frames using + filters from the libavfilter library. Several chained filters form a + filter graph. ffmpeg distinguishes between two types of filtergraphs: + simple and complex. + + Simple filtergraphs + + Simple filtergraphs are those that have exactly one input and output, + both of the same type. 
In the above diagram they can be represented by + simply inserting an additional step between decoding and encoding: + + _________ ______________ + | | | | + | decoded | | encoded data | + | frames |\ _ | packets | + |_________| \ /||______________| + \ __________ / + simple _\|| | / encoder + filtergraph | filtered |/ + | frames | + |__________| + + Simple filtergraphs are configured with the per-stream -filter option + (with -vf and -af aliases for video and audio respectively). A simple + filtergraph for video can look for example like this: + + _______ _____________ _______ ________ + | | | | | | | | + | input | ---> | deinterlace | ---> | scale | ---> | output | + |_______| |_____________| |_______| |________| + + Note that some filters change frame properties but not frame contents. + E.g. the "fps" filter in the example above changes number of frames, + but does not touch the frame contents. Another example is the "setpts" + filter, which only sets timestamps and otherwise passes the frames + unchanged. + + Complex filtergraphs + + Complex filtergraphs are those which cannot be described as simply a + linear processing chain applied to one stream. This is the case, for + example, when the graph has more than one input and/or output, or when + output stream type is different from input. They can be represented + with the following diagram: + + _________ + | | + | input 0 |\ __________ + |_________| \ | | + \ _________ /| output 0 | + \ | | / |__________| + _________ \| complex | / + | | | |/ + | input 1 |---->| filter |\ + |_________| | | \ __________ + /| graph | \ | | + / | | \| output 1 | + _________ / |_________| |__________| + | | / + | input 2 |/ + |_________| + + Complex filtergraphs are configured with the -filter_complex option. + Note that this option is global, since a complex filtergraph, by its + nature, cannot be unambiguously associated with a single stream or + file. + + The -lavfi option is equivalent to -filter_complex. 
+ + A trivial example of a complex filtergraph is the "overlay" filter, + which has two video inputs and one video output, containing one video + overlaid on top of the other. Its audio counterpart is the "amix" + filter. + + Stream copy + Stream copy is a mode selected by supplying the "copy" parameter to the + -codec option. It makes ffmpeg omit the decoding and encoding step for + the specified stream, so it does only demuxing and muxing. It is useful + for changing the container format or modifying container-level + metadata. The diagram above will, in this case, simplify to this: + + _______ ______________ ________ + | | | | | | + | input | demuxer | encoded data | muxer | output | + | file | ---------> | packets | -------> | file | + |_______| |______________| |________| + + Since there is no decoding or encoding, it is very fast and there is no + quality loss. However, it might not work in some cases because of many + factors. Applying filters is obviously also impossible, since filters + work on uncompressed data. + +STREAM SELECTION + ffmpeg provides the "-map" option for manual control of stream + selection in each output file. Users can skip "-map" and let ffmpeg + perform automatic stream selection as described below. The "-vn / -an / + -sn / -dn" options can be used to skip inclusion of video, audio, + subtitle and data streams respectively, whether manually mapped or + automatically selected, except for those streams which are outputs of + complex filtergraphs. + + Description + The sub-sections that follow describe the various rules that are + involved in stream selection. The examples that follow next show how + these rules are applied in practice. + + While every effort is made to accurately reflect the behavior of the + program, FFmpeg is under continuous development and the code may have + changed since the time of this writing. 
+ + Automatic stream selection + + In the absence of any map options for a particular output file, ffmpeg + inspects the output format to check which type of streams can be + included in it, viz. video, audio and/or subtitles. For each acceptable + stream type, ffmpeg will pick one stream, when available, from among + all the inputs. + + It will select that stream based upon the following criteria: + + o for video, it is the stream with the highest resolution, + + o for audio, it is the stream with the most channels, + + o for subtitles, it is the first subtitle stream found but there's a + caveat. The output format's default subtitle encoder can be either + text-based or image-based, and only a subtitle stream of the same + type will be chosen. + + In the case where several streams of the same type rate equally, the + stream with the lowest index is chosen. + + Data or attachment streams are not automatically selected and can only + be included using "-map". + + Manual stream selection + + When "-map" is used, only user-mapped streams are included in that + output file, with one possible exception for filtergraph outputs + described below. + + Complex filtergraphs + + If there are any complex filtergraph output streams with unlabeled + pads, they will be added to the first output file. This will lead to a + fatal error if the stream type is not supported by the output format. + In the absence of the map option, the inclusion of these streams leads + to the automatic stream selection of their types being skipped. If map + options are present, these filtergraph streams are included in addition + to the mapped streams. + + Complex filtergraph output streams with labeled pads must be mapped + once and exactly once. + + Stream handling + + Stream handling is independent of stream selection, with an exception + for subtitles described below. Stream handling is set via the "-codec" + option addressed to streams within a specific output file. 
In + particular, codec options are applied by ffmpeg after the stream + selection process and thus do not influence the latter. If no "-codec" + option is specified for a stream type, ffmpeg will select the default + encoder registered by the output file muxer. + + An exception exists for subtitles. If a subtitle encoder is specified + for an output file, the first subtitle stream found of any type, text + or image, will be included. ffmpeg does not validate if the specified + encoder can convert the selected stream or if the converted stream is + acceptable within the output format. This applies generally as well: + when the user sets an encoder manually, the stream selection process + cannot check if the encoded stream can be muxed into the output file. + If it cannot, ffmpeg will abort and all output files will fail to be + processed. + + Examples + The following examples illustrate the behavior, quirks and limitations + of ffmpeg's stream selection methods. + + They assume the following three input files. + + input file 'A.avi' + stream 0: video 640x360 + stream 1: audio 2 channels + + input file 'B.mp4' + stream 0: video 1920x1080 + stream 1: audio 2 channels + stream 2: subtitles (text) + stream 3: audio 5.1 channels + stream 4: subtitles (text) + + input file 'C.mkv' + stream 0: video 1280x720 + stream 1: audio 2 channels + stream 2: subtitles (image) + + Example: automatic stream selection + + ffmpeg -i A.avi -i B.mp4 out1.mkv out2.wav -map 1:a -c:a copy out3.mov + + There are three output files specified, and for the first two, no + "-map" options are set, so ffmpeg will select streams for these two + files automatically. 
+ + out1.mkv is a Matroska container file and accepts video, audio and + subtitle streams, so ffmpeg will try to select one of each type. For + video, it will select "stream 0" from B.mp4, which has the highest + resolution among all the input video streams. For audio, it will select + "stream 3" from B.mp4, since it has the greatest number of channels. For + subtitles, it will select "stream 2" from B.mp4, which is the first + subtitle stream from among A.avi and B.mp4. + + out2.wav accepts only audio streams, so only "stream 3" from B.mp4 is + selected. + + For out3.mov, since a "-map" option is set, no automatic stream + selection will occur. The "-map 1:a" option will select all audio + streams from the second input B.mp4. No other streams will be included + in this output file. + + For the first two outputs, all included streams will be transcoded. The + encoders chosen will be the default ones registered by each output + format, which may not match the codec of the selected input streams. + + For the third output, codec option for audio streams has been set to + "copy", so no decoding-filtering-encoding operations will occur, or can + occur. Packets of selected streams shall be conveyed from the input + file and muxed within the output file. + + Example: automatic subtitles selection + + ffmpeg -i C.mkv out1.mkv -c:s dvdsub -an out2.mkv + + Although out1.mkv is a Matroska container file which accepts subtitle + streams, only a video and audio stream shall be selected. The subtitle + stream of C.mkv is image-based and the default subtitle encoder of the + Matroska muxer is text-based, so a transcode operation for the + subtitles is expected to fail and hence the stream isn't selected. + However, in out2.mkv, a subtitle encoder is specified in the command + and so, the subtitle stream is selected, in addition to the video + stream. The presence of "-an" disables audio stream selection for + out2.mkv. 
+ + Example: unlabeled filtergraph outputs + + ffmpeg -i A.avi -i C.mkv -i B.mp4 -filter_complex "overlay" out1.mp4 out2.srt + + A filtergraph is setup here using the "-filter_complex" option and + consists of a single video filter. The "overlay" filter requires + exactly two video inputs, but none are specified, so the first two + available video streams are used, those of A.avi and C.mkv. The output + pad of the filter has no label and so is sent to the first output file + out1.mp4. Due to this, automatic selection of the video stream is + skipped, which would have selected the stream in B.mp4. The audio + stream with most channels viz. "stream 3" in B.mp4, is chosen + automatically. No subtitle stream is chosen however, since the MP4 + format has no default subtitle encoder registered, and the user hasn't + specified a subtitle encoder. + + The 2nd output file, out2.srt, only accepts text-based subtitle + streams. So, even though the first subtitle stream available belongs to + C.mkv, it is image-based and hence skipped. The selected stream, + "stream 2" in B.mp4, is the first text-based subtitle stream. + + Example: labeled filtergraph outputs + + ffmpeg -i A.avi -i B.mp4 -i C.mkv -filter_complex "[1:v]hue=s=0[outv];overlay;aresample" \ + -map '[outv]' -an out1.mp4 \ + out2.mkv \ + -map '[outv]' -map 1:a:0 out3.mkv + + The above command will fail, as the output pad labelled "[outv]" has + been mapped twice. None of the output files shall be processed. + + ffmpeg -i A.avi -i B.mp4 -i C.mkv -filter_complex "[1:v]hue=s=0[outv];overlay;aresample" \ + -an out1.mp4 \ + out2.mkv \ + -map 1:a:0 out3.mkv + + This command above will also fail as the hue filter output has a label, + "[outv]", and hasn't been mapped anywhere. 
+ + The command should be modified as follows, + + ffmpeg -i A.avi -i B.mp4 -i C.mkv -filter_complex "[1:v]hue=s=0,split=2[outv1][outv2];overlay;aresample" \ + -map '[outv1]' -an out1.mp4 \ + out2.mkv \ + -map '[outv2]' -map 1:a:0 out3.mkv + + The video stream from B.mp4 is sent to the hue filter, whose output is + cloned once using the split filter, and both outputs labelled. Then a + copy each is mapped to the first and third output files. + + The overlay filter, requiring two video inputs, uses the first two + unused video streams. Those are the streams from A.avi and C.mkv. The + overlay output isn't labelled, so it is sent to the first output file + out1.mp4, regardless of the presence of the "-map" option. + + The aresample filter is sent the first unused audio stream, that of + A.avi. Since this filter output is also unlabelled, it too is mapped to + the first output file. The presence of "-an" only suppresses automatic + or manual stream selection of audio streams, not outputs sent from + filtergraphs. Both these mapped streams shall be ordered before the + mapped stream in out1.mp4. + + The video, audio and subtitle streams mapped to "out2.mkv" are entirely + determined by automatic stream selection. + + out3.mkv consists of the cloned video output from the hue filter and + the first audio stream from B.mp4. + +OPTIONS + All the numerical options, if not specified otherwise, accept a string + representing a number as input, which may be followed by one of the SI + unit prefixes, for example: 'K', 'M', or 'G'. + + If 'i' is appended to the SI unit prefix, the complete prefix will be + interpreted as a unit prefix for binary multiples, which are based on + powers of 1024 instead of powers of 1000. Appending 'B' to the SI unit + prefix multiplies the value by 8. This allows using, for example: 'KB', + 'MiB', 'G' and 'B' as number suffixes. + + Options which do not take arguments are boolean options, and set the + corresponding value to true. 
They can be set to false by prefixing the + option name with "no". For example using "-nofoo" will set the boolean + option with name "foo" to false. + + Stream specifiers + Some options are applied per-stream, e.g. bitrate or codec. Stream + specifiers are used to precisely specify which stream(s) a given option + belongs to. + + A stream specifier is a string generally appended to the option name + and separated from it by a colon. E.g. "-codec:a:1 ac3" contains the + "a:1" stream specifier, which matches the second audio stream. + Therefore, it would select the ac3 codec for the second audio stream. + + A stream specifier can match several streams, so that the option is + applied to all of them. E.g. the stream specifier in "-b:a 128k" + matches all audio streams. + + An empty stream specifier matches all streams. For example, "-codec + copy" or "-codec: copy" would copy all the streams without reencoding. + + Possible forms of stream specifiers are: + + stream_index + Matches the stream with this index. E.g. "-threads:1 4" would set + the thread count for the second stream to 4. If stream_index is + used as an additional stream specifier (see below), then it selects + stream number stream_index from the matching streams. Stream + numbering is based on the order of the streams as detected by + libavformat except when a program ID is also specified. In this + case it is based on the ordering of the streams in the program. + + stream_type[:additional_stream_specifier] + stream_type is one of following: 'v' or 'V' for video, 'a' for + audio, 's' for subtitle, 'd' for data, and 't' for attachments. 'v' + matches all video streams, 'V' only matches video streams which are + not attached pictures, video thumbnails or cover arts. If + additional_stream_specifier is used, then it matches streams which + both have this type and match the additional_stream_specifier. + Otherwise, it matches all streams of the specified type. 
+ + p:program_id[:additional_stream_specifier] + Matches streams which are in the program with the id program_id. If + additional_stream_specifier is used, then it matches streams which + both are part of the program and match the + additional_stream_specifier. + + #stream_id or i:stream_id + Match the stream by stream id (e.g. PID in MPEG-TS container). + + m:key[:value] + Matches streams with the metadata tag key having the specified + value. If value is not given, matches streams that contain the + given tag with any value. + + u Matches streams with usable configuration, the codec must be + defined and the essential information such as video dimension or + audio sample rate must be present. + + Note that in ffmpeg, matching by metadata will only work properly + for input files. + + Generic options + These options are shared amongst the ff* tools. + + -L Show license. + + -h, -?, -help, --help [arg] + Show help. An optional parameter may be specified to print help + about a specific item. If no argument is specified, only basic (non + advanced) tool options are shown. + + Possible values of arg are: + + long + Print advanced tool options in addition to the basic tool + options. + + full + Print complete list of options, including shared and private + options for encoders, decoders, demuxers, muxers, filters, etc. + + decoder=decoder_name + Print detailed information about the decoder named + decoder_name. Use the -decoders option to get a list of all + decoders. + + encoder=encoder_name + Print detailed information about the encoder named + encoder_name. Use the -encoders option to get a list of all + encoders. + + demuxer=demuxer_name + Print detailed information about the demuxer named + demuxer_name. Use the -formats option to get a list of all + demuxers and muxers. + + muxer=muxer_name + Print detailed information about the muxer named muxer_name. + Use the -formats option to get a list of all muxers and + demuxers. 
+ + filter=filter_name + Print detailed information about the filter named filter_name. + Use the -filters option to get a list of all filters. + + bsf=bitstream_filter_name + Print detailed information about the bitstream filter named + bitstream_filter_name. Use the -bsfs option to get a list of + all bitstream filters. + + protocol=protocol_name + Print detailed information about the protocol named + protocol_name. Use the -protocols option to get a list of all + protocols. + + -version + Show version. + + -buildconf + Show the build configuration, one option per line. + + -formats + Show available formats (including devices). + + -demuxers + Show available demuxers. + + -muxers + Show available muxers. + + -devices + Show available devices. + + -codecs + Show all codecs known to libavcodec. + + Note that the term 'codec' is used throughout this documentation as + a shortcut for what is more correctly called a media bitstream + format. + + -decoders + Show available decoders. + + -encoders + Show all available encoders. + + -bsfs + Show available bitstream filters. + + -protocols + Show available protocols. + + -filters + Show available libavfilter filters. + + -pix_fmts + Show available pixel formats. + + -sample_fmts + Show available sample formats. + + -layouts + Show channel names and standard channel layouts. + + -colors + Show recognized color names. + + -sources device[,opt1=val1[,opt2=val2]...] + Show autodetected sources of the input device. Some devices may + provide system-dependent source names that cannot be autodetected. + The returned list cannot be assumed to be always complete. + + ffmpeg -sources pulse,server=192.168.0.4 + + -sinks device[,opt1=val1[,opt2=val2]...] + Show autodetected sinks of the output device. Some devices may + provide system-dependent sink names that cannot be autodetected. + The returned list cannot be assumed to be always complete. 
+ + ffmpeg -sinks pulse,server=192.168.0.4 + + -loglevel [flags+]loglevel | -v [flags+]loglevel + Set logging level and flags used by the library. + + The optional flags prefix can consist of the following values: + + repeat + Indicates that repeated log output should not be compressed to + the first line and the "Last message repeated n times" line + will be omitted. + + level + Indicates that log output should add a "[level]" prefix to each + message line. This can be used as an alternative to log + coloring, e.g. when dumping the log to file. + + Flags can also be used alone by adding a '+'/'-' prefix to + set/reset a single flag without affecting other flags or changing + loglevel. When setting both flags and loglevel, a '+' separator is + expected between the last flags value and before loglevel. + + loglevel is a string or a number containing one of the following + values: + + quiet, -8 + Show nothing at all; be silent. + + panic, 0 + Only show fatal errors which could lead the process to crash, + such as an assertion failure. This is not currently used for + anything. + + fatal, 8 + Only show fatal errors. These are errors after which the + process absolutely cannot continue. + + error, 16 + Show all errors, including ones which can be recovered from. + + warning, 24 + Show all warnings and errors. Any message related to possibly + incorrect or unexpected events will be shown. + + info, 32 + Show informative messages during processing. This is in + addition to warnings and errors. This is the default value. + + verbose, 40 + Same as "info", except more verbose. + + debug, 48 + Show everything, including debugging information. + + trace, 56 + + For example to enable repeated log output, add the "level" prefix, + and set loglevel to "verbose": + + ffmpeg -loglevel repeat+level+verbose -i input output + + Another example that enables repeated log output without affecting + current state of "level" prefix flag or loglevel: + + ffmpeg [...] 
-loglevel +repeat + + By default the program logs to stderr. If coloring is supported by + the terminal, colors are used to mark errors and warnings. Log + coloring can be disabled setting the environment variable + AV_LOG_FORCE_NOCOLOR, or can be forced setting the environment + variable AV_LOG_FORCE_COLOR. + + -report + Dump full command line and log output to a file named + "program-YYYYMMDD-HHMMSS.log" in the current directory. This file + can be useful for bug reports. It also implies "-loglevel debug". + + Setting the environment variable FFREPORT to any value has the same + effect. If the value is a ':'-separated key=value sequence, these + options will affect the report; option values must be escaped if + they contain special characters or the options delimiter ':' (see + the ``Quoting and escaping'' section in the ffmpeg-utils manual). + + The following options are recognized: + + file + set the file name to use for the report; %p is expanded to the + name of the program, %t is expanded to a timestamp, "%%" is + expanded to a plain "%" + + level + set the log verbosity level using a numerical value (see + "-loglevel"). + + For example, to output a report to a file named ffreport.log using + a log level of 32 (alias for log level "info"): + + FFREPORT=file=ffreport.log:level=32 ffmpeg -i input output + + Errors in parsing the environment variable are not fatal, and will + not appear in the report. + + -hide_banner + Suppress printing banner. + + All FFmpeg tools will normally show a copyright notice, build + options and library versions. This option can be used to suppress + printing this information. + + -cpuflags flags (global) + Allows setting and clearing cpu flags. This option is intended for + testing. Do not use it unless you know what you're doing. + + ffmpeg -cpuflags -sse+mmx ... + ffmpeg -cpuflags mmx ... + ffmpeg -cpuflags 0 ... 
+ + Possible flags for this option are: + + x86 + mmx + mmxext + sse + sse2 + sse2slow + sse3 + sse3slow + ssse3 + atom + sse4.1 + sse4.2 + avx + avx2 + xop + fma3 + fma4 + 3dnow + 3dnowext + bmi1 + bmi2 + cmov + ARM + armv5te + armv6 + armv6t2 + vfp + vfpv3 + neon + setend + AArch64 + armv8 + vfp + neon + PowerPC + altivec + Specific Processors + pentium2 + pentium3 + pentium4 + k6 + k62 + athlon + athlonxp + k8 + -max_alloc bytes + Set the maximum size limit for allocating a block on the heap by + ffmpeg's family of malloc functions. Exercise extreme caution when + using this option. Don't use if you do not understand the full + consequence of doing so. Default is INT_MAX. + + AVOptions + These options are provided directly by the libavformat, libavdevice and + libavcodec libraries. To see the list of available AVOptions, use the + -help option. They are separated into two categories: + + generic + These options can be set for any container, codec or device. + Generic options are listed under AVFormatContext options for + containers/devices and under AVCodecContext options for codecs. + + private + These options are specific to the given container, device or codec. + Private options are listed under their corresponding + containers/devices/codecs. + + For example to write an ID3v2.3 header instead of a default ID3v2.4 to + an MP3 file, use the id3v2_version private option of the MP3 muxer: + + ffmpeg -i input.flac -id3v2_version 3 out.mp3 + + All codec AVOptions are per-stream, and thus a stream specifier should + be attached to them: + + ffmpeg -i multichannel.mxf -map 0:v:0 -map 0:a:0 -map 0:a:0 -c:a:0 ac3 -b:a:0 640k -ac:a:1 2 -c:a:1 aac -b:2 128k out.mp4 + + In the above example, a multichannel audio stream is mapped twice for + output. The first instance is encoded with codec ac3 and bitrate 640k. + The second instance is downmixed to 2 channels and encoded with codec + aac. A bitrate of 128k is specified for it using absolute index of the + output stream. 
+ + Note: the -nooption syntax cannot be used for boolean AVOptions, use + -option 0/-option 1. + + Note: the old undocumented way of specifying per-stream AVOptions by + prepending v/a/s to the options name is now obsolete and will be + removed soon. + + Main options + -f fmt (input/output) + Force input or output file format. The format is normally auto + detected for input files and guessed from the file extension for + output files, so this option is not needed in most cases. + + -i url (input) + input file url + + -y (global) + Overwrite output files without asking. + + -n (global) + Do not overwrite output files, and exit immediately if a specified + output file already exists. + + -stream_loop number (input) + Set number of times input stream shall be looped. Loop 0 means no + loop, loop -1 means infinite loop. + + -c[:stream_specifier] codec (input/output,per-stream) + -codec[:stream_specifier] codec (input/output,per-stream) + Select an encoder (when used before an output file) or a decoder + (when used before an input file) for one or more streams. codec is + the name of a decoder/encoder or a special value "copy" (output + only) to indicate that the stream is not to be re-encoded. + + For example + + ffmpeg -i INPUT -map 0 -c:v libx264 -c:a copy OUTPUT + + encodes all video streams with libx264 and copies all audio + streams. + + For each stream, the last matching "c" option is applied, so + + ffmpeg -i INPUT -map 0 -c copy -c:v:1 libx264 -c:a:137 libvorbis OUTPUT + + will copy all the streams except the second video, which will be + encoded with libx264, and the 138th audio, which will be encoded + with libvorbis. + + -t duration (input/output) + When used as an input option (before "-i"), limit the duration of + data read from the input file. + + When used as an output option (before an output url), stop writing + the output after its duration reaches duration. 
+ + duration must be a time duration specification, see the Time + duration section in the ffmpeg-utils(1) manual. + + -to and -t are mutually exclusive and -t has priority. + + -to position (input/output) + Stop writing the output or reading the input at position. position + must be a time duration specification, see the Time duration + section in the ffmpeg-utils(1) manual. + + -to and -t are mutually exclusive and -t has priority. + + -fs limit_size (output) + Set the file size limit, expressed in bytes. No further chunk of + bytes is written after the limit is exceeded. The size of the + output file is slightly more than the requested file size. + + -ss position (input/output) + When used as an input option (before "-i"), seeks in this input + file to position. Note that in most formats it is not possible to + seek exactly, so ffmpeg will seek to the closest seek point before + position. When transcoding and -accurate_seek is enabled (the + default), this extra segment between the seek point and position + will be decoded and discarded. When doing stream copy or when + -noaccurate_seek is used, it will be preserved. + + When used as an output option (before an output url), decodes but + discards input until the timestamps reach position. + + position must be a time duration specification, see the Time + duration section in the ffmpeg-utils(1) manual. + + -sseof position (input) + Like the "-ss" option but relative to the "end of file". That is + negative values are earlier in the file, 0 is at EOF. + + -itsoffset offset (input) + Set the input time offset. + + offset must be a time duration specification, see the Time duration + section in the ffmpeg-utils(1) manual. + + The offset is added to the timestamps of the input files. + Specifying a positive offset means that the corresponding streams + are delayed by the time duration specified in offset. + + -itsscale scale (input,per-stream) + Rescale input timestamps. scale should be a floating point number. 
+ + -timestamp date (output) + Set the recording timestamp in the container. + + date must be a date specification, see the Date section in the + ffmpeg-utils(1) manual. + + -metadata[:metadata_specifier] key=value (output,per-metadata) + Set a metadata key/value pair. + + An optional metadata_specifier may be given to set metadata on + streams, chapters or programs. See "-map_metadata" documentation + for details. + + This option overrides metadata set with "-map_metadata". It is also + possible to delete metadata by using an empty value. + + For example, for setting the title in the output file: + + ffmpeg -i in.avi -metadata title="my title" out.flv + + To set the language of the first audio stream: + + ffmpeg -i INPUT -metadata:s:a:0 language=eng OUTPUT + + -disposition[:stream_specifier] value (output,per-stream) + Sets the disposition for a stream. + + This option overrides the disposition copied from the input stream. + It is also possible to delete the disposition by setting it to 0. + + The following dispositions are recognized: + + default + dub + original + comment + lyrics + karaoke + forced + hearing_impaired + visual_impaired + clean_effects + attached_pic + captions + descriptions + dependent + metadata + + For example, to make the second audio stream the default stream: + + ffmpeg -i in.mkv -c copy -disposition:a:1 default out.mkv + + To make the second subtitle stream the default stream and remove + the default disposition from the first subtitle stream: + + ffmpeg -i in.mkv -c copy -disposition:s:0 0 -disposition:s:1 default out.mkv + + To add an embedded cover/thumbnail: + + ffmpeg -i in.mp4 -i IMAGE -map 0 -map 1 -c copy -c:v:1 png -disposition:v:1 attached_pic out.mp4 + + Not all muxers support embedded thumbnails, and those who do, only + support a few formats, like JPEG or PNG. + + -program + [title=title:][program_num=program_num:]st=stream[:st=stream...] 
+ (output) + Creates a program with the specified title, program_num and adds + the specified stream(s) to it. + + -target type (output) + Specify target file type ("vcd", "svcd", "dvd", "dv", "dv50"). type + may be prefixed with "pal-", "ntsc-" or "film-" to use the + corresponding standard. All the format options (bitrate, codecs, + buffer sizes) are then set automatically. You can just type: + + ffmpeg -i myfile.avi -target vcd /tmp/vcd.mpg + + Nevertheless you can specify additional options as long as you know + they do not conflict with the standard, as in: + + ffmpeg -i myfile.avi -target vcd -bf 2 /tmp/vcd.mpg + + The parameters set for each target are as follows. + + VCD + + : + -f vcd -muxrate 1411200 -muxpreload 0.44 -packetsize 2324 + -s 352x288 -r 25 + -codec:v mpeg1video -g 15 -b:v 1150k -maxrate:v 1150k -minrate:v 1150k -bufsize:v 327680 + -ar 44100 -ac 2 + -codec:a mp2 -b:a 224k + + : + -f vcd -muxrate 1411200 -muxpreload 0.44 -packetsize 2324 + -s 352x240 -r 30000/1001 + -codec:v mpeg1video -g 18 -b:v 1150k -maxrate:v 1150k -minrate:v 1150k -bufsize:v 327680 + -ar 44100 -ac 2 + -codec:a mp2 -b:a 224k + + : + -f vcd -muxrate 1411200 -muxpreload 0.44 -packetsize 2324 + -s 352x240 -r 24000/1001 + -codec:v mpeg1video -g 18 -b:v 1150k -maxrate:v 1150k -minrate:v 1150k -bufsize:v 327680 + -ar 44100 -ac 2 + -codec:a mp2 -b:a 224k + + SVCD + + : + -f svcd -packetsize 2324 + -s 480x576 -pix_fmt yuv420p -r 25 + -codec:v mpeg2video -g 15 -b:v 2040k -maxrate:v 2516k -minrate:v 0 -bufsize:v 1835008 -scan_offset 1 + -ar 44100 + -codec:a mp2 -b:a 224k + + : + -f svcd -packetsize 2324 + -s 480x480 -pix_fmt yuv420p -r 30000/1001 + -codec:v mpeg2video -g 18 -b:v 2040k -maxrate:v 2516k -minrate:v 0 -bufsize:v 1835008 -scan_offset 1 + -ar 44100 + -codec:a mp2 -b:a 224k + + : + -f svcd -packetsize 2324 + -s 480x480 -pix_fmt yuv420p -r 24000/1001 + -codec:v mpeg2video -g 18 -b:v 2040k -maxrate:v 2516k -minrate:v 0 -bufsize:v 1835008 -scan_offset 1 + -ar 44100 + -codec:a 
mp2 -b:a 224k + + DVD + + : + -f dvd -muxrate 10080k -packetsize 2048 + -s 720x576 -pix_fmt yuv420p -r 25 + -codec:v mpeg2video -g 15 -b:v 6000k -maxrate:v 9000k -minrate:v 0 -bufsize:v 1835008 + -ar 48000 + -codec:a ac3 -b:a 448k + + : + -f dvd -muxrate 10080k -packetsize 2048 + -s 720x480 -pix_fmt yuv420p -r 30000/1001 + -codec:v mpeg2video -g 18 -b:v 6000k -maxrate:v 9000k -minrate:v 0 -bufsize:v 1835008 + -ar 48000 + -codec:a ac3 -b:a 448k + + : + -f dvd -muxrate 10080k -packetsize 2048 + -s 720x480 -pix_fmt yuv420p -r 24000/1001 + -codec:v mpeg2video -g 18 -b:v 6000k -maxrate:v 9000k -minrate:v 0 -bufsize:v 1835008 + -ar 48000 + -codec:a ac3 -b:a 448k + + DV + + : + -f dv + -s 720x576 -pix_fmt yuv420p -r 25 + -ar 48000 -ac 2 + + : + -f dv + -s 720x480 -pix_fmt yuv411p -r 30000/1001 + -ar 48000 -ac 2 + + : + -f dv + -s 720x480 -pix_fmt yuv411p -r 24000/1001 + -ar 48000 -ac 2 + + The "dv50" target is identical to the "dv" target except that the + pixel format set is "yuv422p" for all three standards. + + Any user-set value for a parameter above will override the target + preset value. In that case, the output may not comply with the + target standard. + + -dn (input/output) + As an input option, blocks all data streams of a file from being + filtered or being automatically selected or mapped for any output. + See "-discard" option to disable streams individually. + + As an output option, disables data recording i.e. automatic + selection or mapping of any data stream. For full manual control + see the "-map" option. + + -dframes number (output) + Set the number of data frames to output. This is an obsolete alias + for "-frames:d", which you should use instead. + + -frames[:stream_specifier] framecount (output,per-stream) + Stop writing to the stream after framecount frames. + + -q[:stream_specifier] q (output,per-stream) + -qscale[:stream_specifier] q (output,per-stream) + Use fixed quality scale (VBR). The meaning of q/qscale is codec- + dependent. 
If qscale is used without a stream_specifier then it + applies only to the video stream, this is to maintain compatibility + with previous behavior and as specifying the same codec specific + value to 2 different codecs that is audio and video generally is + not what is intended when no stream_specifier is used. + + -filter[:stream_specifier] filtergraph (output,per-stream) + Create the filtergraph specified by filtergraph and use it to + filter the stream. + + filtergraph is a description of the filtergraph to apply to the + stream, and must have a single input and a single output of the + same type of the stream. In the filtergraph, the input is + associated to the label "in", and the output to the label "out". + See the ffmpeg-filters manual for more information about the + filtergraph syntax. + + See the -filter_complex option if you want to create filtergraphs + with multiple inputs and/or outputs. + + -filter_script[:stream_specifier] filename (output,per-stream) + This option is similar to -filter, the only difference is that its + argument is the name of the file from which a filtergraph + description is to be read. + + -filter_threads nb_threads (global) + Defines how many threads are used to process a filter pipeline. + Each pipeline will produce a thread pool with this many threads + available for parallel processing. The default is the number of + available CPUs. + + -pre[:stream_specifier] preset_name (output,per-stream) + Specify the preset for matching stream(s). + + -stats (global) + Print encoding progress/statistics. It is on by default, to + explicitly disable it you need to specify "-nostats". + + -stats_period time (global) + Set period at which encoding progress/statistics are updated. + Default is 0.5 seconds. + + -progress url (global) + Send program-friendly progress information to url. + + Progress information is written periodically and at the end of the + encoding process. It is made of "key=value" lines. 
key consists of + only alphanumeric characters. The last key of a sequence of + progress information is always "progress". + + The update period is set using "-stats_period". + + -stdin + Enable interaction on standard input. On by default unless standard + input is used as an input. To explicitly disable interaction you + need to specify "-nostdin". + + Disabling interaction on standard input is useful, for example, if + ffmpeg is in the background process group. Roughly the same result + can be achieved with "ffmpeg ... < /dev/null" but it requires a + shell. + + -debug_ts (global) + Print timestamp information. It is off by default. This option is + mostly useful for testing and debugging purposes, and the output + format may change from one version to another, so it should not be + employed by portable scripts. + + See also the option "-fdebug ts". + + -attach filename (output) + Add an attachment to the output file. This is supported by a few + formats like Matroska for e.g. fonts used in rendering subtitles. + Attachments are implemented as a specific type of stream, so this + option will add a new stream to the file. It is then possible to + use per-stream options on this stream in the usual way. Attachment + streams created with this option will be created after all the + other streams (i.e. those created with "-map" or automatic + mappings). + + Note that for Matroska you also have to set the mimetype metadata + tag: + + ffmpeg -i INPUT -attach DejaVuSans.ttf -metadata:s:2 mimetype=application/x-truetype-font out.mkv + + (assuming that the attachment stream will be third in the output + file). + + -dump_attachment[:stream_specifier] filename (input,per-stream) + Extract the matching attachment stream into a file named filename. + If filename is empty, then the value of the "filename" metadata tag + will be used. + + E.g. 
to extract the first attachment to a file named 'out.ttf': + + ffmpeg -dump_attachment:t:0 out.ttf -i INPUT + + To extract all attachments to files determined by the "filename" + tag: + + ffmpeg -dump_attachment:t "" -i INPUT + + Technical note -- attachments are implemented as codec extradata, + so this option can actually be used to extract extradata from any + stream, not just attachments. + + Video Options + -vframes number (output) + Set the number of video frames to output. This is an obsolete alias + for "-frames:v", which you should use instead. + + -r[:stream_specifier] fps (input/output,per-stream) + Set frame rate (Hz value, fraction or abbreviation). + + As an input option, ignore any timestamps stored in the file and + instead generate timestamps assuming constant frame rate fps. This + is not the same as the -framerate option used for some input + formats like image2 or v4l2 (it used to be the same in older + versions of FFmpeg). If in doubt use -framerate instead of the + input option -r. + + As an output option, duplicate or drop input frames to achieve + constant output frame rate fps. + + -fpsmax[:stream_specifier] fps (output,per-stream) + Set maximum frame rate (Hz value, fraction or abbreviation). + + Clamps output frame rate when output framerate is auto-set and is + higher than this value. Useful in batch processing or when input + framerate is wrongly detected as very high. It cannot be set + together with "-r". It is ignored during streamcopy. + + -s[:stream_specifier] size (input/output,per-stream) + Set frame size. + + As an input option, this is a shortcut for the video_size private + option, recognized by some demuxers for which the frame size is + either not stored in the file or is configurable -- e.g. raw video + or video grabbers. + + As an output option, this inserts the "scale" video filter to the + end of the corresponding filtergraph. Please use the "scale" filter + directly to insert it at the beginning or some other place. 
+ + The format is wxh (default - same as source). + + -aspect[:stream_specifier] aspect (output,per-stream) + Set the video display aspect ratio specified by aspect. + + aspect can be a floating point number string, or a string of the + form num:den, where num and den are the numerator and denominator + of the aspect ratio. For example "4:3", "16:9", "1.3333", and + "1.7777" are valid argument values. + + If used together with -vcodec copy, it will affect the aspect ratio + stored at container level, but not the aspect ratio stored in + encoded frames, if it exists. + + -vn (input/output) + As an input option, blocks all video streams of a file from being + filtered or being automatically selected or mapped for any output. + See "-discard" option to disable streams individually. + + As an output option, disables video recording i.e. automatic + selection or mapping of any video stream. For full manual control + see the "-map" option. + + -vcodec codec (output) + Set the video codec. This is an alias for "-codec:v". + + -pass[:stream_specifier] n (output,per-stream) + Select the pass number (1 or 2). It is used to do two-pass video + encoding. The statistics of the video are recorded in the first + pass into a log file (see also the option -passlogfile), and in the + second pass that log file is used to generate the video at the + exact requested bitrate. On pass 1, you may just deactivate audio + and set output to null, examples for Windows and Unix: + + ffmpeg -i foo.mov -c:v libxvid -pass 1 -an -f rawvideo -y NUL + ffmpeg -i foo.mov -c:v libxvid -pass 1 -an -f rawvideo -y /dev/null + + -passlogfile[:stream_specifier] prefix (output,per-stream) + Set two-pass log file name prefix to prefix, the default file name + prefix is ``ffmpeg2pass''. The complete file name will be + PREFIX-N.log, where N is a number specific to the output stream + + -vf filtergraph (output) + Create the filtergraph specified by filtergraph and use it to + filter the stream. 
+ + This is an alias for "-filter:v", see the -filter option. + + -autorotate + Automatically rotate the video according to file metadata. Enabled + by default, use -noautorotate to disable it. + + -autoscale + Automatically scale the video according to the resolution of first + frame. Enabled by default, use -noautoscale to disable it. When + autoscale is disabled, all output frames of filter graph might not + be in the same resolution and may be inadequate for some + encoder/muxer. Therefore, it is not recommended to disable it + unless you really know what you are doing. Disable autoscale at + your own risk. + + Advanced Video options + -pix_fmt[:stream_specifier] format (input/output,per-stream) + Set pixel format. Use "-pix_fmts" to show all the supported pixel + formats. If the selected pixel format can not be selected, ffmpeg + will print a warning and select the best pixel format supported by + the encoder. If pix_fmt is prefixed by a "+", ffmpeg will exit + with an error if the requested pixel format can not be selected, + and automatic conversions inside filtergraphs are disabled. If + pix_fmt is a single "+", ffmpeg selects the same pixel format as + the input (or graph output) and automatic conversions are disabled. + + -sws_flags flags (input/output) + Set SwScaler flags. + + -rc_override[:stream_specifier] override (output,per-stream) + Rate control override for specific intervals, formatted as + "int,int,int" list separated with slashes. Two first values are the + beginning and end frame numbers, last one is quantizer to use if + positive, or quality factor if negative. + + -ilme + Force interlacing support in encoder (MPEG-2 and MPEG-4 only). Use + this option if your input file is interlaced and you want to keep + the interlaced format for minimum losses. The alternative is to + deinterlace the input stream by use of a filter such as "yadif" or + "bwdif", but deinterlacing introduces losses. + + -psnr + Calculate PSNR of compressed frames. 
+ + -vstats + Dump video coding statistics to vstats_HHMMSS.log. + + -vstats_file file + Dump video coding statistics to file. + + -vstats_version file + Specifies which version of the vstats format to use. Default is 2. + + version = 1 : + + "frame= %5d q= %2.1f PSNR= %6.2f f_size= %6d s_size= %8.0fkB time= + %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s" + + version > 1: + + "out= %2d st= %2d frame= %5d q= %2.1f PSNR= %6.2f f_size= %6d + s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s" + + -top[:stream_specifier] n (output,per-stream) + top=1/bottom=0/auto=-1 field first + + -dc precision + Intra_dc_precision. + + -vtag fourcc/tag (output) + Force video tag/fourcc. This is an alias for "-tag:v". + + -qphist (global) + Show QP histogram + + -vbsf bitstream_filter + Deprecated see -bsf + + -force_key_frames[:stream_specifier] time[,time...] (output,per-stream) + -force_key_frames[:stream_specifier] expr:expr (output,per-stream) + -force_key_frames[:stream_specifier] source (output,per-stream) + force_key_frames can take arguments of the following form: + + time[,time...] + If the argument consists of timestamps, ffmpeg will round the + specified times to the nearest output timestamp as per the + encoder time base and force a keyframe at the first frame + having timestamp equal or greater than the computed timestamp. + Note that if the encoder time base is too coarse, then the + keyframes may be forced on frames with timestamps lower than + the specified time. The default encoder time base is the + inverse of the output framerate but may be set otherwise via + "-enc_time_base". + + If one of the times is ""chapters"[delta]", it is expanded into + the time of the beginning of all chapters in the file, shifted + by delta, expressed as a time in seconds. This option can be + useful to ensure that a seek point is present at a chapter mark + or any other designated place in the output file. 
+ + For example, to insert a key frame at 5 minutes, plus key + frames 0.1 second before the beginning of every chapter: + + -force_key_frames 0:05:00,chapters-0.1 + + expr:expr + If the argument is prefixed with "expr:", the string expr is + interpreted like an expression and is evaluated for each frame. + A key frame is forced in case the evaluation is non-zero. + + The expression in expr can contain the following constants: + + n the number of current processed frame, starting from 0 + + n_forced + the number of forced frames + + prev_forced_n + the number of the previous forced frame, it is "NAN" when + no keyframe was forced yet + + prev_forced_t + the time of the previous forced frame, it is "NAN" when no + keyframe was forced yet + + t the time of the current processed frame + + For example to force a key frame every 5 seconds, you can + specify: + + -force_key_frames expr:gte(t,n_forced*5) + + To force a key frame 5 seconds after the time of the last + forced one, starting from second 13: + + -force_key_frames expr:if(isnan(prev_forced_t),gte(t,13),gte(t,prev_forced_t+5)) + + source + If the argument is "source", ffmpeg will force a key frame if + the current frame being encoded is marked as a key frame in its + source. + + Note that forcing too many keyframes is very harmful for the + lookahead algorithms of certain encoders: using fixed-GOP options + or similar would be more efficient. + + -copyinkf[:stream_specifier] (output,per-stream) + When doing stream copy, copy also non-key frames found at the + beginning. + + -init_hw_device type[=name][:device[,key=value...]] + Initialise a new hardware device of type type called name, using + the given device parameters. If no name is specified it will + receive a default name of the form "type%d". + + The meaning of device and the following arguments depends on the + device type: + + cuda + device is the number of the CUDA device. + + dxva2 + device is the number of the Direct3D 9 display adapter. 
+ + vaapi + device is either an X11 display name or a DRM render node. If + not specified, it will attempt to open the default X11 display + ($DISPLAY) and then the first DRM render node + (/dev/dri/renderD128). + + vdpau + device is an X11 display name. If not specified, it will + attempt to open the default X11 display ($DISPLAY). + + qsv device selects a value in MFX_IMPL_*. Allowed values are: + + auto + sw + hw + auto_any + hw_any + hw2 + hw3 + hw4 + + If not specified, auto_any is used. (Note that it may be + easier to achieve the desired result for QSV by creating the + platform-appropriate subdevice (dxva2 or vaapi) and then + deriving a QSV device from that.) + + opencl + device selects the platform and device as + platform_index.device_index. + + The set of devices can also be filtered using the key-value + pairs to find only devices matching particular platform or + device strings. + + The strings usable as filters are: + + platform_profile + platform_version + platform_name + platform_vendor + platform_extensions + device_name + device_vendor + driver_version + device_version + device_profile + device_extensions + device_type + + The indices and filters must together uniquely select a device. + + Examples: + + -init_hw_device opencl:0.1 + Choose the second device on the first platform. + + -init_hw_device opencl:,device_name=Foo9000 + Choose the device with a name containing the string + Foo9000. + + -init_hw_device + opencl:1,device_type=gpu,device_extensions=cl_khr_fp16 + Choose the GPU device on the second platform supporting the + cl_khr_fp16 extension. + + vulkan + If device is an integer, it selects the device by its index in + a system-dependent list of devices. If device is any other + string, it selects the first device with a name containing that + string as a substring. + + The following options are recognized: + + debug + If set to 1, enables the validation layer, if installed. 
+ + linear_images + If set to 1, images allocated by the hwcontext will be + linear and locally mappable. + + instance_extensions + A plus separated list of additional instance extensions to + enable. + + device_extensions + A plus separated list of additional device extensions to + enable. + + Examples: + + -init_hw_device vulkan:1 + Choose the second device on the system. + + -init_hw_device vulkan:RADV + Choose the first device with a name containing the string + RADV. + + -init_hw_device + vulkan:0,instance_extensions=VK_KHR_wayland_surface+VK_KHR_xcb_surface + Choose the first device and enable the Wayland and XCB + instance extensions. + + -init_hw_device type[=name]@source + Initialise a new hardware device of type type called name, deriving + it from the existing device with the name source. + + -init_hw_device list + List all hardware device types supported in this build of ffmpeg. + + -filter_hw_device name + Pass the hardware device called name to all filters in any filter + graph. This can be used to set the device to upload to with the + "hwupload" filter, or the device to map to with the "hwmap" filter. + Other filters may also make use of this parameter when they require + a hardware device. Note that this is typically only required when + the input is not already in hardware frames - when it is, filters + will derive the device they require from the context of the frames + they receive as input. + + This is a global setting, so all filters will receive the same + device. + + -hwaccel[:stream_specifier] hwaccel (input,per-stream) + Use hardware acceleration to decode the matching stream(s). The + allowed values of hwaccel are: + + none + Do not use any hardware acceleration (the default). + + auto + Automatically select the hardware acceleration method. + + vdpau + Use VDPAU (Video Decode and Presentation API for Unix) hardware + acceleration. + + dxva2 + Use DXVA2 (DirectX Video Acceleration) hardware acceleration. 
+ + vaapi + Use VAAPI (Video Acceleration API) hardware acceleration. + + qsv Use the Intel QuickSync Video acceleration for video + transcoding. + + Unlike most other values, this option does not enable + accelerated decoding (that is used automatically whenever a qsv + decoder is selected), but accelerated transcoding, without + copying the frames into the system memory. + + For it to work, both the decoder and the encoder must support + QSV acceleration and no filters must be used. + + This option has no effect if the selected hwaccel is not available + or not supported by the chosen decoder. + + Note that most acceleration methods are intended for playback and + will not be faster than software decoding on modern CPUs. + Additionally, ffmpeg will usually need to copy the decoded frames + from the GPU memory into the system memory, resulting in further + performance loss. This option is thus mainly useful for testing. + + -hwaccel_device[:stream_specifier] hwaccel_device (input,per-stream) + Select a device to use for hardware acceleration. + + This option only makes sense when the -hwaccel option is also + specified. It can either refer to an existing device created with + -init_hw_device by name, or it can create a new device as if + -init_hw_device type:hwaccel_device were called immediately before. + + -hwaccels + List all hardware acceleration methods supported in this build of + ffmpeg. + + Audio Options + -aframes number (output) + Set the number of audio frames to output. This is an obsolete alias + for "-frames:a", which you should use instead. + + -ar[:stream_specifier] freq (input/output,per-stream) + Set the audio sampling frequency. For output streams it is set by + default to the frequency of the corresponding input stream. For + input streams this option only makes sense for audio grabbing + devices and raw demuxers and is mapped to the corresponding demuxer + options. + + -aq q (output) + Set the audio quality (codec-specific, VBR). 
This is an alias for + -q:a. + + -ac[:stream_specifier] channels (input/output,per-stream) + Set the number of audio channels. For output streams it is set by + default to the number of input audio channels. For input streams + this option only makes sense for audio grabbing devices and raw + demuxers and is mapped to the corresponding demuxer options. + + -an (input/output) + As an input option, blocks all audio streams of a file from being + filtered or being automatically selected or mapped for any output. + See "-discard" option to disable streams individually. + + As an output option, disables audio recording i.e. automatic + selection or mapping of any audio stream. For full manual control + see the "-map" option. + + -acodec codec (input/output) + Set the audio codec. This is an alias for "-codec:a". + + -sample_fmt[:stream_specifier] sample_fmt (output,per-stream) + Set the audio sample format. Use "-sample_fmts" to get a list of + supported sample formats. + + -af filtergraph (output) + Create the filtergraph specified by filtergraph and use it to + filter the stream. + + This is an alias for "-filter:a", see the -filter option. + + Advanced Audio options + -atag fourcc/tag (output) + Force audio tag/fourcc. This is an alias for "-tag:a". + + -absf bitstream_filter + Deprecated, see -bsf + + -guess_layout_max channels (input,per-stream) + If some input channel layout is not known, try to guess only if it + corresponds to at most the specified number of channels. For + example, 2 tells to ffmpeg to recognize 1 channel as mono and 2 + channels as stereo but not 6 channels as 5.1. The default is to + always try to guess. Use 0 to disable all guessing. + + Subtitle options + -scodec codec (input/output) + Set the subtitle codec. This is an alias for "-codec:s". + + -sn (input/output) + As an input option, blocks all subtitle streams of a file from + being filtered or being automatically selected or mapped for any + output. 
See "-discard" option to disable streams individually. + + As an output option, disables subtitle recording i.e. automatic + selection or mapping of any subtitle stream. For full manual + control see the "-map" option. + + -sbsf bitstream_filter + Deprecated, see -bsf + + Advanced Subtitle options + -fix_sub_duration + Fix subtitles durations. For each subtitle, wait for the next + packet in the same stream and adjust the duration of the first to + avoid overlap. This is necessary with some subtitles codecs, + especially DVB subtitles, because the duration in the original + packet is only a rough estimate and the end is actually marked by + an empty subtitle frame. Failing to use this option when necessary + can result in exaggerated durations or muxing failures due to non- + monotonic timestamps. + + Note that this option will delay the output of all data until the + next subtitle packet is decoded: it may increase memory consumption + and latency a lot. + + -canvas_size size + Set the size of the canvas used to render subtitles. + + Advanced options + -map + [-]input_file_id[:stream_specifier][?][,sync_file_id[:stream_specifier]] + | [linklabel] (output) + Designate one or more input streams as a source for the output + file. Each input stream is identified by the input file index + input_file_id and the input stream index input_stream_id within the + input file. Both indices start at 0. If specified, + sync_file_id:stream_specifier sets which input stream is used as a + presentation sync reference. + + The first "-map" option on the command line specifies the source + for output stream 0, the second "-map" option specifies the source + for output stream 1, etc. + + A "-" character before the stream identifier creates a "negative" + mapping. It disables matching streams from already created + mappings. + + A trailing "?" after the stream index will allow the map to be + optional: if the map matches no streams the map will be ignored + instead of failing. 
Note the map will still fail if an invalid + input file index is used; such as if the map refers to a non- + existent input. + + An alternative [linklabel] form will map outputs from complex + filter graphs (see the -filter_complex option) to the output file. + linklabel must correspond to a defined output link label in the + graph. + + For example, to map ALL streams from the first input file to output + + ffmpeg -i INPUT -map 0 output + + For example, if you have two audio streams in the first input file, + these streams are identified by "0:0" and "0:1". You can use "-map" + to select which streams to place in an output file. For example: + + ffmpeg -i INPUT -map 0:1 out.wav + + will map the input stream in INPUT identified by "0:1" to the + (single) output stream in out.wav. + + For example, to select the stream with index 2 from input file + a.mov (specified by the identifier "0:2"), and stream with index 6 + from input b.mov (specified by the identifier "1:6"), and copy them + to the output file out.mov: + + ffmpeg -i a.mov -i b.mov -c copy -map 0:2 -map 1:6 out.mov + + To select all video and the third audio stream from an input file: + + ffmpeg -i INPUT -map 0:v -map 0:a:2 OUTPUT + + To map all the streams except the second audio, use negative + mappings + + ffmpeg -i INPUT -map 0 -map -0:a:1 OUTPUT + + To map the video and audio streams from the first input, and using + the trailing "?", ignore the audio mapping if no audio streams + exist in the first input: + + ffmpeg -i INPUT -map 0:v -map 0:a? OUTPUT + + To pick the English audio stream: + + ffmpeg -i INPUT -map 0:m:language:eng OUTPUT + + Note that using this option disables the default mappings for this + output file. + + -ignore_unknown + Ignore input streams with unknown type instead of failing if + copying such streams is attempted. + + -copy_unknown + Allow input streams with unknown type to be copied instead of + failing if copying such streams is attempted. 
+ + -map_channel + [input_file_id.stream_specifier.channel_id|-1][?][:output_file_id.stream_specifier] + Map an audio channel from a given input to an output. If + output_file_id.stream_specifier is not set, the audio channel will + be mapped on all the audio streams. + + Using "-1" instead of input_file_id.stream_specifier.channel_id + will map a muted channel. + + A trailing "?" will allow the map_channel to be optional: if the + map_channel matches no channel the map_channel will be ignored + instead of failing. + + For example, assuming INPUT is a stereo audio file, you can switch + the two audio channels with the following command: + + ffmpeg -i INPUT -map_channel 0.0.1 -map_channel 0.0.0 OUTPUT + + If you want to mute the first channel and keep the second: + + ffmpeg -i INPUT -map_channel -1 -map_channel 0.0.1 OUTPUT + + The order of the "-map_channel" option specifies the order of the + channels in the output stream. The output channel layout is guessed + from the number of channels mapped (mono if one "-map_channel", + stereo if two, etc.). Using "-ac" in combination of "-map_channel" + makes the channel gain levels to be updated if input and output + channel layouts don't match (for instance two "-map_channel" + options and "-ac 6"). 
+ + You can also extract each channel of an input to specific outputs; + the following command extracts two channels of the INPUT audio + stream (file 0, stream 0) to the respective OUTPUT_CH0 and + OUTPUT_CH1 outputs: + + ffmpeg -i INPUT -map_channel 0.0.0 OUTPUT_CH0 -map_channel 0.0.1 OUTPUT_CH1 + + The following example splits the channels of a stereo input into + two separate streams, which are put into the same output file: + + ffmpeg -i stereo.wav -map 0:0 -map 0:0 -map_channel 0.0.0:0.0 -map_channel 0.0.1:0.1 -y out.ogg + + Note that currently each output stream can only contain channels + from a single input stream; you can't for example use + "-map_channel" to pick multiple input audio channels contained in + different streams (from the same or different files) and merge them + into a single output stream. It is therefore not currently + possible, for example, to turn two separate mono streams into a + single stereo stream. However splitting a stereo stream into two + single channel mono streams is possible. + + If you need this feature, a possible workaround is to use the + amerge filter. For example, if you need to merge a media (here + input.mkv) with 2 mono audio streams into one single stereo channel + audio stream (and keep the video stream), you can use the following + command: + + ffmpeg -i input.mkv -filter_complex "[0:1] [0:2] amerge" -c:a pcm_s16le -c:v copy output.mkv + + To map the first two audio channels from the first input, and using + the trailing "?", ignore the audio channel mapping if the first + input is mono instead of stereo: + + ffmpeg -i INPUT -map_channel 0.0.0 -map_channel 0.0.1? OUTPUT + + -map_metadata[:metadata_spec_out] infile[:metadata_spec_in] + (output,per-metadata) + Set metadata information of the next output file from infile. Note + that those are file indices (zero-based), not filenames. Optional + metadata_spec_in/out parameters specify, which metadata to copy. 
A + metadata specifier can have the following forms: + + g global metadata, i.e. metadata that applies to the whole file + + s[:stream_spec] + per-stream metadata. stream_spec is a stream specifier as + described in the Stream specifiers chapter. In an input + metadata specifier, the first matching stream is copied from. + In an output metadata specifier, all matching streams are + copied to. + + c:chapter_index + per-chapter metadata. chapter_index is the zero-based chapter + index. + + p:program_index + per-program metadata. program_index is the zero-based program + index. + + If metadata specifier is omitted, it defaults to global. + + By default, global metadata is copied from the first input file, + per-stream and per-chapter metadata is copied along with + streams/chapters. These default mappings are disabled by creating + any mapping of the relevant type. A negative file index can be used + to create a dummy mapping that just disables automatic copying. + + For example to copy metadata from the first stream of the input + file to global metadata of the output file: + + ffmpeg -i in.ogg -map_metadata 0:s:0 out.mp3 + + To do the reverse, i.e. copy global metadata to all audio streams: + + ffmpeg -i in.mkv -map_metadata:s:a 0:g out.mkv + + Note that simple 0 would work as well in this example, since global + metadata is assumed by default. + + -map_chapters input_file_index (output) + Copy chapters from input file with index input_file_index to the + next output file. If no chapter mapping is specified, then chapters + are copied from the first input file with at least one chapter. Use + a negative file index to disable any chapter copying. + + -benchmark (global) + Show benchmarking information at the end of an encode. Shows real, + system and user time used and maximum memory consumption. Maximum + memory consumption is not supported on all systems, it will usually + display as 0 if not supported. 
+ + -benchmark_all (global) + Show benchmarking information during the encode. Shows real, + system and user time used in various steps (audio/video + encode/decode). + + -timelimit duration (global) + Exit after ffmpeg has been running for duration seconds in CPU user + time. + + -dump (global) + Dump each input packet to stderr. + + -hex (global) + When dumping packets, also dump the payload. + + -re (input) + Read input at native frame rate. Mainly used to simulate a grab + device, or live input stream (e.g. when reading from a file). + Should not be used with actual grab devices or live input streams + (where it can cause packet loss). By default ffmpeg attempts to + read the input(s) as fast as possible. This option will slow down + the reading of the input(s) to the native frame rate of the + input(s). It is useful for real-time output (e.g. live streaming). + + -vsync parameter + Video sync method. For compatibility reasons old values can be + specified as numbers. Newly added values will have to be specified + as strings always. + + 0, passthrough + Each frame is passed with its timestamp from the demuxer to the + muxer. + + 1, cfr + Frames will be duplicated and dropped to achieve exactly the + requested constant frame rate. + + 2, vfr + Frames are passed through with their timestamp or dropped so as + to prevent 2 frames from having the same timestamp. + + drop + As passthrough but destroys all timestamps, making the muxer + generate fresh timestamps based on frame-rate. + + -1, auto + Chooses between 1 and 2 depending on muxer capabilities. This + is the default method. + + Note that the timestamps may be further modified by the muxer, + after this. For example, in the case that the format option + avoid_negative_ts is enabled. + + With -map you can select from which stream the timestamps should be + taken. You can leave either video or audio unchanged and sync the + remaining stream(s) to the unchanged one. 
+ + -frame_drop_threshold parameter + Frame drop threshold, which specifies how much behind video frames + can be before they are dropped. In frame rate units, so 1.0 is one + frame. The default is -1.1. One possible usecase is to avoid + framedrops in case of noisy timestamps or to increase frame drop + precision in case of exact timestamps. + + -async samples_per_second + Audio sync method. "Stretches/squeezes" the audio stream to match + the timestamps, the parameter is the maximum samples per second by + which the audio is changed. -async 1 is a special case where only + the start of the audio stream is corrected without any later + correction. + + Note that the timestamps may be further modified by the muxer, + after this. For example, in the case that the format option + avoid_negative_ts is enabled. + + This option has been deprecated. Use the "aresample" audio filter + instead. + + -adrift_threshold time + Set the minimum difference between timestamps and audio data (in + seconds) to trigger adding/dropping samples to make it match the + timestamps. This option effectively is a threshold to select + between hard (add/drop) and soft (squeeze/stretch) compensation. + "-async" must be set to a positive value. + + -apad parameters (output,per-stream) + Pad the output audio stream(s). This is the same as applying "-af + apad". Argument is a string of filter parameters composed the same + as with the "apad" filter. "-shortest" must be set for this output + for the option to take effect. + + -copyts + Do not process input timestamps, but keep their values without + trying to sanitize them. In particular, do not remove the initial + start time offset value. + + Note that, depending on the vsync option or on specific muxer + processing (e.g. in case the format option avoid_negative_ts is + enabled) the output timestamps may mismatch with the input + timestamps even when this option is selected. 
+ + -start_at_zero + When used with copyts, shift input timestamps so they start at + zero. + + This means that using e.g. "-ss 50" will make output timestamps + start at 50 seconds, regardless of what timestamp the input file + started at. + + -copytb mode + Specify how to set the encoder timebase when stream copying. mode + is an integer numeric value, and can assume one of the following + values: + + 1 Use the demuxer timebase. + + The time base is copied to the output encoder from the + corresponding input demuxer. This is sometimes required to + avoid non monotonically increasing timestamps when copying + video streams with variable frame rate. + + 0 Use the decoder timebase. + + The time base is copied to the output encoder from the + corresponding input decoder. + + -1 Try to make the choice automatically, in order to generate a + sane output. + + Default value is -1. + + -enc_time_base[:stream_specifier] timebase (output,per-stream) + Set the encoder timebase. timebase is a floating point number, and + can assume one of the following values: + + 0 Assign a default value according to the media type. + + For video - use 1/framerate, for audio - use 1/samplerate. + + -1 Use the input stream timebase when possible. + + If an input stream is not available, the default timebase will + be used. + + >0 Use the provided number as the timebase. + + This field can be provided as a ratio of two integers (e.g. + 1:24, 1:48000) or as a floating point number (e.g. 0.04166, + 2.0833e-5) + + Default value is 0. + + -bitexact (input/output) + Enable bitexact mode for (de)muxer and (de/en)coder + + -shortest (output) + Finish encoding when the shortest input stream ends. + + -dts_delta_threshold + Timestamp discontinuity delta threshold. + + -dts_error_threshold seconds + Timestamp error delta threshold. This threshold use to discard + crazy/damaged timestamps and the default is 30 hours which is + arbitrarily picked and quite conservative. 
+ + -muxdelay seconds (output) + Set the maximum demux-decode delay. + + -muxpreload seconds (output) + Set the initial demux-decode delay. + + -streamid output-stream-index:new-value (output) + Assign a new stream-id value to an output stream. This option + should be specified prior to the output filename to which it + applies. For the situation where multiple output files exist, a + streamid may be reassigned to a different value. + + For example, to set the stream 0 PID to 33 and the stream 1 PID to + 36 for an output mpegts file: + + ffmpeg -i inurl -streamid 0:33 -streamid 1:36 out.ts + + -bsf[:stream_specifier] bitstream_filters (output,per-stream) + Set bitstream filters for matching streams. bitstream_filters is a + comma-separated list of bitstream filters. Use the "-bsfs" option + to get the list of bitstream filters. + + ffmpeg -i h264.mp4 -c:v copy -bsf:v h264_mp4toannexb -an out.h264 + + ffmpeg -i file.mov -an -vn -bsf:s mov2textsub -c:s copy -f rawvideo sub.txt + + -tag[:stream_specifier] codec_tag (input/output,per-stream) + Force a tag/fourcc for matching streams. + + -timecode hh:mm:ssSEPff + Specify Timecode for writing. SEP is ':' for non drop timecode and + ';' (or '.') for drop. + + ffmpeg -i input.mpg -timecode 01:02:03.04 -r 30000/1001 -s ntsc output.mpg + + -filter_complex filtergraph (global) + Define a complex filtergraph, i.e. one with arbitrary number of + inputs and/or outputs. For simple graphs -- those with one input + and one output of the same type -- see the -filter options. + filtergraph is a description of the filtergraph, as described in + the ``Filtergraph syntax'' section of the ffmpeg-filters manual. + + Input link labels must refer to input streams using the + "[file_index:stream_specifier]" syntax (i.e. the same as -map + uses). If stream_specifier matches multiple streams, the first one + will be used. An unlabeled input will be connected to the first + unused input stream of the matching type. 
+ + Output link labels are referred to with -map. Unlabeled outputs are + added to the first output file. + + Note that with this option it is possible to use only lavfi sources + without normal input files. + + For example, to overlay an image over video + + ffmpeg -i video.mkv -i image.png -filter_complex '[0:v][1:v]overlay[out]' -map + '[out]' out.mkv + + Here "[0:v]" refers to the first video stream in the first input + file, which is linked to the first (main) input of the overlay + filter. Similarly the first video stream in the second input is + linked to the second (overlay) input of overlay. + + Assuming there is only one video stream in each input file, we can + omit input labels, so the above is equivalent to + + ffmpeg -i video.mkv -i image.png -filter_complex 'overlay[out]' -map + '[out]' out.mkv + + Furthermore we can omit the output label and the single output from + the filter graph will be added to the output file automatically, so + we can simply write + + ffmpeg -i video.mkv -i image.png -filter_complex 'overlay' out.mkv + + As a special exception, you can use a bitmap subtitle stream as + input: it will be converted into a video with the same size as the + largest video in the file, or 720x576 if no video is present. Note + that this is an experimental and temporary solution. It will be + removed once libavfilter has proper support for subtitles. 
+ + For example, to hardcode subtitles on top of a DVB-T recording + stored in MPEG-TS format, delaying the subtitles by 1 second: + + ffmpeg -i input.ts -filter_complex \ + '[#0x2ef] setpts=PTS+1/TB [sub] ; [#0x2d0] [sub] overlay' \ + -sn -map '#0x2dc' output.mkv + + (0x2d0, 0x2dc and 0x2ef are the MPEG-TS PIDs of respectively the + video, audio and subtitles streams; 0:0, 0:3 and 0:7 would have + worked too) + + To generate 5 seconds of pure red video using lavfi "color" source: + + ffmpeg -filter_complex 'color=c=red' -t 5 out.mkv + + -filter_complex_threads nb_threads (global) + Defines how many threads are used to process a filter_complex + graph. Similar to filter_threads but used for "-filter_complex" + graphs only. The default is the number of available CPUs. + + -lavfi filtergraph (global) + Define a complex filtergraph, i.e. one with arbitrary number of + inputs and/or outputs. Equivalent to -filter_complex. + + -filter_complex_script filename (global) + This option is similar to -filter_complex, the only difference is + that its argument is the name of the file from which a complex + filtergraph description is to be read. + + -accurate_seek (input) + This option enables or disables accurate seeking in input files + with the -ss option. It is enabled by default, so seeking is + accurate when transcoding. Use -noaccurate_seek to disable it, + which may be useful e.g. when copying some streams and transcoding + the others. + + -seek_timestamp (input) + This option enables or disables seeking by timestamp in input files + with the -ss option. It is disabled by default. If enabled, the + argument to the -ss option is considered an actual timestamp, and + is not offset by the start time of the file. This matters only for + files which do not start from timestamp 0, such as transport + streams. + + -thread_queue_size size (input) + This option sets the maximum number of queued packets when reading + from the file or device. 
+ With low latency / high rate live streams, + packets may be discarded if they are not read in a timely manner; + setting this value can force ffmpeg to use a separate input thread + and read packets as soon as they arrive. By default ffmpeg only does + this if multiple inputs are specified. + + -sdp_file file (global) + Print sdp information for an output stream to file. This allows + dumping sdp information when at least one output isn't an rtp + stream. (Requires at least one of the output formats to be rtp). + + -discard (input) + Allows discarding specific streams or frames from streams. Any + input stream can be fully discarded, using value "all" whereas + selective discarding of frames from a stream occurs at the demuxer + and is not supported by all demuxers. + + none + Discard no frame. + + default + Default, which discards no frames. + + noref + Discard all non-reference frames. + + bidir + Discard all bidirectional frames. + + nokey + Discard all frames except keyframes. + + all Discard all frames. + + -abort_on flags (global) + Stop and abort on various conditions. The following flags are + available: + + empty_output + No packets were passed to the muxer, the output is empty. + + empty_output_stream + No packets were passed to the muxer in some of the output + streams. + + -max_error_rate (global) + Set fraction of decoding frame failures across all inputs which + when crossed ffmpeg will return exit code 69. Crossing this + threshold does not terminate processing. Range is a floating-point + number between 0 and 1. Default is 2/3. + + -xerror (global) + Stop and exit on error. + + -max_muxing_queue_size packets (output,per-stream) + When transcoding audio and/or video streams, ffmpeg will not begin + writing into the output until it has one packet for each such + stream. While waiting for that to happen, packets for other streams + are buffered. This option sets the size of this buffer, in packets, + for the matching output stream. 
+ + The default value of this option should be high enough for most + uses, so only touch this option if you are sure that you need it. + + -muxing_queue_data_threshold bytes (output,per-stream) + This is a minimum threshold until which the muxing queue size is + not taken into account. Defaults to 50 megabytes per stream, and is + based on the overall size of packets passed to the muxer. + + -auto_conversion_filters (global) + Enable automatically inserting format conversion filters in all + filter graphs, including those defined by -vf, -af, -filter_complex + and -lavfi. If filter format negotiation requires a conversion, the + initialization of the filters will fail. Conversions can still be + performed by inserting the relevant conversion filter (scale, + aresample) in the graph. On by default, to explicitly disable it + you need to specify "-noauto_conversion_filters". + + Preset files + A preset file contains a sequence of option=value pairs, one for each + line, specifying a sequence of options which would be awkward to + specify on the command line. Lines starting with the hash ('#') + character are ignored and are used to provide comments. Check the + presets directory in the FFmpeg source tree for examples. + + There are two types of preset files: ffpreset and avpreset files. + + ffpreset files + + ffpreset files are specified with the "vpre", "apre", "spre", and + "fpre" options. The "fpre" option takes the filename of the preset + instead of a preset name as input and can be used for any kind of + codec. For the "vpre", "apre", and "spre" options, the options + specified in a preset file are applied to the currently selected codec + of the same type as the preset option. 
+ + The argument passed to the "vpre", "apre", and "spre" preset options + identifies the preset file to use according to the following rules: + + First ffmpeg searches for a file named arg.ffpreset in the directories + $FFMPEG_DATADIR (if set), and $HOME/.ffmpeg, and in the datadir defined + at configuration time (usually PREFIX/share/ffmpeg) or in a ffpresets + folder along the executable on win32, in that order. For example, if + the argument is "libvpx-1080p", it will search for the file + libvpx-1080p.ffpreset. + + If no such file is found, then ffmpeg will search for a file named + codec_name-arg.ffpreset in the above-mentioned directories, where + codec_name is the name of the codec to which the preset file options + will be applied. For example, if you select the video codec with + "-vcodec libvpx" and use "-vpre 1080p", then it will search for the + file libvpx-1080p.ffpreset. + + avpreset files + + avpreset files are specified with the "pre" option. They work similar + to ffpreset files, but they only allow encoder- specific options. + Therefore, an option=value pair specifying an encoder cannot be used. + + When the "pre" option is specified, ffmpeg will look for files with the + suffix .avpreset in the directories $AVCONV_DATADIR (if set), and + $HOME/.avconv, and in the datadir defined at configuration time + (usually PREFIX/share/ffmpeg), in that order. + + First ffmpeg searches for a file named codec_name-arg.avpreset in the + above-mentioned directories, where codec_name is the name of the codec + to which the preset file options will be applied. For example, if you + select the video codec with "-vcodec libvpx" and use "-pre 1080p", then + it will search for the file libvpx-1080p.avpreset. + + If no such file is found, then ffmpeg will search for a file named + arg.avpreset in the same directories. + +EXAMPLES + Video and Audio grabbing + If you specify the input format and device then ffmpeg can grab video + and audio directly. 
+ + ffmpeg -f oss -i /dev/dsp -f video4linux2 -i /dev/video0 /tmp/out.mpg + + Or with an ALSA audio source (mono input, card id 1) instead of OSS: + + ffmpeg -f alsa -ac 1 -i hw:1 -f video4linux2 -i /dev/video0 /tmp/out.mpg + + Note that you must activate the right video source and channel before + launching ffmpeg with any TV viewer such as + xawtv by Gerd Knorr. You also have to set + the audio recording levels correctly with a standard mixer. + + X11 grabbing + Grab the X11 display with ffmpeg via + + ffmpeg -f x11grab -video_size cif -framerate 25 -i :0.0 /tmp/out.mpg + + 0.0 is display.screen number of your X11 server, same as the DISPLAY + environment variable. + + ffmpeg -f x11grab -video_size cif -framerate 25 -i :0.0+10,20 /tmp/out.mpg + + 0.0 is display.screen number of your X11 server, same as the DISPLAY + environment variable. 10 is the x-offset and 20 the y-offset for the + grabbing. + + Video and Audio file format conversion + Any supported file format and protocol can serve as input to ffmpeg: + + Examples: + + o You can use YUV files as input: + + ffmpeg -i /tmp/test%d.Y /tmp/out.mpg + + It will use the files: + + /tmp/test0.Y, /tmp/test0.U, /tmp/test0.V, + /tmp/test1.Y, /tmp/test1.U, /tmp/test1.V, etc... + + The Y files use twice the resolution of the U and V files. They are + raw files, without header. They can be generated by all decent + video decoders. You must specify the size of the image with the -s + option if ffmpeg cannot guess it. + + o You can input from a raw YUV420P file: + + ffmpeg -i /tmp/test.yuv /tmp/out.avi + + test.yuv is a file containing raw YUV planar data. Each frame is + composed of the Y plane followed by the U and V planes at half + vertical and horizontal resolution. 
+ + o You can output to a raw YUV420P file: + + ffmpeg -i mydivx.avi hugefile.yuv + + o You can set several input files and output files: + + ffmpeg -i /tmp/a.wav -s 640x480 -i /tmp/a.yuv /tmp/a.mpg + + Converts the audio file a.wav and the raw YUV video file a.yuv to + MPEG file a.mpg. + + o You can also do audio and video conversions at the same time: + + ffmpeg -i /tmp/a.wav -ar 22050 /tmp/a.mp2 + + Converts a.wav to MPEG audio at 22050 Hz sample rate. + + o You can encode to several formats at the same time and define a + mapping from input stream to output streams: + + ffmpeg -i /tmp/a.wav -map 0:a -b:a 64k /tmp/a.mp2 -map 0:a -b:a 128k /tmp/b.mp2 + + Converts a.wav to a.mp2 at 64 kbits and to b.mp2 at 128 kbits. + '-map file:index' specifies which input stream is used for each + output stream, in the order of the definition of output streams. + + o You can transcode decrypted VOBs: + + ffmpeg -i snatch_1.vob -f avi -c:v mpeg4 -b:v 800k -g 300 -bf 2 -c:a libmp3lame -b:a 128k snatch.avi + + This is a typical DVD ripping example; the input is a VOB file, the + output an AVI file with MPEG-4 video and MP3 audio. Note that in + this command we use B-frames so the MPEG-4 stream is DivX5 + compatible, and GOP size is 300 which means one intra frame every + 10 seconds for 29.97fps input video. Furthermore, the audio stream + is MP3-encoded so you need to enable LAME support by passing + "--enable-libmp3lame" to configure. The mapping is particularly + useful for DVD transcoding to get the desired audio language. + + NOTE: To see the supported input formats, use "ffmpeg -demuxers". + + o You can extract images from a video, or create a video from many + images: + + For extracting images from a video: + + ffmpeg -i foo.avi -r 1 -s WxH -f image2 foo-%03d.jpeg + + This will extract one video frame per second from the video and + will output them in files named foo-001.jpeg, foo-002.jpeg, etc. + Images will be rescaled to fit the new WxH values. 
+ + If you want to extract just a limited number of frames, you can use + the above command in combination with the "-frames:v" or "-t" + option, or in combination with -ss to start extracting from a + certain point in time. + + For creating a video from many images: + + ffmpeg -f image2 -framerate 12 -i foo-%03d.jpeg -s WxH foo.avi + + The syntax "foo-%03d.jpeg" specifies to use a decimal number + composed of three digits padded with zeroes to express the sequence + number. It is the same syntax supported by the C printf function, + but only formats accepting a normal integer are suitable. + + When importing an image sequence, -i also supports expanding shell- + like wildcard patterns (globbing) internally, by selecting the + image2-specific "-pattern_type glob" option. + + For example, for creating a video from filenames matching the glob + pattern "foo-*.jpeg": + + ffmpeg -f image2 -pattern_type glob -framerate 12 -i 'foo-*.jpeg' -s WxH foo.avi + + o You can put many streams of the same type in the output: + + ffmpeg -i test1.avi -i test2.avi -map 1:1 -map 1:0 -map 0:1 -map 0:0 -c copy -y test12.nut + + The resulting output file test12.nut will contain the first four + streams from the input files in reverse order. + + o To force CBR video output: + + ffmpeg -i myfile.avi -b 4000k -minrate 4000k -maxrate 4000k -bufsize 1835k out.m2v + + o The four options lmin, lmax, mblmin and mblmax use 'lambda' units, + but you may use the QP2LAMBDA constant to easily convert from 'q' + units: + + ffmpeg -i src.ext -lmax 21*QP2LAMBDA dst.ext + +SYNTAX + This section documents the syntax and formats employed by the FFmpeg + libraries and tools. + + Quoting and escaping + FFmpeg adopts the following quoting and escaping mechanism, unless + explicitly specified. The following rules are applied: + + o ' and \ are special characters (respectively used for quoting and + escaping). 
In addition to them, there might be other special + characters depending on the specific syntax where the escaping and + quoting are employed. + + o A special character is escaped by prefixing it with a \. + + o All characters enclosed between '' are included literally in the + parsed string. The quote character ' itself cannot be quoted, so + you may need to close the quote and escape it. + + o Leading and trailing whitespaces, unless escaped or quoted, are + removed from the parsed string. + + Note that you may need to add a second level of escaping when using the + command line or a script, which depends on the syntax of the adopted + shell language. + + The function "av_get_token" defined in libavutil/avstring.h can be used + to parse a token quoted or escaped according to the rules defined + above. + + The tool tools/ffescape in the FFmpeg source tree can be used to + automatically quote or escape a string in a script. + + Examples + + o Escape the string "Crime d'Amour" containing the "'" special + character: + + Crime d\'Amour + + o The string above contains a quote, so the "'" needs to be escaped + when quoting it: + + 'Crime d'\''Amour' + + o Include leading or trailing whitespaces using quoting: + + ' this string starts and ends with whitespaces ' + + o Escaping and quoting can be mixed together: + + ' The string '\'string\'' is a string ' + + o To include a literal \ you can use either escaping or quoting: + + 'c:\foo' can be written as c:\\foo + + Date + The accepted syntax is: + + [(YYYY-MM-DD|YYYYMMDD)[T|t| ]]((HH:MM:SS[.m...]]])|(HHMMSS[.m...]]]))[Z] + now + + If the value is "now" it takes the current time. + + Time is local time unless Z is appended, in which case it is + interpreted as UTC. If the year-month-day part is not specified it + takes the current year-month-day. + + Time duration + There are two accepted syntaxes for expressing time duration. + + [-][:]:[....] 
+ + HH expresses the number of hours, MM the number of minutes for a + maximum of 2 digits, and SS the number of seconds for a maximum of 2 + digits. The m at the end expresses decimal value for SS. + + or + + [-]+[....][s|ms|us] + + S expresses the number of seconds, with the optional decimal part m. + The optional literal suffixes s, ms or us indicate to interpret the + value as seconds, milliseconds or microseconds, respectively. + + In both expressions, the optional - indicates negative duration. + + Examples + + The following examples are all valid time duration: + + 55 55 seconds + + 0.2 0.2 seconds + + 200ms + 200 milliseconds, that's 0.2s + + 200000us + 200000 microseconds, that's 0.2s + + 12:03:45 + 12 hours, 03 minutes and 45 seconds + + 23.189 + 23.189 seconds + + Video size + Specify the size of the sourced video, it may be a string of the form + widthxheight, or the name of a size abbreviation. + + The following abbreviations are recognized: + + ntsc + 720x480 + + pal 720x576 + + qntsc + 352x240 + + qpal + 352x288 + + sntsc + 640x480 + + spal + 768x576 + + film + 352x240 + + ntsc-film + 352x240 + + sqcif + 128x96 + + qcif + 176x144 + + cif 352x288 + + 4cif + 704x576 + + 16cif + 1408x1152 + + qqvga + 160x120 + + qvga + 320x240 + + vga 640x480 + + svga + 800x600 + + xga 1024x768 + + uxga + 1600x1200 + + qxga + 2048x1536 + + sxga + 1280x1024 + + qsxga + 2560x2048 + + hsxga + 5120x4096 + + wvga + 852x480 + + wxga + 1366x768 + + wsxga + 1600x1024 + + wuxga + 1920x1200 + + woxga + 2560x1600 + + wqsxga + 3200x2048 + + wquxga + 3840x2400 + + whsxga + 6400x4096 + + whuxga + 7680x4800 + + cga 320x200 + + ega 640x350 + + hd480 + 852x480 + + hd720 + 1280x720 + + hd1080 + 1920x1080 + + 2k 2048x1080 + + 2kflat + 1998x1080 + + 2kscope + 2048x858 + + 4k 4096x2160 + + 4kflat + 3996x2160 + + 4kscope + 4096x1716 + + nhd 640x360 + + hqvga + 240x160 + + wqvga + 400x240 + + fwqvga + 432x240 + + hvga + 480x320 + + qhd 960x540 + + 2kdci + 2048x1080 + + 4kdci + 4096x2160 + + 
uhd2160 + 3840x2160 + + uhd4320 + 7680x4320 + + Video rate + Specify the frame rate of a video, expressed as the number of frames + generated per second. It has to be a string in the format + frame_rate_num/frame_rate_den, an integer number, a float number or a + valid video frame rate abbreviation. + + The following abbreviations are recognized: + + ntsc + 30000/1001 + + pal 25/1 + + qntsc + 30000/1001 + + qpal + 25/1 + + sntsc + 30000/1001 + + spal + 25/1 + + film + 24/1 + + ntsc-film + 24000/1001 + + Ratio + A ratio can be expressed as an expression, or in the form + numerator:denominator. + + Note that a ratio with infinite (1/0) or negative value is considered + valid, so you should check on the returned value if you want to exclude + those values. + + The undefined value can be expressed using the "0:0" string. + + Color + It can be the name of a color as defined below (case insensitive match) + or a "[0x|#]RRGGBB[AA]" sequence, possibly followed by @ and a string + representing the alpha component. + + The alpha component may be a string composed by "0x" followed by an + hexadecimal number or a decimal number between 0.0 and 1.0, which + represents the opacity value (0x00 or 0.0 means completely transparent, + 0xff or 1.0 completely opaque). If the alpha component is not specified + then 0xff is assumed. + + The string random will result in a random color. 
+ + The following names of colors are recognized: + + AliceBlue + 0xF0F8FF + + AntiqueWhite + 0xFAEBD7 + + Aqua + 0x00FFFF + + Aquamarine + 0x7FFFD4 + + Azure + 0xF0FFFF + + Beige + 0xF5F5DC + + Bisque + 0xFFE4C4 + + Black + 0x000000 + + BlanchedAlmond + 0xFFEBCD + + Blue + 0x0000FF + + BlueViolet + 0x8A2BE2 + + Brown + 0xA52A2A + + BurlyWood + 0xDEB887 + + CadetBlue + 0x5F9EA0 + + Chartreuse + 0x7FFF00 + + Chocolate + 0xD2691E + + Coral + 0xFF7F50 + + CornflowerBlue + 0x6495ED + + Cornsilk + 0xFFF8DC + + Crimson + 0xDC143C + + Cyan + 0x00FFFF + + DarkBlue + 0x00008B + + DarkCyan + 0x008B8B + + DarkGoldenRod + 0xB8860B + + DarkGray + 0xA9A9A9 + + DarkGreen + 0x006400 + + DarkKhaki + 0xBDB76B + + DarkMagenta + 0x8B008B + + DarkOliveGreen + 0x556B2F + + Darkorange + 0xFF8C00 + + DarkOrchid + 0x9932CC + + DarkRed + 0x8B0000 + + DarkSalmon + 0xE9967A + + DarkSeaGreen + 0x8FBC8F + + DarkSlateBlue + 0x483D8B + + DarkSlateGray + 0x2F4F4F + + DarkTurquoise + 0x00CED1 + + DarkViolet + 0x9400D3 + + DeepPink + 0xFF1493 + + DeepSkyBlue + 0x00BFFF + + DimGray + 0x696969 + + DodgerBlue + 0x1E90FF + + FireBrick + 0xB22222 + + FloralWhite + 0xFFFAF0 + + ForestGreen + 0x228B22 + + Fuchsia + 0xFF00FF + + Gainsboro + 0xDCDCDC + + GhostWhite + 0xF8F8FF + + Gold + 0xFFD700 + + GoldenRod + 0xDAA520 + + Gray + 0x808080 + + Green + 0x008000 + + GreenYellow + 0xADFF2F + + HoneyDew + 0xF0FFF0 + + HotPink + 0xFF69B4 + + IndianRed + 0xCD5C5C + + Indigo + 0x4B0082 + + Ivory + 0xFFFFF0 + + Khaki + 0xF0E68C + + Lavender + 0xE6E6FA + + LavenderBlush + 0xFFF0F5 + + LawnGreen + 0x7CFC00 + + LemonChiffon + 0xFFFACD + + LightBlue + 0xADD8E6 + + LightCoral + 0xF08080 + + LightCyan + 0xE0FFFF + + LightGoldenRodYellow + 0xFAFAD2 + + LightGreen + 0x90EE90 + + LightGrey + 0xD3D3D3 + + LightPink + 0xFFB6C1 + + LightSalmon + 0xFFA07A + + LightSeaGreen + 0x20B2AA + + LightSkyBlue + 0x87CEFA + + LightSlateGray + 0x778899 + + LightSteelBlue + 0xB0C4DE + + LightYellow + 0xFFFFE0 + + Lime + 0x00FF00 + + 
LimeGreen + 0x32CD32 + + Linen + 0xFAF0E6 + + Magenta + 0xFF00FF + + Maroon + 0x800000 + + MediumAquaMarine + 0x66CDAA + + MediumBlue + 0x0000CD + + MediumOrchid + 0xBA55D3 + + MediumPurple + 0x9370D8 + + MediumSeaGreen + 0x3CB371 + + MediumSlateBlue + 0x7B68EE + + MediumSpringGreen + 0x00FA9A + + MediumTurquoise + 0x48D1CC + + MediumVioletRed + 0xC71585 + + MidnightBlue + 0x191970 + + MintCream + 0xF5FFFA + + MistyRose + 0xFFE4E1 + + Moccasin + 0xFFE4B5 + + NavajoWhite + 0xFFDEAD + + Navy + 0x000080 + + OldLace + 0xFDF5E6 + + Olive + 0x808000 + + OliveDrab + 0x6B8E23 + + Orange + 0xFFA500 + + OrangeRed + 0xFF4500 + + Orchid + 0xDA70D6 + + PaleGoldenRod + 0xEEE8AA + + PaleGreen + 0x98FB98 + + PaleTurquoise + 0xAFEEEE + + PaleVioletRed + 0xD87093 + + PapayaWhip + 0xFFEFD5 + + PeachPuff + 0xFFDAB9 + + Peru + 0xCD853F + + Pink + 0xFFC0CB + + Plum + 0xDDA0DD + + PowderBlue + 0xB0E0E6 + + Purple + 0x800080 + + Red 0xFF0000 + + RosyBrown + 0xBC8F8F + + RoyalBlue + 0x4169E1 + + SaddleBrown + 0x8B4513 + + Salmon + 0xFA8072 + + SandyBrown + 0xF4A460 + + SeaGreen + 0x2E8B57 + + SeaShell + 0xFFF5EE + + Sienna + 0xA0522D + + Silver + 0xC0C0C0 + + SkyBlue + 0x87CEEB + + SlateBlue + 0x6A5ACD + + SlateGray + 0x708090 + + Snow + 0xFFFAFA + + SpringGreen + 0x00FF7F + + SteelBlue + 0x4682B4 + + Tan 0xD2B48C + + Teal + 0x008080 + + Thistle + 0xD8BFD8 + + Tomato + 0xFF6347 + + Turquoise + 0x40E0D0 + + Violet + 0xEE82EE + + Wheat + 0xF5DEB3 + + White + 0xFFFFFF + + WhiteSmoke + 0xF5F5F5 + + Yellow + 0xFFFF00 + + YellowGreen + 0x9ACD32 + + Channel Layout + A channel layout specifies the spatial disposition of the channels in a + multi-channel audio stream. To specify a channel layout, FFmpeg makes + use of a special syntax. 
+
+       Individual channels are identified by an id, as given by the table
+       below:
+
+       FL  front left
+
+       FR  front right
+
+       FC  front center
+
+       LFE low frequency
+
+       BL  back left
+
+       BR  back right
+
+       FLC front left-of-center
+
+       FRC front right-of-center
+
+       BC  back center
+
+       SL  side left
+
+       SR  side right
+
+       TC  top center
+
+       TFL top front left
+
+       TFC top front center
+
+       TFR top front right
+
+       TBL top back left
+
+       TBC top back center
+
+       TBR top back right
+
+       DL  downmix left
+
+       DR  downmix right
+
+       WL  wide left
+
+       WR  wide right
+
+       SDL surround direct left
+
+       SDR surround direct right
+
+       LFE2
+           low frequency 2
+
+       Standard channel layout compositions can be specified by using the
+       following identifiers:
+
+       mono
+           FC
+
+       stereo
+           FL+FR
+
+       2.1 FL+FR+LFE
+
+       3.0 FL+FR+FC
+
+       3.0(back)
+           FL+FR+BC
+
+       4.0 FL+FR+FC+BC
+
+       quad
+           FL+FR+BL+BR
+
+       quad(side)
+           FL+FR+SL+SR
+
+       3.1 FL+FR+FC+LFE
+
+       5.0 FL+FR+FC+BL+BR
+
+       5.0(side)
+           FL+FR+FC+SL+SR
+
+       4.1 FL+FR+FC+LFE+BC
+
+       5.1 FL+FR+FC+LFE+BL+BR
+
+       5.1(side)
+           FL+FR+FC+LFE+SL+SR
+
+       6.0 FL+FR+FC+BC+SL+SR
+
+       6.0(front)
+           FL+FR+FLC+FRC+SL+SR
+
+       hexagonal
+           FL+FR+FC+BL+BR+BC
+
+       6.1 FL+FR+FC+LFE+BC+SL+SR
+
+       6.1(back)
+           FL+FR+FC+LFE+BL+BR+BC
+
+       6.1(front)
+           FL+FR+LFE+FLC+FRC+SL+SR
+
+       7.0 FL+FR+FC+BL+BR+SL+SR
+
+       7.0(front)
+           FL+FR+FC+FLC+FRC+SL+SR
+
+       7.1 FL+FR+FC+LFE+BL+BR+SL+SR
+
+       7.1(wide)
+           FL+FR+FC+LFE+BL+BR+FLC+FRC
+
+       7.1(wide-side)
+           FL+FR+FC+LFE+FLC+FRC+SL+SR
+
+       octagonal
+           FL+FR+FC+BL+BR+BC+SL+SR
+
+       hexadecagonal
+           FL+FR+FC+BL+BR+BC+SL+SR+WL+WR+TBL+TBR+TBC+TFC+TFL+TFR
+
+       downmix
+           DL+DR
+
+       A custom channel layout can be specified as a sequence of terms,
+       separated by '+' or '|'. Each term can be:
+
+       o   the name of a standard channel layout (e.g. mono, stereo, 4.0,
+           quad, 5.0, etc.)
+
+       o   the name of a single channel (e.g. FL, FR, FC, LFE, etc.)
+ + o a number of channels, in decimal, followed by 'c', yielding the + default channel layout for that number of channels (see the + function "av_get_default_channel_layout"). Note that not all + channel counts have a default layout. + + o a number of channels, in decimal, followed by 'C', yielding an + unknown channel layout with the specified number of channels. Note + that not all channel layout specification strings support unknown + channel layouts. + + o a channel layout mask, in hexadecimal starting with "0x" (see the + "AV_CH_*" macros in libavutil/channel_layout.h. + + Before libavutil version 53 the trailing character "c" to specify a + number of channels was optional, but now it is required, while a + channel layout mask can also be specified as a decimal number (if and + only if not followed by "c" or "C"). + + See also the function "av_get_channel_layout" defined in + libavutil/channel_layout.h. + +EXPRESSION EVALUATION + When evaluating an arithmetic expression, FFmpeg uses an internal + formula evaluator, implemented through the libavutil/eval.h interface. + + An expression may contain unary, binary operators, constants, and + functions. + + Two expressions expr1 and expr2 can be combined to form another + expression "expr1;expr2". expr1 and expr2 are evaluated in turn, and + the new expression evaluates to the value of expr2. + + The following binary operators are available: "+", "-", "*", "/", "^". + + The following unary operators are available: "+", "-". + + The following functions are available: + + abs(x) + Compute absolute value of x. + + acos(x) + Compute arccosine of x. + + asin(x) + Compute arcsine of x. + + atan(x) + Compute arctangent of x. + + atan2(x, y) + Compute principal value of the arc tangent of y/x. + + between(x, min, max) + Return 1 if x is greater than or equal to min and lesser than or + equal to max, 0 otherwise. + + bitand(x, y) + bitor(x, y) + Compute bitwise and/or operation on x and y. 
+ + The results of the evaluation of x and y are converted to integers + before executing the bitwise operation. + + Note that both the conversion to integer and the conversion back to + floating point can lose precision. Beware of unexpected results for + large numbers (usually 2^53 and larger). + + ceil(expr) + Round the value of expression expr upwards to the nearest integer. + For example, "ceil(1.5)" is "2.0". + + clip(x, min, max) + Return the value of x clipped between min and max. + + cos(x) + Compute cosine of x. + + cosh(x) + Compute hyperbolic cosine of x. + + eq(x, y) + Return 1 if x and y are equivalent, 0 otherwise. + + exp(x) + Compute exponential of x (with base "e", the Euler's number). + + floor(expr) + Round the value of expression expr downwards to the nearest + integer. For example, "floor(-1.5)" is "-2.0". + + gauss(x) + Compute Gauss function of x, corresponding to "exp(-x*x/2) / + sqrt(2*PI)". + + gcd(x, y) + Return the greatest common divisor of x and y. If both x and y are + 0 or either or both are less than zero then behavior is undefined. + + gt(x, y) + Return 1 if x is greater than y, 0 otherwise. + + gte(x, y) + Return 1 if x is greater than or equal to y, 0 otherwise. + + hypot(x, y) + This function is similar to the C function with the same name; it + returns "sqrt(x*x + y*y)", the length of the hypotenuse of a right + triangle with sides of length x and y, or the distance of the point + (x, y) from the origin. + + if(x, y) + Evaluate x, and if the result is non-zero return the result of the + evaluation of y, return 0 otherwise. + + if(x, y, z) + Evaluate x, and if the result is non-zero return the evaluation + result of y, otherwise the evaluation result of z. + + ifnot(x, y) + Evaluate x, and if the result is zero return the result of the + evaluation of y, return 0 otherwise. + + ifnot(x, y, z) + Evaluate x, and if the result is zero return the evaluation result + of y, otherwise the evaluation result of z. 
+ + isinf(x) + Return 1.0 if x is +/-INFINITY, 0.0 otherwise. + + isnan(x) + Return 1.0 if x is NAN, 0.0 otherwise. + + ld(var) + Load the value of the internal variable with number var, which was + previously stored with st(var, expr). The function returns the + loaded value. + + lerp(x, y, z) + Return linear interpolation between x and y by amount of z. + + log(x) + Compute natural logarithm of x. + + lt(x, y) + Return 1 if x is lesser than y, 0 otherwise. + + lte(x, y) + Return 1 if x is lesser than or equal to y, 0 otherwise. + + max(x, y) + Return the maximum between x and y. + + min(x, y) + Return the minimum between x and y. + + mod(x, y) + Compute the remainder of division of x by y. + + not(expr) + Return 1.0 if expr is zero, 0.0 otherwise. + + pow(x, y) + Compute the power of x elevated y, it is equivalent to "(x)^(y)". + + print(t) + print(t, l) + Print the value of expression t with loglevel l. If l is not + specified then a default log level is used. Returns the value of + the expression printed. + + Prints t with loglevel l + + random(x) + Return a pseudo random value between 0.0 and 1.0. x is the index of + the internal variable which will be used to save the seed/state. + + root(expr, max) + Find an input value for which the function represented by expr with + argument ld(0) is 0 in the interval 0..max. + + The expression in expr must denote a continuous function or the + result is undefined. + + ld(0) is used to represent the function input value, which means + that the given expression will be evaluated multiple times with + various input values that the expression can access through ld(0). + When the expression evaluates to 0 then the corresponding input + value will be returned. + + round(expr) + Round the value of expression expr to the nearest integer. For + example, "round(1.5)" is "2.0". + + sgn(x) + Compute sign of x. + + sin(x) + Compute sine of x. + + sinh(x) + Compute hyperbolic sine of x. 
+ + sqrt(expr) + Compute the square root of expr. This is equivalent to "(expr)^.5". + + squish(x) + Compute expression "1/(1 + exp(4*x))". + + st(var, expr) + Store the value of the expression expr in an internal variable. var + specifies the number of the variable where to store the value, and + it is a value ranging from 0 to 9. The function returns the value + stored in the internal variable. Note, Variables are currently not + shared between expressions. + + tan(x) + Compute tangent of x. + + tanh(x) + Compute hyperbolic tangent of x. + + taylor(expr, x) + taylor(expr, x, id) + Evaluate a Taylor series at x, given an expression representing the + "ld(id)"-th derivative of a function at 0. + + When the series does not converge the result is undefined. + + ld(id) is used to represent the derivative order in expr, which + means that the given expression will be evaluated multiple times + with various input values that the expression can access through + "ld(id)". If id is not specified then 0 is assumed. + + Note, when you have the derivatives at y instead of 0, + "taylor(expr, x-y)" can be used. + + time(0) + Return the current (wallclock) time in seconds. + + trunc(expr) + Round the value of expression expr towards zero to the nearest + integer. For example, "trunc(-1.5)" is "-1.0". + + while(cond, expr) + Evaluate expression expr while the expression cond is non-zero, and + returns the value of the last expr evaluation, or NAN if cond was + always false. 
+
+       The following constants are available:
+
+       PI  area of the unit disc, approximately 3.14
+
+       E   exp(1) (Euler's number), approximately 2.718
+
+       PHI golden ratio (1+sqrt(5))/2, approximately 1.618
+
+       Assuming that an expression is considered "true" if it has a non-zero
+       value, note that:
+
+       "*" works like AND
+
+       "+" works like OR
+
+       For example the construct:
+
+               if (A AND B) then C
+
+       is equivalent to:
+
+               if(A*B, C)
+
+       In your C code, you can extend the list of unary and binary functions,
+       and define recognized constants, so that they are available for your
+       expressions.
+
+       The evaluator also recognizes the International System unit prefixes.
+       If 'i' is appended after the prefix, binary prefixes are used, which
+       are based on powers of 1024 instead of powers of 1000. The 'B' postfix
+       multiplies the value by 8, and can be appended after a unit prefix or
+       used alone. This allows using for example 'KB', 'MiB', 'G' and 'B' as
+       number postfix.
+
+       The list of available International System prefixes follows, with
+       indication of the corresponding powers of 10 and of 2.
+
+       y   10^-24 / 2^-80
+
+       z   10^-21 / 2^-70
+
+       a   10^-18 / 2^-60
+
+       f   10^-15 / 2^-50
+
+       p   10^-12 / 2^-40
+
+       n   10^-9 / 2^-30
+
+       u   10^-6 / 2^-20
+
+       m   10^-3 / 2^-10
+
+       c   10^-2
+
+       d   10^-1
+
+       h   10^2
+
+       k   10^3 / 2^10
+
+       K   10^3 / 2^10
+
+       M   10^6 / 2^20
+
+       G   10^9 / 2^30
+
+       T   10^12 / 2^40
+
+       P   10^15 / 2^50
+
+       E   10^18 / 2^60
+
+       Z   10^21 / 2^70
+
+       Y   10^24 / 2^80
+
+CODEC OPTIONS
+       libavcodec provides some generic global options, which can be set on
+       all the encoders and decoders. In addition each codec may support so-
+       called private options, which are specific for a given codec.
+
+       Sometimes, a global option may only affect a specific kind of codec,
+       and may be nonsensical or ignored by another, so you need to be aware
+       of the meaning of the specified options. Also some options are meant
+       only for decoding or encoding.
+ + Options may be set by specifying -option value in the FFmpeg tools, or + by setting the value explicitly in the "AVCodecContext" options or + using the libavutil/opt.h API for programmatic use. + + The list of supported options follow: + + b integer (encoding,audio,video) + Set bitrate in bits/s. Default value is 200K. + + ab integer (encoding,audio) + Set audio bitrate (in bits/s). Default value is 128K. + + bt integer (encoding,video) + Set video bitrate tolerance (in bits/s). In 1-pass mode, bitrate + tolerance specifies how far ratecontrol is willing to deviate from + the target average bitrate value. This is not related to min/max + bitrate. Lowering tolerance too much has an adverse effect on + quality. + + flags flags (decoding/encoding,audio,video,subtitles) + Set generic flags. + + Possible values: + + mv4 Use four motion vector by macroblock (mpeg4). + + qpel + Use 1/4 pel motion compensation. + + loop + Use loop filter. + + qscale + Use fixed qscale. + + pass1 + Use internal 2pass ratecontrol in first pass mode. + + pass2 + Use internal 2pass ratecontrol in second pass mode. + + gray + Only decode/encode grayscale. + + psnr + Set error[?] variables during encoding. + + truncated + Input bitstream might be randomly truncated. + + drop_changed + Don't output frames whose parameters differ from first decoded + frame in stream. Error AVERROR_INPUT_CHANGED is returned when + a frame is dropped. + + ildct + Use interlaced DCT. + + low_delay + Force low delay. + + global_header + Place global headers in extradata instead of every keyframe. + + bitexact + Only write platform-, build- and time-independent data. (except + (I)DCT). This ensures that file and data checksums are + reproducible and match between platforms. Its primary use is + for regression testing. + + aic Apply H263 advanced intra coding / mpeg4 ac prediction. + + ilme + Apply interlaced motion estimation. + + cgop + Use closed gop. + + output_corrupt + Output even potentially corrupted frames. 
+ + time_base rational number + Set codec time base. + + It is the fundamental unit of time (in seconds) in terms of which + frame timestamps are represented. For fixed-fps content, timebase + should be "1 / frame_rate" and timestamp increments should be + identically 1. + + g integer (encoding,video) + Set the group of picture (GOP) size. Default value is 12. + + ar integer (decoding/encoding,audio) + Set audio sampling rate (in Hz). + + ac integer (decoding/encoding,audio) + Set number of audio channels. + + cutoff integer (encoding,audio) + Set cutoff bandwidth. (Supported only by selected encoders, see + their respective documentation sections.) + + frame_size integer (encoding,audio) + Set audio frame size. + + Each submitted frame except the last must contain exactly + frame_size samples per channel. May be 0 when the codec has + CODEC_CAP_VARIABLE_FRAME_SIZE set, in that case the frame size is + not restricted. It is set by some decoders to indicate constant + frame size. + + frame_number integer + Set the frame number. + + delay integer + qcomp float (encoding,video) + Set video quantizer scale compression (VBR). It is used as a + constant in the ratecontrol equation. Recommended range for default + rc_eq: 0.0-1.0. + + qblur float (encoding,video) + Set video quantizer scale blur (VBR). + + qmin integer (encoding,video) + Set min video quantizer scale (VBR). Must be included between -1 + and 69, default value is 2. + + qmax integer (encoding,video) + Set max video quantizer scale (VBR). Must be included between -1 + and 1024, default value is 31. + + qdiff integer (encoding,video) + Set max difference between the quantizer scale (VBR). + + bf integer (encoding,video) + Set max number of B frames between non-B-frames. + + Must be an integer between -1 and 16. 0 means that B-frames are + disabled. If a value of -1 is used, it will choose an automatic + value depending on the encoder. + + Default value is 0. 
+ + b_qfactor float (encoding,video) + Set qp factor between P and B frames. + + b_strategy integer (encoding,video) + Set strategy to choose between I/P/B-frames. + + ps integer (encoding,video) + Set RTP payload size in bytes. + + mv_bits integer + header_bits integer + i_tex_bits integer + p_tex_bits integer + i_count integer + p_count integer + skip_count integer + misc_bits integer + frame_bits integer + codec_tag integer + bug flags (decoding,video) + Workaround not auto detected encoder bugs. + + Possible values: + + autodetect + xvid_ilace + Xvid interlacing bug (autodetected if fourcc==XVIX) + + ump4 + (autodetected if fourcc==UMP4) + + no_padding + padding bug (autodetected) + + amv + qpel_chroma + std_qpel + old standard qpel (autodetected per fourcc/version) + + qpel_chroma2 + direct_blocksize + direct-qpel-blocksize bug (autodetected per fourcc/version) + + edge + edge padding bug (autodetected per fourcc/version) + + hpel_chroma + dc_clip + ms Workaround various bugs in microsoft broken decoders. + + trunc + trancated frames + + strict integer (decoding/encoding,audio,video) + Specify how strictly to follow the standards. + + Possible values: + + very + strictly conform to an older more strict version of the spec or + reference software + + strict + strictly conform to all the things in the spec no matter what + consequences + + normal + unofficial + allow unofficial extensions + + experimental + allow non standardized experimental things, experimental + (unfinished/work in progress/not well tested) decoders and + encoders. Note: experimental decoders can pose a security + risk, do not use this for decoding untrusted input. + + b_qoffset float (encoding,video) + Set QP offset between P and B frames. + + err_detect flags (decoding,audio,video) + Set error detection flags. 
+ + Possible values: + + crccheck + verify embedded CRCs + + bitstream + detect bitstream specification deviations + + buffer + detect improper bitstream length + + explode + abort decoding on minor error detection + + ignore_err + ignore decoding errors, and continue decoding. This is useful + if you want to analyze the content of a video and thus want + everything to be decoded no matter what. This option will not + result in a video that is pleasing to watch in case of errors. + + careful + consider things that violate the spec and have not been seen in + the wild as errors + + compliant + consider all spec non compliancies as errors + + aggressive + consider things that a sane encoder should not do as an error + + has_b_frames integer + block_align integer + mpeg_quant integer (encoding,video) + Use MPEG quantizers instead of H.263. + + rc_override_count integer + maxrate integer (encoding,audio,video) + Set max bitrate tolerance (in bits/s). Requires bufsize to be set. + + minrate integer (encoding,audio,video) + Set min bitrate tolerance (in bits/s). Most useful in setting up a + CBR encode. It is of little use elsewise. + + bufsize integer (encoding,audio,video) + Set ratecontrol buffer size (in bits). + + i_qfactor float (encoding,video) + Set QP factor between P and I frames. + + i_qoffset float (encoding,video) + Set QP offset between P and I frames. + + dct integer (encoding,video) + Set DCT algorithm. + + Possible values: + + auto + autoselect a good one (default) + + fastint + fast integer + + int accurate integer + + mmx + altivec + faan + floating point AAN DCT + + lumi_mask float (encoding,video) + Compress bright areas stronger than medium ones. + + tcplx_mask float (encoding,video) + Set temporal complexity masking. + + scplx_mask float (encoding,video) + Set spatial complexity masking. + + p_mask float (encoding,video) + Set inter masking. + + dark_mask float (encoding,video) + Compress dark areas stronger than medium ones. 
+ + idct integer (decoding/encoding,video) + Select IDCT implementation. + + Possible values: + + auto + int + simple + simplemmx + simpleauto + Automatically pick a IDCT compatible with the simple one + + arm + altivec + sh4 + simplearm + simplearmv5te + simplearmv6 + simpleneon + xvid + faani + floating point AAN IDCT + + slice_count integer + ec flags (decoding,video) + Set error concealment strategy. + + Possible values: + + guess_mvs + iterative motion vector (MV) search (slow) + + deblock + use strong deblock filter for damaged MBs + + favor_inter + favor predicting from the previous frame instead of the current + + bits_per_coded_sample integer + pred integer (encoding,video) + Set prediction method. + + Possible values: + + left + plane + median + aspect rational number (encoding,video) + Set sample aspect ratio. + + sar rational number (encoding,video) + Set sample aspect ratio. Alias to aspect. + + debug flags (decoding/encoding,audio,video,subtitles) + Print specific debug info. + + Possible values: + + pict + picture info + + rc rate control + + bitstream + mb_type + macroblock (MB) type + + qp per-block quantization parameter (QP) + + dct_coeff + green_metadata + display complexity metadata for the upcoming frame, GoP or for + a given duration. + + skip + startcode + er error recognition + + mmco + memory management control operations (H.264) + + bugs + buffers + picture buffer allocations + + thread_ops + threading operations + + nomc + skip motion compensation + + cmp integer (encoding,video) + Set full pel me compare function. 
+ + Possible values: + + sad sum of absolute differences, fast (default) + + sse sum of squared errors + + satd + sum of absolute Hadamard transformed differences + + dct sum of absolute DCT transformed differences + + psnr + sum of squared quantization errors (avoid, low quality) + + bit number of bits needed for the block + + rd rate distortion optimal, slow + + zero + 0 + + vsad + sum of absolute vertical differences + + vsse + sum of squared vertical differences + + nsse + noise preserving sum of squared differences + + w53 5/3 wavelet, only used in snow + + w97 9/7 wavelet, only used in snow + + dctmax + chroma + subcmp integer (encoding,video) + Set sub pel me compare function. + + Possible values: + + sad sum of absolute differences, fast (default) + + sse sum of squared errors + + satd + sum of absolute Hadamard transformed differences + + dct sum of absolute DCT transformed differences + + psnr + sum of squared quantization errors (avoid, low quality) + + bit number of bits needed for the block + + rd rate distortion optimal, slow + + zero + 0 + + vsad + sum of absolute vertical differences + + vsse + sum of squared vertical differences + + nsse + noise preserving sum of squared differences + + w53 5/3 wavelet, only used in snow + + w97 9/7 wavelet, only used in snow + + dctmax + chroma + mbcmp integer (encoding,video) + Set macroblock compare function. 
+ + Possible values: + + sad sum of absolute differences, fast (default) + + sse sum of squared errors + + satd + sum of absolute Hadamard transformed differences + + dct sum of absolute DCT transformed differences + + psnr + sum of squared quantization errors (avoid, low quality) + + bit number of bits needed for the block + + rd rate distortion optimal, slow + + zero + 0 + + vsad + sum of absolute vertical differences + + vsse + sum of squared vertical differences + + nsse + noise preserving sum of squared differences + + w53 5/3 wavelet, only used in snow + + w97 9/7 wavelet, only used in snow + + dctmax + chroma + ildctcmp integer (encoding,video) + Set interlaced dct compare function. + + Possible values: + + sad sum of absolute differences, fast (default) + + sse sum of squared errors + + satd + sum of absolute Hadamard transformed differences + + dct sum of absolute DCT transformed differences + + psnr + sum of squared quantization errors (avoid, low quality) + + bit number of bits needed for the block + + rd rate distortion optimal, slow + + zero + 0 + + vsad + sum of absolute vertical differences + + vsse + sum of squared vertical differences + + nsse + noise preserving sum of squared differences + + w53 5/3 wavelet, only used in snow + + w97 9/7 wavelet, only used in snow + + dctmax + chroma + dia_size integer (encoding,video) + Set diamond type & size for motion estimation. + + (1024, INT_MAX) + full motion estimation(slowest) + + (768, 1024] + umh motion estimation + + (512, 768] + hex motion estimation + + (256, 512] + l2s diamond motion estimation + + [2,256] + var diamond motion estimation + + (-1, 2) + small diamond motion estimation + + -1 funny diamond motion estimation + + (INT_MIN, -1) + sab diamond motion estimation + + last_pred integer (encoding,video) + Set amount of motion predictors from the previous frame. + + preme integer (encoding,video) + Set pre motion estimation. 
+ + precmp integer (encoding,video) + Set pre motion estimation compare function. + + Possible values: + + sad sum of absolute differences, fast (default) + + sse sum of squared errors + + satd + sum of absolute Hadamard transformed differences + + dct sum of absolute DCT transformed differences + + psnr + sum of squared quantization errors (avoid, low quality) + + bit number of bits needed for the block + + rd rate distortion optimal, slow + + zero + 0 + + vsad + sum of absolute vertical differences + + vsse + sum of squared vertical differences + + nsse + noise preserving sum of squared differences + + w53 5/3 wavelet, only used in snow + + w97 9/7 wavelet, only used in snow + + dctmax + chroma + pre_dia_size integer (encoding,video) + Set diamond type & size for motion estimation pre-pass. + + subq integer (encoding,video) + Set sub pel motion estimation quality. + + me_range integer (encoding,video) + Set limit motion vectors range (1023 for DivX player). + + global_quality integer (encoding,audio,video) + coder integer (encoding,video) + Possible values: + + vlc variable length coder / huffman coder + + ac arithmetic coder + + raw raw (no encoding) + + rle run-length coder + + context integer (encoding,video) + Set context model. + + slice_flags integer + mbd integer (encoding,video) + Set macroblock decision algorithm (high quality mode). + + Possible values: + + simple + use mbcmp (default) + + bits + use fewest bits + + rd use best rate distortion + + sc_threshold integer (encoding,video) + Set scene change threshold. + + nr integer (encoding,video) + Set noise reduction. + + rc_init_occupancy integer (encoding,video) + Set number of bits which should be loaded into the rc buffer before + decoding starts. + + flags2 flags (decoding/encoding,audio,video,subtitles) + Possible values: + + fast + Allow non spec compliant speedup tricks. + + noout + Skip bitstream encoding. + + ignorecrop + Ignore cropping information from sps. 
+ + local_header + Place global headers at every keyframe instead of in extradata. + + chunks + Frame data might be split into multiple chunks. + + showall + Show all frames before the first keyframe. + + export_mvs + Export motion vectors into frame side-data (see + "AV_FRAME_DATA_MOTION_VECTORS") for codecs that support it. See + also doc/examples/export_mvs.c. + + skip_manual + Do not skip samples and export skip information as frame side + data. + + ass_ro_flush_noop + Do not reset ASS ReadOrder field on flush. + + export_side_data flags (decoding/encoding,audio,video,subtitles) + Possible values: + + mvs Export motion vectors into frame side-data (see + "AV_FRAME_DATA_MOTION_VECTORS") for codecs that support it. See + also doc/examples/export_mvs.c. + + prft + Export encoder Producer Reference Time into packet side-data + (see "AV_PKT_DATA_PRFT") for codecs that support it. + + venc_params + Export video encoding parameters through frame side data (see + "AV_FRAME_DATA_VIDEO_ENC_PARAMS") for codecs that support it. + At present, those are H.264 and VP9. + + film_grain + Export film grain parameters through frame side data (see + "AV_FRAME_DATA_FILM_GRAIN_PARAMS"). Supported at present by + AV1 decoders. + + threads integer (decoding/encoding,video) + Set the number of threads to be used, in case the selected codec + implementation supports multi-threading. + + Possible values: + + auto, 0 + automatically select the number of threads to set + + Default value is auto. + + dc integer (encoding,video) + Set intra_dc_precision. + + nssew integer (encoding,video) + Set nsse weight. + + skip_top integer (decoding,video) + Set number of macroblock rows at the top which are skipped. + + skip_bottom integer (decoding,video) + Set number of macroblock rows at the bottom which are skipped. + + profile integer (encoding,audio,video) + Set encoder codec profile. Default value is unknown. Encoder + specific profiles are documented in the relevant encoder + documentation. 
+ + level integer (encoding,audio,video) + Possible values: + + unknown + lowres integer (decoding,audio,video) + Decode at 1=1/2, 2=1/4, 3=1/8 resolutions. + + skip_threshold integer (encoding,video) + Set frame skip threshold. + + skip_factor integer (encoding,video) + Set frame skip factor. + + skip_exp integer (encoding,video) + Set frame skip exponent. Negative values behave identically to the + corresponding positive ones, except that the score is normalized. + Positive values exist primarily for compatibility reasons and are + not so useful. + + skipcmp integer (encoding,video) + Set frame skip compare function. + + Possible values: + + sad sum of absolute differences, fast (default) + + sse sum of squared errors + + satd + sum of absolute Hadamard transformed differences + + dct sum of absolute DCT transformed differences + + psnr + sum of squared quantization errors (avoid, low quality) + + bit number of bits needed for the block + + rd rate distortion optimal, slow + + zero + 0 + + vsad + sum of absolute vertical differences + + vsse + sum of squared vertical differences + + nsse + noise preserving sum of squared differences + + w53 5/3 wavelet, only used in snow + + w97 9/7 wavelet, only used in snow + + dctmax + chroma + mblmin integer (encoding,video) + Set min macroblock lagrange factor (VBR). + + mblmax integer (encoding,video) + Set max macroblock lagrange factor (VBR). + + mepc integer (encoding,video) + Set motion estimation bitrate penalty compensation (1.0 = 256). + + skip_loop_filter integer (decoding,video) + skip_idct integer (decoding,video) + skip_frame integer (decoding,video) + Make decoder discard processing depending on the frame type + selected by the option value. + + skip_loop_filter skips frame loop filtering, skip_idct skips frame + IDCT/dequantization, skip_frame skips decoding. + + Possible values: + + none + Discard no frame. + + default + Discard useless frames like 0-sized frames. + + noref + Discard all non-reference frames. 
+ + bidir + Discard all bidirectional frames. + + nokey + Discard all frames except keyframes. + + nointra + Discard all frames except I frames. + + all Discard all frames. + + Default value is default. + + bidir_refine integer (encoding,video) + Refine the two motion vectors used in bidirectional macroblocks. + + brd_scale integer (encoding,video) + Downscale frames for dynamic B-frame decision. + + keyint_min integer (encoding,video) + Set minimum interval between IDR-frames. + + refs integer (encoding,video) + Set reference frames to consider for motion compensation. + + chromaoffset integer (encoding,video) + Set chroma qp offset from luma. + + trellis integer (encoding,audio,video) + Set rate-distortion optimal quantization. + + mv0_threshold integer (encoding,video) + b_sensitivity integer (encoding,video) + Adjust sensitivity of b_frame_strategy 1. + + compression_level integer (encoding,audio,video) + min_prediction_order integer (encoding,audio) + max_prediction_order integer (encoding,audio) + timecode_frame_start integer (encoding,video) + Set GOP timecode frame start number, in non drop frame format. 
+ + bits_per_raw_sample integer + channel_layout integer (decoding/encoding,audio) + Possible values: + + request_channel_layout integer (decoding,audio) + Possible values: + + rc_max_vbv_use float (encoding,video) + rc_min_vbv_use float (encoding,video) + ticks_per_frame integer (decoding/encoding,audio,video) + color_primaries integer (decoding/encoding,video) + Possible values: + + bt709 + BT.709 + + bt470m + BT.470 M + + bt470bg + BT.470 BG + + smpte170m + SMPTE 170 M + + smpte240m + SMPTE 240 M + + film + Film + + bt2020 + BT.2020 + + smpte428 + smpte428_1 + SMPTE ST 428-1 + + smpte431 + SMPTE 431-2 + + smpte432 + SMPTE 432-1 + + jedec-p22 + JEDEC P22 + + color_trc integer (decoding/encoding,video) + Possible values: + + bt709 + BT.709 + + gamma22 + BT.470 M + + gamma28 + BT.470 BG + + smpte170m + SMPTE 170 M + + smpte240m + SMPTE 240 M + + linear + Linear + + log + log100 + Log + + log_sqrt + log316 + Log square root + + iec61966_2_4 + iec61966-2-4 + IEC 61966-2-4 + + bt1361 + bt1361e + BT.1361 + + iec61966_2_1 + iec61966-2-1 + IEC 61966-2-1 + + bt2020_10 + bt2020_10bit + BT.2020 - 10 bit + + bt2020_12 + bt2020_12bit + BT.2020 - 12 bit + + smpte2084 + SMPTE ST 2084 + + smpte428 + smpte428_1 + SMPTE ST 428-1 + + arib-std-b67 + ARIB STD-B67 + + colorspace integer (decoding/encoding,video) + Possible values: + + rgb RGB + + bt709 + BT.709 + + fcc FCC + + bt470bg + BT.470 BG + + smpte170m + SMPTE 170 M + + smpte240m + SMPTE 240 M + + ycocg + YCOCG + + bt2020nc + bt2020_ncl + BT.2020 NCL + + bt2020c + bt2020_cl + BT.2020 CL + + smpte2085 + SMPTE 2085 + + chroma-derived-nc + Chroma-derived NCL + + chroma-derived-c + Chroma-derived CL + + ictcp + ICtCp + + color_range integer (decoding/encoding,video) + If used as input parameter, it serves as a hint to the decoder, + which color_range the input has. 
Possible values: + + tv + mpeg + MPEG (219*2^(n-8)) + + pc + jpeg + JPEG (2^n-1) + + chroma_sample_location integer (decoding/encoding,video) + Possible values: + + left + center + topleft + top + bottomleft + bottom + log_level_offset integer + Set the log level offset. + + slices integer (encoding,video) + Number of slices, used in parallelized encoding. + + thread_type flags (decoding/encoding,video) + Select which multithreading methods to use. + + Use of frame will increase decoding delay by one frame per thread, + so clients which cannot provide future frames should not use it. + + Possible values: + + slice + Decode more than one part of a single frame at once. + + Multithreading using slices works only when the video was + encoded with slices. + + frame + Decode more than one frame at once. + + Default value is slice+frame. + + audio_service_type integer (encoding,audio) + Set audio service type. + + Possible values: + + ma Main Audio Service + + ef Effects + + vi Visually Impaired + + hi Hearing Impaired + + di Dialogue + + co Commentary + + em Emergency + + vo Voice Over + + ka Karaoke + + request_sample_fmt sample_fmt (decoding,audio) + Set sample format audio decoders should prefer. Default value is + "none". + + pkt_timebase rational number + sub_charenc encoding (decoding,subtitles) + Set the input subtitles character encoding. + + field_order field_order (video) + Set/override the field order of the video. Possible values: + + progressive + Progressive video + + tt Interlaced video, top field coded and displayed first + + bb Interlaced video, bottom field coded and displayed first + + tb Interlaced video, top coded first, bottom displayed first + + bt Interlaced video, bottom coded first, top displayed first + + skip_alpha bool (decoding,video) + Set to 1 to disable processing alpha (transparency). This works + like the gray flag in the flags option which skips chroma + information instead of alpha. Default is 0. 
+ + codec_whitelist list (input) + "," separated list of allowed decoders. By default all are allowed. + + dump_separator string (input) + Separator used to separate the fields printed on the command line + about the Stream parameters. For example, to separate the fields + with newlines and indentation: + + ffprobe -dump_separator " + " -i ~/videos/matrixbench_mpeg2.mpg + + max_pixels integer (decoding/encoding,video) + Maximum number of pixels per image. This value can be used to avoid + out of memory failures due to large images. + + apply_cropping bool (decoding,video) + Enable cropping if cropping parameters are multiples of the + required alignment for the left and top parameters. If the + alignment is not met the cropping will be partially applied to + maintain alignment. Default is 1 (enabled). Note: The required + alignment depends on if "AV_CODEC_FLAG_UNALIGNED" is set and the + CPU. "AV_CODEC_FLAG_UNALIGNED" cannot be changed from the command + line. Also hardware decoders will not apply left/top Cropping. + +DECODERS + Decoders are configured elements in FFmpeg which allow the decoding of + multimedia streams. + + When you configure your FFmpeg build, all the supported native decoders + are enabled by default. Decoders requiring an external library must be + enabled manually via the corresponding "--enable-lib" option. You can + list all available decoders using the configure option + "--list-decoders". + + You can disable all the decoders with the configure option + "--disable-decoders" and selectively enable / disable single decoders + with the options "--enable-decoder=DECODER" / + "--disable-decoder=DECODER". + + The option "-decoders" of the ff* tools will display the list of + enabled decoders. + +VIDEO DECODERS + A description of some of the currently available video decoders + follows. + + av1 + AOMedia Video 1 (AV1) decoder. + + Options + + operating_point + Select an operating point of a scalable AV1 bitstream (0 - 31). + Default is 0. 
+ + rawvideo + Raw video decoder. + + This decoder decodes rawvideo streams. + + Options + + top top_field_first + Specify the assumed field type of the input video. + + -1 the video is assumed to be progressive (default) + + 0 bottom-field-first is assumed + + 1 top-field-first is assumed + + libdav1d + dav1d AV1 decoder. + + libdav1d allows libavcodec to decode the AOMedia Video 1 (AV1) codec. + Requires the presence of the libdav1d headers and library during + configuration. You need to explicitly configure the build with + "--enable-libdav1d". + + Options + + The following options are supported by the libdav1d wrapper. + + framethreads + Set amount of frame threads to use during decoding. The default + value is 0 (autodetect). + + tilethreads + Set amount of tile threads to use during decoding. The default + value is 0 (autodetect). + + filmgrain + Apply film grain to the decoded video if present in the bitstream. + Defaults to the internal default of the library. + + oppoint + Select an operating point of a scalable AV1 bitstream (0 - 31). + Defaults to the internal default of the library. + + alllayers + Output all spatial layers of a scalable AV1 bitstream. The default + value is false. + + libdavs2 + AVS2-P2/IEEE1857.4 video decoder wrapper. + + This decoder allows libavcodec to decode AVS2 streams with davs2 + library. + + libuavs3d + AVS3-P2/IEEE1857.10 video decoder. + + libuavs3d allows libavcodec to decode AVS3 streams. Requires the + presence of the libuavs3d headers and library during configuration. + You need to explicitly configure the build with "--enable-libuavs3d". + + Options + + The following option is supported by the libuavs3d wrapper. + + frame_threads + Set amount of frame threads to use during decoding. The default + value is 0 (autodetect). + +AUDIO DECODERS + A description of some of the currently available audio decoders + follows. + + ac3 + AC-3 audio decoder. 
+ + This decoder implements part of ATSC A/52:2010 and ETSI TS 102 366, as + well as the undocumented RealAudio 3 (a.k.a. dnet). + + AC-3 Decoder Options + + -drc_scale value + Dynamic Range Scale Factor. The factor to apply to dynamic range + values from the AC-3 stream. This factor is applied exponentially. + The default value is 1. There are 3 notable scale factor ranges: + + drc_scale == 0 + DRC disabled. Produces full range audio. + + 0 < drc_scale <= 1 + DRC enabled. Applies a fraction of the stream DRC value. + Audio reproduction is between full range and full compression. + + drc_scale > 1 + DRC enabled. Applies drc_scale asymmetrically. Loud sounds are + fully compressed. Soft sounds are enhanced. + + flac + FLAC audio decoder. + + This decoder aims to implement the complete FLAC specification from + Xiph. + + FLAC Decoder options + + -use_buggy_lpc + The lavc FLAC encoder used to produce buggy streams with high lpc + values (like the default value). This option makes it possible to + decode such streams correctly by using lavc's old buggy lpc logic + for decoding. + + ffwavesynth + Internal wave synthesizer. + + This decoder generates wave patterns according to predefined sequences. + Its use is purely internal and the format of the data it accepts is not + publicly documented. + + libcelt + libcelt decoder wrapper. + + libcelt allows libavcodec to decode the Xiph CELT ultra-low delay audio + codec. Requires the presence of the libcelt headers and library during + configuration. You need to explicitly configure the build with + "--enable-libcelt". + + libgsm + libgsm decoder wrapper. + + libgsm allows libavcodec to decode the GSM full rate audio codec. + Requires the presence of the libgsm headers and library during + configuration. You need to explicitly configure the build with + "--enable-libgsm". + + This decoder supports both the ordinary GSM and the Microsoft variant. + + libilbc + libilbc decoder wrapper. 
+ + libilbc allows libavcodec to decode the Internet Low Bitrate Codec + (iLBC) audio codec. Requires the presence of the libilbc headers and + library during configuration. You need to explicitly configure the + build with "--enable-libilbc". + + Options + + The following option is supported by the libilbc wrapper. + + enhance + Enable the enhancement of the decoded audio when set to 1. The + default value is 0 (disabled). + + libopencore-amrnb + libopencore-amrnb decoder wrapper. + + libopencore-amrnb allows libavcodec to decode the Adaptive Multi-Rate + Narrowband audio codec. Using it requires the presence of the + libopencore-amrnb headers and library during configuration. You need to + explicitly configure the build with "--enable-libopencore-amrnb". + + An FFmpeg native decoder for AMR-NB exists, so users can decode AMR-NB + without this library. + + libopencore-amrwb + libopencore-amrwb decoder wrapper. + + libopencore-amrwb allows libavcodec to decode the Adaptive Multi-Rate + Wideband audio codec. Using it requires the presence of the + libopencore-amrwb headers and library during configuration. You need to + explicitly configure the build with "--enable-libopencore-amrwb". + + An FFmpeg native decoder for AMR-WB exists, so users can decode AMR-WB + without this library. + + libopus + libopus decoder wrapper. + + libopus allows libavcodec to decode the Opus Interactive Audio Codec. + Requires the presence of the libopus headers and library during + configuration. You need to explicitly configure the build with + "--enable-libopus". + + An FFmpeg native decoder for Opus exists, so users can decode Opus + without this library. + +SUBTITLES DECODERS + libaribb24 + ARIB STD-B24 caption decoder. + + Implements profiles A and C of the ARIB STD-B24 standard. + + libaribb24 Decoder Options + + -aribb24-base-path path + Sets the base path for the libaribb24 library. 
This is utilized for + reading of configuration files (for custom unicode conversions), + and for dumping of non-text symbols as images under that location. + + Unset by default. + + -aribb24-skip-ruby-text boolean + Tells the decoder wrapper to skip text blocks that contain half- + height ruby text. + + Enabled by default. + + dvbsub + Options + + compute_clut + -1 Compute clut if no matching CLUT is in the stream. + + 0 Never compute CLUT + + 1 Always compute CLUT and override the one provided in the + stream. + + dvb_substream + Selects the dvb substream, or all substreams if -1 which is + default. + + dvdsub + This codec decodes the bitmap subtitles used in DVDs; the same + subtitles can also be found in VobSub file pairs and in some Matroska + files. + + Options + + palette + Specify the global palette used by the bitmaps. When stored in + VobSub, the palette is normally specified in the index file; in + Matroska, the palette is stored in the codec extra-data in the same + format as in VobSub. In DVDs, the palette is stored in the IFO + file, and therefore not available when reading from dumped VOB + files. + + The format for this option is a string containing 16 24-bits + hexadecimal numbers (without 0x prefix) separated by commas, for + example "0d00ee, ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, + 0d617a, 7b7b7b, d1d1d1, 7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, + 7c127b". + + ifo_palette + Specify the IFO file from which the global palette is obtained. + (experimental) + + forced_subs_only + Only decode subtitle entries marked as forced. Some titles have + forced and non-forced subtitles in the same track. Setting this + flag to 1 will only keep the forced subtitles. Default value is 0. + + libzvbi-teletext + Libzvbi allows libavcodec to decode DVB teletext pages and DVB teletext + subtitles. Requires the presence of the libzvbi headers and library + during configuration. You need to explicitly configure the build with + "--enable-libzvbi". 
+ + Options + + txt_page + List of teletext page numbers to decode. Pages that do not match + the specified list are dropped. You may use the special "*" string + to match all pages, or "subtitle" to match all subtitle pages. + Default value is *. + + txt_default_region + Set default character set used for decoding, a value between 0 and + 87 (see ETS 300 706, Section 15, Table 32). Default value is -1, + which does not override the libzvbi default. This option is needed + for some legacy level 1.0 transmissions which cannot signal the + proper charset. + + txt_chop_top + Discards the top teletext line. Default value is 1. + + txt_format + Specifies the format of the decoded subtitles. + + bitmap + The default format, you should use this for teletext pages, + because certain graphics and colors cannot be expressed in + simple text or even ASS. + + text + Simple text based output without formatting. + + ass Formatted ASS output, subtitle pages and teletext pages are + returned in different styles, subtitle pages are stripped down + to text, but an effort is made to keep the text alignment and + the formatting. + + txt_left + X offset of generated bitmaps, default is 0. + + txt_top + Y offset of generated bitmaps, default is 0. + + txt_chop_spaces + Chops leading and trailing spaces and removes empty lines from the + generated text. This option is useful for teletext based subtitles + where empty spaces may be present at the start or at the end of the + lines or empty lines may be present between the subtitle lines + because of double-sized teletext characters. Default value is 1. + + txt_duration + Sets the display duration of the decoded teletext pages or + subtitles in milliseconds. Default value is -1 which means infinity + or until the next subtitle event comes. + + txt_transparent + Force transparent background of the generated teletext bitmaps. + Default value is 0 which means an opaque background. 
+ + txt_opacity + Sets the opacity (0-255) of the teletext background. If + txt_transparent is not set, it only affects characters between a + start box and an end box, typically subtitles. Default value is 0 + if txt_transparent is set, 255 otherwise. + +ENCODERS + Encoders are configured elements in FFmpeg which allow the encoding of + multimedia streams. + + When you configure your FFmpeg build, all the supported native encoders + are enabled by default. Encoders requiring an external library must be + enabled manually via the corresponding "--enable-lib" option. You can + list all available encoders using the configure option + "--list-encoders". + + You can disable all the encoders with the configure option + "--disable-encoders" and selectively enable / disable single encoders + with the options "--enable-encoder=ENCODER" / + "--disable-encoder=ENCODER". + + The option "-encoders" of the ff* tools will display the list of + enabled encoders. + +AUDIO ENCODERS + A description of some of the currently available audio encoders + follows. + + aac + Advanced Audio Coding (AAC) encoder. + + This encoder is the default AAC encoder, natively implemented into + FFmpeg. + + Options + + b Set bit rate in bits/s. Setting this automatically activates + constant bit rate (CBR) mode. If this option is unspecified it is + set to 128kbps. + + q Set quality for variable bit rate (VBR) mode. This option is valid + only using the ffmpeg command-line tool. For library interface + users, use global_quality. + + cutoff + Set cutoff frequency. If unspecified will allow the encoder to + dynamically adjust the cutoff to improve clarity on low bitrates. + + aac_coder + Set AAC encoder coding method. Possible values: + + twoloop + Two loop searching (TLS) method. 
+ + This method first sets quantizers depending on band thresholds + and then tries to find an optimal combination by adding or + subtracting a specific value from all quantizers and adjusting + some individual quantizer a little. Will tune itself based on + whether aac_is, aac_ms and aac_pns are enabled. + + anmr + Average noise to mask ratio (ANMR) trellis-based solution. + + This is an experimental coder which currently produces a lower + quality, is more unstable and is slower than the default + twoloop coder but has potential. Currently has no support for + the aac_is or aac_pns options. Not currently recommended. + + fast + Constant quantizer method. + + Uses a cheaper version of twoloop algorithm that doesn't try to + do as many clever adjustments. Worse with low bitrates (less + than 64kbps), but is better and much faster at higher bitrates. + This is the default choice for a coder + + aac_ms + Sets mid/side coding mode. The default value of "auto" will + automatically use M/S with bands which will benefit from such + coding. Can be forced for all bands using the value "enable", which + is mainly useful for debugging or disabled using "disable". + + aac_is + Sets intensity stereo coding tool usage. By default, it's enabled + and will automatically toggle IS for similar pairs of stereo bands + if it's beneficial. Can be disabled for debugging by setting the + value to "disable". + + aac_pns + Uses perceptual noise substitution to replace low entropy high + frequency bands with imperceptible white noise during the decoding + process. By default, it's enabled, but can be disabled for + debugging purposes by using "disable". + + aac_tns + Enables the use of a multitap FIR filter which spans through the + high frequency bands to hide quantization noise during the encoding + process and is reverted by the decoder. 
As well as decreasing + unpleasant artifacts in the high range this also reduces the + entropy in the high bands and allows for more bits to be used by + the mid-low bands. By default it's enabled but can be disabled for + debugging by setting the option to "disable". + + aac_ltp + Enables the use of the long term prediction extension which + increases coding efficiency in very low bandwidth situations such + as encoding of voice or solo piano music by extending constant + harmonic peaks in bands throughout frames. This option is implied + by profile:a aac_low and is incompatible with aac_pred. Use in + conjunction with -ar to decrease the samplerate. + + aac_pred + Enables the use of a more traditional style of prediction where the + spectral coefficients transmitted are replaced by the difference of + the current coefficients minus the previous "predicted" + coefficients. In theory and sometimes in practice this can improve + quality for low to mid bitrate audio. This option implies the + aac_main profile and is incompatible with aac_ltp. + + profile + Sets the encoding profile, possible values: + + aac_low + The default, AAC "Low-complexity" profile. Is the most + compatible and produces decent quality. + + mpeg2_aac_low + Equivalent to "-profile:a aac_low -aac_pns 0". PNS was + introduced with the MPEG4 specifications. + + aac_ltp + Long term prediction profile, is enabled by and will enable the + aac_ltp option. Introduced in MPEG4. + + aac_main + Main-type prediction profile, is enabled by and will enable the + aac_pred option. Introduced in MPEG2. + + If this option is unspecified it is set to aac_low. + + ac3 and ac3_fixed + AC-3 audio encoders. + + These encoders implement part of ATSC A/52:2010 and ETSI TS 102 366, as + well as the undocumented RealAudio 3 (a.k.a. dnet). + + The ac3 encoder uses floating-point math, while the ac3_fixed encoder + only uses fixed-point integer math. 
This does not mean that one is + always faster, just that one or the other may be better suited to a + particular system. The ac3_fixed encoder is not the default codec for + any of the output formats, so it must be specified explicitly using the + option "-acodec ac3_fixed" in order to use it. + + AC-3 Metadata + + The AC-3 metadata options are used to set parameters that describe the + audio, but in most cases do not affect the audio encoding itself. Some + of the options do directly affect or influence the decoding and + playback of the resulting bitstream, while others are just for + informational purposes. A few of the options will add bits to the + output stream that could otherwise be used for audio data, and will + thus affect the quality of the output. Those will be indicated + accordingly with a note in the option list below. + + These parameters are described in detail in several publicly-available + documents. + + *<> + *<> + *<> + *<> + + Metadata Control Options + + -per_frame_metadata boolean + Allow Per-Frame Metadata. Specifies if the encoder should check for + changing metadata for each frame. + + 0 The metadata values set at initialization will be used for + every frame in the stream. (default) + + 1 Metadata values can be changed before encoding each frame. + + Downmix Levels + + -center_mixlev level + Center Mix Level. The amount of gain the decoder should apply to + the center channel when downmixing to stereo. This field will only + be written to the bitstream if a center channel is present. The + value is specified as a scale factor. There are 3 valid values: + + 0.707 + Apply -3dB gain + + 0.595 + Apply -4.5dB gain (default) + + 0.500 + Apply -6dB gain + + -surround_mixlev level + Surround Mix Level. The amount of gain the decoder should apply to + the surround channel(s) when downmixing to stereo. This field will + only be written to the bitstream if one or more surround channels + are present. The value is specified as a scale factor. 
There are 3 + valid values: + + 0.707 + Apply -3dB gain + + 0.500 + Apply -6dB gain (default) + + 0.000 + Silence Surround Channel(s) + + Audio Production Information + + Audio Production Information is optional information describing the + mixing environment. Either none or both of the fields are written to + the bitstream. + + -mixing_level number + Mixing Level. Specifies peak sound pressure level (SPL) in the + production environment when the mix was mastered. Valid values are + 80 to 111, or -1 for unknown or not indicated. The default value is + -1, but that value cannot be used if the Audio Production + Information is written to the bitstream. Therefore, if the + "room_type" option is not the default value, the "mixing_level" + option must not be -1. + + -room_type type + Room Type. Describes the equalization used during the final mixing + session at the studio or on the dubbing stage. A large room is a + dubbing stage with the industry standard X-curve equalization; a + small room has flat equalization. This field will not be written + to the bitstream if both the "mixing_level" option and the + "room_type" option have the default values. + + 0 + notindicated + Not Indicated (default) + + 1 + large + Large Room + + 2 + small + Small Room + + Other Metadata Options + + -copyright boolean + Copyright Indicator. Specifies whether a copyright exists for this + audio. + + 0 + off No Copyright Exists (default) + + 1 + on Copyright Exists + + -dialnorm value + Dialogue Normalization. Indicates how far the average dialogue + level of the program is below digital 100% full scale (0 dBFS). + This parameter determines a level shift during audio reproduction + that sets the average volume of the dialogue to a preset level. The + goal is to match volume level between program sources. A value of + -31dB will result in no volume level change, relative to the source + volume, during audio reproduction. 
Valid values are whole numbers + in the range -31 to -1, with -31 being the default. + + -dsur_mode mode + Dolby Surround Mode. Specifies whether the stereo signal uses Dolby + Surround (Pro Logic). This field will only be written to the + bitstream if the audio stream is stereo. Using this option does NOT + mean the encoder will actually apply Dolby Surround processing. + + 0 + notindicated + Not Indicated (default) + + 1 + off Not Dolby Surround Encoded + + 2 + on Dolby Surround Encoded + + -original boolean + Original Bit Stream Indicator. Specifies whether this audio is from + the original source and not a copy. + + 0 + off Not Original Source + + 1 + on Original Source (default) + + Extended Bitstream Information + + The extended bitstream options are part of the Alternate Bit Stream + Syntax as specified in Annex D of the A/52:2010 standard. It is grouped + into 2 parts. If any one parameter in a group is specified, all values + in that group will be written to the bitstream. Default values are + used for those that are written but have not been specified. If the + mixing levels are written, the decoder will use these values instead of + the ones specified in the "center_mixlev" and "surround_mixlev" options + if it supports the Alternate Bit Stream Syntax. + + Extended Bitstream Information - Part 1 + + -dmix_mode mode + Preferred Stereo Downmix Mode. Allows the user to select either + Lt/Rt (Dolby Surround) or Lo/Ro (normal stereo) as the preferred + stereo downmix mode. + + 0 + notindicated + Not Indicated (default) + + 1 + ltrt + Lt/Rt Downmix Preferred + + 2 + loro + Lo/Ro Downmix Preferred + + -ltrt_cmixlev level + Lt/Rt Center Mix Level. The amount of gain the decoder should apply + to the center channel when downmixing to stereo in Lt/Rt mode. 
+ + 1.414 + Apply +3dB gain + + 1.189 + Apply +1.5dB gain + + 1.000 + Apply 0dB gain + + 0.841 + Apply -1.5dB gain + + 0.707 + Apply -3.0dB gain + + 0.595 + Apply -4.5dB gain (default) + + 0.500 + Apply -6.0dB gain + + 0.000 + Silence Center Channel + + -ltrt_surmixlev level + Lt/Rt Surround Mix Level. The amount of gain the decoder should + apply to the surround channel(s) when downmixing to stereo in Lt/Rt + mode. + + 0.841 + Apply -1.5dB gain + + 0.707 + Apply -3.0dB gain + + 0.595 + Apply -4.5dB gain + + 0.500 + Apply -6.0dB gain (default) + + 0.000 + Silence Surround Channel(s) + + -loro_cmixlev level + Lo/Ro Center Mix Level. The amount of gain the decoder should apply + to the center channel when downmixing to stereo in Lo/Ro mode. + + 1.414 + Apply +3dB gain + + 1.189 + Apply +1.5dB gain + + 1.000 + Apply 0dB gain + + 0.841 + Apply -1.5dB gain + + 0.707 + Apply -3.0dB gain + + 0.595 + Apply -4.5dB gain (default) + + 0.500 + Apply -6.0dB gain + + 0.000 + Silence Center Channel + + -loro_surmixlev level + Lo/Ro Surround Mix Level. The amount of gain the decoder should + apply to the surround channel(s) when downmixing to stereo in Lo/Ro + mode. + + 0.841 + Apply -1.5dB gain + + 0.707 + Apply -3.0dB gain + + 0.595 + Apply -4.5dB gain + + 0.500 + Apply -6.0dB gain (default) + + 0.000 + Silence Surround Channel(s) + + Extended Bitstream Information - Part 2 + + -dsurex_mode mode + Dolby Surround EX Mode. Indicates whether the stream uses Dolby + Surround EX (7.1 matrixed to 5.1). Using this option does NOT mean + the encoder will actually apply Dolby Surround EX processing. + + 0 + notindicated + Not Indicated (default) + + 1 + on Dolby Surround EX Off + + 2 + off Dolby Surround EX On + + -dheadphone_mode mode + Dolby Headphone Mode. Indicates whether the stream uses Dolby + Headphone encoding (multi-channel matrixed to 2.0 for use with + headphones). Using this option does NOT mean the encoder will + actually apply Dolby Headphone processing. 
+ + 0 + notindicated + Not Indicated (default) + + 1 + on Dolby Headphone Off + + 2 + off Dolby Headphone On + + -ad_conv_type type + A/D Converter Type. Indicates whether the audio has passed through + HDCD A/D conversion. + + 0 + standard + Standard A/D Converter (default) + + 1 + hdcd + HDCD A/D Converter + + Other AC-3 Encoding Options + + -stereo_rematrixing boolean + Stereo Rematrixing. Enables/Disables use of rematrixing for stereo + input. This is an optional AC-3 feature that increases quality by + selectively encoding the left/right channels as mid/side. This + option is enabled by default, and it is highly recommended that it + be left as enabled except for testing purposes. + + cutoff frequency + Set lowpass cutoff frequency. If unspecified, the encoder selects a + default determined by various other encoding parameters. + + Floating-Point-Only AC-3 Encoding Options + + These options are only valid for the floating-point encoder and do not + exist for the fixed-point encoder due to the corresponding features not + being implemented in fixed-point. + + -channel_coupling boolean + Enables/Disables use of channel coupling, which is an optional AC-3 + feature that increases quality by combining high frequency + information from multiple channels into a single channel. The per- + channel high frequency information is sent with less accuracy in + both the frequency and time domains. This allows more bits to be + used for lower frequencies while preserving enough information to + reconstruct the high frequencies. This option is enabled by default + for the floating-point encoder and should generally be left as + enabled except for testing purposes or to increase encoding speed. + + -1 + auto + Selected by Encoder (default) + + 0 + off Disable Channel Coupling + + 1 + on Enable Channel Coupling + + -cpl_start_band number + Coupling Start Band. Sets the channel coupling start band, from 1 + to 15. 
If a value higher than the bandwidth is used, it will be + reduced to 1 less than the coupling end band. If auto is used, the + start band will be determined by the encoder based on the bit rate, + sample rate, and channel layout. This option has no effect if + channel coupling is disabled. + + -1 + auto + Selected by Encoder (default) + + flac + FLAC (Free Lossless Audio Codec) Encoder + + Options + + The following options are supported by FFmpeg's flac encoder. + + compression_level + Sets the compression level, which chooses defaults for many other + options if they are not set explicitly. Valid values are from 0 to + 12, 5 is the default. + + frame_size + Sets the size of the frames in samples per channel. + + lpc_coeff_precision + Sets the LPC coefficient precision, valid values are from 1 to 15, + 15 is the default. + + lpc_type + Sets the first stage LPC algorithm + + none + LPC is not used + + fixed + fixed LPC coefficients + + levinson + cholesky + lpc_passes + Number of passes to use for Cholesky factorization during LPC + analysis + + min_partition_order + The minimum partition order + + max_partition_order + The maximum partition order + + prediction_order_method + estimation + 2level + 4level + 8level + search + Bruteforce search + + log + ch_mode + Channel mode + + auto + The mode is chosen automatically for each frame + + indep + Channels are independently coded + + left_side + right_side + mid_side + exact_rice_parameters + Chooses if rice parameters are calculated exactly or approximately. + if set to 1 then they are chosen exactly, which slows the code down + slightly and improves compression slightly. + + multi_dim_quant + Multi Dimensional Quantization. If set to 1 then a 2nd stage LPC + algorithm is applied after the first stage to finetune the + coefficients. This is quite slow and slightly improves compression. + + opus + Opus encoder. + + This is a native FFmpeg encoder for the Opus format. 
Currently it's in + development and only implements the CELT part of the codec. Its quality + is usually worse and at best is equal to the libopus encoder. + + Options + + b Set bit rate in bits/s. If unspecified it uses the number of + channels and the layout to make a good guess. + + opus_delay + Sets the maximum delay in milliseconds. Lower delays than 20ms will + very quickly decrease quality. + + libfdk_aac + libfdk-aac AAC (Advanced Audio Coding) encoder wrapper. + + The libfdk-aac library is based on the Fraunhofer FDK AAC code from the + Android project. + + Requires the presence of the libfdk-aac headers and library during + configuration. You need to explicitly configure the build with + "--enable-libfdk-aac". The library is also incompatible with GPL, so if + you allow the use of GPL, you should configure with "--enable-gpl + --enable-nonfree --enable-libfdk-aac". + + This encoder has support for the AAC-HE profiles. + + VBR encoding, enabled through the vbr or flags +qscale options, is + experimental and only works with some combinations of parameters. + + Support for encoding 7.1 audio is only available with libfdk-aac 0.1.3 + or higher. + + For more information see the fdk-aac project at + . + + Options + + The following options are mapped on the shared FFmpeg codec options. + + b Set bit rate in bits/s. If the bitrate is not explicitly specified, + it is automatically set to a suitable value depending on the + selected profile. + + In case VBR mode is enabled the option is ignored. + + ar Set audio sampling rate (in Hz). + + channels + Set the number of audio channels. + + flags +qscale + Enable fixed quality, VBR (Variable Bit Rate) mode. Note that VBR + is implicitly enabled when the vbr value is positive. + + cutoff + Set cutoff frequency. If not specified (or explicitly set to 0) it + will use a value automatically computed by the library. Default + value is 0. + + profile + Set audio profile. 
+ + The following profiles are recognized: + + aac_low + Low Complexity AAC (LC) + + aac_he + High Efficiency AAC (HE-AAC) + + aac_he_v2 + High Efficiency AAC version 2 (HE-AACv2) + + aac_ld + Low Delay AAC (LD) + + aac_eld + Enhanced Low Delay AAC (ELD) + + If not specified it is set to aac_low. + + The following are private options of the libfdk_aac encoder. + + afterburner + Enable afterburner feature if set to 1, disabled if set to 0. This + improves the quality but also the required processing power. + + Default value is 1. + + eld_sbr + Enable SBR (Spectral Band Replication) for ELD if set to 1, + disabled if set to 0. + + Default value is 0. + + eld_v2 + Enable ELDv2 (LD-MPS extension for ELD stereo signals) for ELDv2 if + set to 1, disabled if set to 0. + + Note that option is available when fdk-aac version + (AACENCODER_LIB_VL0.AACENCODER_LIB_VL1.AACENCODER_LIB_VL2) > + (4.0.0). + + Default value is 0. + + signaling + Set SBR/PS signaling style. + + It can assume one of the following values: + + default + choose signaling implicitly (explicit hierarchical by default, + implicit if global header is disabled) + + implicit + implicit backwards compatible signaling + + explicit_sbr + explicit SBR, implicit PS signaling + + explicit_hierarchical + explicit hierarchical signaling + + Default value is default. + + latm + Output LATM/LOAS encapsulated data if set to 1, disabled if set to + 0. + + Default value is 0. + + header_period + Set StreamMuxConfig and PCE repetition period (in frames) for + sending in-band configuration buffers within LATM/LOAS transport + layer. + + Must be a 16-bits non-negative integer. + + Default value is 0. + + vbr Set VBR mode, from 1 to 5. 1 is lowest quality (though still pretty + good) and 5 is highest quality. A value of 0 will disable VBR, and + CBR (Constant Bit Rate) is enabled. + + Currently only the aac_low profile supports VBR encoding. 
+ + VBR modes 1-5 correspond to roughly the following average bit + rates: + + 1 32 kbps/channel + + 2 40 kbps/channel + + 3 48-56 kbps/channel + + 4 64 kbps/channel + + 5 about 80-96 kbps/channel + + Default value is 0. + + Examples + + o Use ffmpeg to convert an audio file to VBR AAC in an M4A (MP4) + container: + + ffmpeg -i input.wav -codec:a libfdk_aac -vbr 3 output.m4a + + o Use ffmpeg to convert an audio file to CBR 64k kbps AAC, using the + High-Efficiency AAC profile: + + ffmpeg -i input.wav -c:a libfdk_aac -profile:a aac_he -b:a 64k output.m4a + + libmp3lame + LAME (Lame Ain't an MP3 Encoder) MP3 encoder wrapper. + + Requires the presence of the libmp3lame headers and library during + configuration. You need to explicitly configure the build with + "--enable-libmp3lame". + + See libshine for a fixed-point MP3 encoder, although with a lower + quality. + + Options + + The following options are supported by the libmp3lame wrapper. The + lame-equivalent of the options are listed in parentheses. + + b (-b) + Set bitrate expressed in bits/s for CBR or ABR. LAME "bitrate" is + expressed in kilobits/s. + + q (-V) + Set constant quality setting for VBR. This option is valid only + using the ffmpeg command-line tool. For library interface users, + use global_quality. + + compression_level (-q) + Set algorithm quality. Valid arguments are integers in the 0-9 + range, with 0 meaning highest quality but slowest, and 9 meaning + fastest while producing the worst quality. + + cutoff (--lowpass) + Set lowpass cutoff frequency. If unspecified, the encoder + dynamically adjusts the cutoff. + + reservoir + Enable use of bit reservoir when set to 1. Default value is 1. LAME + has this enabled by default, but can be overridden by use --nores + option. + + joint_stereo (-m j) + Enable the encoder to use (on a frame by frame basis) either L/R + stereo or mid/side stereo. Default value is 1. + + abr (--abr) + Enable the encoder to use ABR when set to 1. 
The lame --abr sets + the target bitrate, while this option only tells FFmpeg to use ABR; it + still relies on b to set bitrate. + + libopencore-amrnb + OpenCORE Adaptive Multi-Rate Narrowband encoder. + + Requires the presence of the libopencore-amrnb headers and library + during configuration. You need to explicitly configure the build with + "--enable-libopencore-amrnb --enable-version3". + + This is a mono-only encoder. Officially it only supports 8000Hz sample + rate, but you can override it by setting strict to unofficial or lower. + + Options + + b Set bitrate in bits per second. Only the following bitrates are + supported, otherwise libavcodec will round to the nearest valid + bitrate. + + 4750 + 5150 + 5900 + 6700 + 7400 + 7950 + 10200 + 12200 + dtx Allow discontinuous transmission (generate comfort noise) when set + to 1. The default value is 0 (disabled). + + libopus + libopus Opus Interactive Audio Codec encoder wrapper. + + Requires the presence of the libopus headers and library during + configuration. You need to explicitly configure the build with + "--enable-libopus". + + Option Mapping + + Most libopus options are modelled after the opusenc utility from opus- + tools. The following is an option mapping chart describing options + supported by the libopus wrapper, and their opusenc-equivalent in + parentheses. + + b (bitrate) + Set the bit rate in bits/s. FFmpeg's b option is expressed in + bits/s, while opusenc's bitrate in kilobits/s. + + vbr (vbr, hard-cbr, and cvbr) + Set VBR mode. The FFmpeg vbr option has the following valid + arguments, with the opusenc equivalent options in parentheses: + + off (hard-cbr) + Use constant bit rate encoding. + + on (vbr) + Use variable bit rate encoding (the default). + + constrained (cvbr) + Use constrained variable bit rate encoding. + + compression_level (comp) + Set encoding algorithm complexity. Valid options are integers in + the 0-10 range. 
0 gives the fastest encodes but lower quality, + while 10 gives the highest quality but slowest encoding. The + default is 10. + + frame_duration (framesize) + Set maximum frame size, or duration of a frame in milliseconds. The + argument must be exactly the following: 2.5, 5, 10, 20, 40, 60. + Smaller frame sizes achieve lower latency but less quality at a + given bitrate. Sizes greater than 20ms are only interesting at + fairly low bitrates. The default is 20ms. + + packet_loss (expect-loss) + Set expected packet loss percentage. The default is 0. + + fec (n/a) + Enable inband forward error correction. packet_loss must be non- + zero to take advantage - frequency of FEC 'side-data' is + proportional to expected packet loss. Default is disabled. + + application (N.A.) + Set intended application type. Valid options are listed below: + + voip + Favor improved speech intelligibility. + + audio + Favor faithfulness to the input (the default). + + lowdelay + Restrict to only the lowest delay modes. + + cutoff (N.A.) + Set cutoff bandwidth in Hz. The argument must be exactly one of the + following: 4000, 6000, 8000, 12000, or 20000, corresponding to + narrowband, mediumband, wideband, super wideband, and fullband + respectively. The default is 0 (cutoff disabled). + + mapping_family (mapping_family) + Set channel mapping family to be used by the encoder. The default + value of -1 uses mapping family 0 for mono and stereo inputs, and + mapping family 1 otherwise. The default also disables the surround + masking and LFE bandwidth optimizations in libopus, and requires + that the input contains 8 channels or fewer. + + Other values include 0 for mono and stereo, 1 for surround sound + with masking and LFE bandwidth optimizations, and 255 for + independent streams with an unspecified channel layout. + + apply_phase_inv (N.A.) 
(requires libopus >= 1.2) + If set to 0, disables the use of phase inversion for intensity + stereo, improving the quality of mono downmixes, but slightly + reducing normal stereo quality. The default is 1 (phase inversion + enabled). + + libshine + Shine Fixed-Point MP3 encoder wrapper. + + Shine is a fixed-point MP3 encoder. It has a far better performance on + platforms without an FPU, e.g. armel CPUs, and some phones and tablets. + However, as it is more targeted on performance than quality, it is not + on par with LAME and other production-grade encoders quality-wise. + Also, according to the project's homepage, this encoder may not be free + of bugs as the code was written a long time ago and the project was + dead for at least 5 years. + + This encoder only supports stereo and mono input. This is also CBR- + only. + + The original project (last updated in early 2007) is at + . We only support the + updated fork by the Savonet/Liquidsoap project at + . + + Requires the presence of the libshine headers and library during + configuration. You need to explicitly configure the build with + "--enable-libshine". + + See also libmp3lame. + + Options + + The following options are supported by the libshine wrapper. The + shineenc-equivalent of the options are listed in parentheses. + + b (-b) + Set bitrate expressed in bits/s for CBR. shineenc -b option is + expressed in kilobits/s. + + libtwolame + TwoLAME MP2 encoder wrapper. + + Requires the presence of the libtwolame headers and library during + configuration. You need to explicitly configure the build with + "--enable-libtwolame". + + Options + + The following options are supported by the libtwolame wrapper. The + twolame-equivalent options follow the FFmpeg ones and are in + parentheses. + + b (-b) + Set bitrate expressed in bits/s for CBR. twolame b option is + expressed in kilobits/s. Default value is 128k. + + q (-V) + Set quality for experimental VBR support. 
Maximum value range is + from -50 to 50, useful range is from -10 to 10. The higher the + value, the better the quality. This option is valid only using the + ffmpeg command-line tool. For library interface users, use + global_quality. + + mode (--mode) + Set the mode of the resulting audio. Possible values: + + auto + Choose mode automatically based on the input. This is the + default. + + stereo + Stereo + + joint_stereo + Joint stereo + + dual_channel + Dual channel + + mono + Mono + + psymodel (--psyc-mode) + Set psychoacoustic model to use in encoding. The argument must be + an integer between -1 and 4, inclusive. The higher the value, the + better the quality. The default value is 3. + + energy_levels (--energy) + Enable energy levels extensions when set to 1. The default value is + 0 (disabled). + + error_protection (--protect) + Enable CRC error protection when set to 1. The default value is 0 + (disabled). + + copyright (--copyright) + Set MPEG audio copyright flag when set to 1. The default value is 0 + (disabled). + + original (--original) + Set MPEG audio original flag when set to 1. The default value is 0 + (disabled). + + libvo-amrwbenc + VisualOn Adaptive Multi-Rate Wideband encoder. + + Requires the presence of the libvo-amrwbenc headers and library during + configuration. You need to explicitly configure the build with + "--enable-libvo-amrwbenc --enable-version3". + + This is a mono-only encoder. Officially it only supports 16000Hz sample + rate, but you can override it by setting strict to unofficial or lower. + + Options + + b Set bitrate in bits/s. Only the following bitrates are supported, + otherwise libavcodec will round to the nearest valid bitrate. + + 6600 + 8850 + 12650 + 14250 + 15850 + 18250 + 19850 + 23050 + 23850 + dtx Allow discontinuous transmission (generate comfort noise) when set + to 1. The default value is 0 (disabled). + + libvorbis + libvorbis encoder wrapper. 
+ + Requires the presence of the libvorbisenc headers and library during + configuration. You need to explicitly configure the build with + "--enable-libvorbis". + + Options + + The following options are supported by the libvorbis wrapper. The + oggenc-equivalent of the options are listed in parentheses. + + To get a more accurate and extensive documentation of the libvorbis + options, consult the libvorbisenc's and oggenc's documentations. See + , , and + oggenc(1). + + b (-b) + Set bitrate expressed in bits/s for ABR. oggenc -b is expressed in + kilobits/s. + + q (-q) + Set constant quality setting for VBR. The value should be a float + number in the range of -1.0 to 10.0. The higher the value, the + better the quality. The default value is 3.0. + + This option is valid only using the ffmpeg command-line tool. For + library interface users, use global_quality. + + cutoff (--advanced-encode-option lowpass_frequency=N) + Set cutoff bandwidth in Hz, a value of 0 disables cutoff. oggenc's + related option is expressed in kHz. The default value is 0 (cutoff + disabled). + + minrate (-m) + Set minimum bitrate expressed in bits/s. oggenc -m is expressed in + kilobits/s. + + maxrate (-M) + Set maximum bitrate expressed in bits/s. oggenc -M is expressed in + kilobits/s. This only has effect on ABR mode. + + iblock (--advanced-encode-option impulse_noisetune=N) + Set noise floor bias for impulse blocks. The value is a float + number from -15.0 to 0.0. A negative bias instructs the encoder to + pay special attention to the crispness of transients in the encoded + audio. The tradeoff for better transient response is a higher + bitrate. + + mjpeg + Motion JPEG encoder. + + Options + + huffman + Set the huffman encoding strategy. Possible values: + + default + Use the default huffman tables. This is the default strategy. + + optimal + Compute and use optimal huffman tables. + + wavpack + WavPack lossless audio encoder. 
+ + Options + + The equivalent options for wavpack command line utility are listed in + parentheses. + + Shared options + + The following shared options are effective for this encoder. Only + special notes about this particular encoder will be documented here. + For the general meaning of the options, see the Codec Options chapter. + + frame_size (--blocksize) + For this encoder, the range for this option is between 128 and + 131072. Default is automatically decided based on sample rate and + number of channel. + + For the complete formula of calculating default, see + libavcodec/wavpackenc.c. + + compression_level (-f, -h, -hh, and -x) + + Private options + + joint_stereo (-j) + Set whether to enable joint stereo. Valid values are: + + on (1) + Force mid/side audio encoding. + + off (0) + Force left/right audio encoding. + + auto + Let the encoder decide automatically. + + optimize_mono + Set whether to enable optimization for mono. This option is only + effective for non-mono streams. Available values: + + on enabled + + off disabled + +VIDEO ENCODERS + A description of some of the currently available video encoders + follows. + + GIF + GIF image/animation encoder. + + Options + + gifflags integer + Sets the flags used for GIF encoding. + + offsetting + Enables picture offsetting. + + Default is enabled. + + transdiff + Enables transparency detection between frames. + + Default is enabled. + + gifimage integer + Enables encoding one full GIF image per frame, rather than an + animated GIF. + + Default value is 0. + + global_palette integer + Writes a palette to the global GIF header where feasible. + + If disabled, every frame will always have a palette written, even + if there is a global palette supplied. + + Default value is 1. + + Hap + Vidvox Hap video encoder. + + Options + + format integer + Specifies the Hap format to encode. + + hap + hap_alpha + hap_q + + Default value is hap. 
+ + chunks integer + Specifies the number of chunks to split frames into, between 1 and + 64. This permits multithreaded decoding of large frames, + potentially at the cost of data-rate. The encoder may modify this + value to divide frames evenly. + + Default value is 1. + + compressor integer + Specifies the second-stage compressor to use. If set to none, + chunks will be limited to 1, as chunked uncompressed frames offer + no benefit. + + none + snappy + + Default value is snappy. + + jpeg2000 + The native jpeg 2000 encoder is lossy by default, the "-q:v" option can + be used to set the encoding quality. Lossless encoding can be selected + with "-pred 1". + + Options + + format integer + Can be set to either "j2k" or "jp2" (the default) that makes it + possible to store non-rgb pix_fmts. + + tile_width integer + Sets tile width. Range is 1 to 1073741824. Default is 256. + + tile_height integer + Sets tile height. Range is 1 to 1073741824. Default is 256. + + pred integer + Allows setting the discrete wavelet transform (DWT) type + + dwt97int (Lossy) + dwt53 (Lossless) + + Default is "dwt97int" + + sop boolean + Enable this to add SOP marker at the start of each packet. Disabled + by default. + + eph boolean + Enable this to add EPH marker at the end of each packet header. + Disabled by default. + + prog integer + Sets the progression order to be used by the encoder. Possible + values are: + + lrcp + rlcp + rpcl + pcrl + cprl + + Set to "lrcp" by default. + + layer_rates string + By default, when this option is not used, compression is done using + the quality metric. This option allows for compression using + compression ratio. The compression ratio for each level could be + specified. The compression ratio of a layer "l" specifies what + ratio of total file size is contained in the first "l" layers. 
+ + Example usage: + + ffmpeg -i input.bmp -c:v jpeg2000 -layer_rates "100,10,1" output.j2k + + This would compress the image to contain 3 layers, where the data + contained in the first layer would be compressed by 1000 times, + compressed by 100 in the first two layers, and shall contain all + data while using all 3 layers. + + librav1e + rav1e AV1 encoder wrapper. + + Requires the presence of the rav1e headers and library during + configuration. You need to explicitly configure the build with + "--enable-librav1e". + + Options + + qmax + Sets the maximum quantizer to use when using bitrate mode. + + qmin + Sets the minimum quantizer to use when using bitrate mode. + + qp Uses quantizer mode to encode at the given quantizer (0-255). + + speed + Selects the speed preset (0-10) to encode with. + + tiles + Selects how many tiles to encode with. + + tile-rows + Selects how many rows of tiles to encode with. + + tile-columns + Selects how many columns of tiles to encode with. + + rav1e-params + Set rav1e options using a list of key=value pairs separated by ":". + See rav1e --help for a list of options. + + For example to specify librav1e encoding options with + -rav1e-params: + + ffmpeg -i input -c:v librav1e -b:v 500K -rav1e-params speed=5:low_latency=true output.mp4 + + libaom-av1 + libaom AV1 encoder wrapper. + + Requires the presence of the libaom headers and library during + configuration. You need to explicitly configure the build with + "--enable-libaom". + + Options + + The wrapper supports the following standard libavcodec options: + + b Set bitrate target in bits/second. By default this will use + variable-bitrate mode. If maxrate and minrate are also set to the + same value then it will use constant-bitrate mode, otherwise if crf + is set as well then it will use constrained-quality mode. + + g keyint_min + Set key frame placement. The GOP size sets the maximum distance + between key frames; if zero the output stream will be intra-only. 
+ The minimum distance is ignored unless it is the same as the GOP + size, in which case key frames will always appear at a fixed + interval. Not set by default, so without this option the library + has completely free choice about where to place key frames. + + qmin qmax + Set minimum/maximum quantisation values. Valid range is from 0 to + 63 (warning: this does not match the quantiser values actually used + by AV1 - divide by four to map real quantiser values to this + range). Defaults to min/max (no constraint). + + minrate maxrate bufsize rc_init_occupancy + Set rate control buffering parameters. Not used if not set - + defaults to unconstrained variable bitrate. + + threads + Set the number of threads to use while encoding. This may require + the tiles or row-mt options to also be set to actually use the + specified number of threads fully. Defaults to the number of + hardware threads supported by the host machine. + + profile + Set the encoding profile. Defaults to using the profile which + matches the bit depth and chroma subsampling of the input. + + The wrapper also has some specific options: + + cpu-used + Set the quality/encoding speed tradeoff. Valid range is from 0 to + 8, higher numbers indicating greater speed and lower quality. The + default value is 1, which will be slow and high quality. + + auto-alt-ref + Enable use of alternate reference frames. Defaults to the internal + default of the library. + + arnr-max-frames (frames) + Set altref noise reduction max frame count. Default is -1. + + arnr-strength (strength) + Set altref noise reduction filter strength. Range is -1 to 6. + Default is -1. + + aq-mode (aq-mode) + Set adaptive quantization mode. Possible values: + + none (0) + Disabled. + + variance (1) + Variance-based. + + complexity (2) + Complexity-based. + + cyclic (3) + Cyclic refresh. + + tune (tune) + Set the distortion metric the encoder is tuned with. Default is + "psnr". 
+ + psnr (0) + ssim (1) + lag-in-frames + Set the maximum number of frames which the encoder may keep in + flight at any one time for lookahead purposes. Defaults to the + internal default of the library. + + error-resilience + Enable error resilience features: + + default + Improve resilience against losses of whole frames. + + Not enabled by default. + + crf Set the quality/size tradeoff for constant-quality (no bitrate + target) and constrained-quality (with maximum bitrate target) + modes. Valid range is 0 to 63, higher numbers indicating lower + quality and smaller output size. Only used if set; by default only + the bitrate target is used. + + static-thresh + Set a change threshold on blocks below which they will be skipped + by the encoder. Defined in arbitrary units as a nonnegative + integer, defaulting to zero (no blocks are skipped). + + drop-threshold + Set a threshold for dropping frames when close to rate control + bounds. Defined as a percentage of the target buffer - when the + rate control buffer falls below this percentage, frames will be + dropped until it has refilled above the threshold. Defaults to + zero (no frames are dropped). + + denoise-noise-level (level) + Amount of noise to be removed for grain synthesis. Grain synthesis + is disabled if this option is not set or set to 0. + + denoise-block-size (pixels) + Block size used for denoising for grain synthesis. If not set, AV1 + codec uses the default value of 32. + + undershoot-pct (pct) + Set datarate undershoot (min) percentage of the target bitrate. + Range is -1 to 100. Default is -1. + + overshoot-pct (pct) + Set datarate overshoot (max) percentage of the target bitrate. + Range is -1 to 1000. Default is -1. + + minsection-pct (pct) + Minimum percentage variation of the GOP bitrate from the target + bitrate. If minsection-pct is not set, the libaomenc wrapper + computes it as follows: "(minrate * 100 / bitrate)". Range is -1 + to 100. Default is -1 (unset). 
+ + maxsection-pct (pct) + Maximum percentage variation of the GOP bitrate from the target + bitrate. If maxsection-pct is not set, the libaomenc wrapper + computes it as follows: "(maxrate * 100 / bitrate)". Range is -1 + to 5000. Default is -1 (unset). + + frame-parallel (boolean) + Enable frame parallel decodability features. Default is true. + + tiles + Set the number of tiles to encode the input video with, as columns + x rows. Larger numbers allow greater parallelism in both encoding + and decoding, but may decrease coding efficiency. Defaults to the + minimum number of tiles required by the size of the input video + (this is 1x1 (that is, a single tile) for sizes up to and including + 4K). + + tile-columns tile-rows + Set the number of tiles as log2 of the number of tile rows and + columns. Provided for compatibility with libvpx/VP9. + + row-mt (Requires libaom >= 1.0.0-759-g90a15f4f2) + Enable row based multi-threading. Disabled by default. + + enable-cdef (boolean) + Enable Constrained Directional Enhancement Filter. The libaom-av1 + encoder enables CDEF by default. + + enable-restoration (boolean) + Enable Loop Restoration Filter. Default is true for libaom-av1. + + enable-global-motion (boolean) + Enable the use of global motion for block prediction. Default is + true. + + enable-intrabc (boolean) + Enable block copy mode for intra block prediction. This mode is + useful for screen content. Default is true. + + enable-rect-partitions (boolean) (Requires libaom >= v2.0.0) + Enable rectangular partitions. Default is true. + + enable-1to4-partitions (boolean) (Requires libaom >= v2.0.0) + Enable 1:4/4:1 partitions. Default is true. + + enable-ab-partitions (boolean) (Requires libaom >= v2.0.0) + Enable AB shape partitions. Default is true. + + enable-angle-delta (boolean) (Requires libaom >= v2.0.0) + Enable angle delta intra prediction. Default is true. 
+ + enable-cfl-intra (boolean) (Requires libaom >= v2.0.0) + Enable chroma predicted from luma intra prediction. Default is + true. + + enable-filter-intra (boolean) (Requires libaom >= v2.0.0) + Enable filter intra predictor. Default is true. + + enable-intra-edge-filter (boolean) (Requires libaom >= v2.0.0) + Enable intra edge filter. Default is true. + + enable-smooth-intra (boolean) (Requires libaom >= v2.0.0) + Enable smooth intra prediction mode. Default is true. + + enable-paeth-intra (boolean) (Requires libaom >= v2.0.0) + Enable paeth predictor in intra prediction. Default is true. + + enable-palette (boolean) (Requires libaom >= v2.0.0) + Enable palette prediction mode. Default is true. + + enable-flip-idtx (boolean) (Requires libaom >= v2.0.0) + Enable extended transform type, including FLIPADST_DCT, + DCT_FLIPADST, FLIPADST_FLIPADST, ADST_FLIPADST, FLIPADST_ADST, + IDTX, V_DCT, H_DCT, V_ADST, H_ADST, V_FLIPADST, H_FLIPADST. Default + is true. + + enable-tx64 (boolean) (Requires libaom >= v2.0.0) + Enable 64-pt transform. Default is true. + + reduced-tx-type-set (boolean) (Requires libaom >= v2.0.0) + Use reduced set of transform types. Default is false. + + use-intra-dct-only (boolean) (Requires libaom >= v2.0.0) + Use DCT only for INTRA modes. Default is false. + + use-inter-dct-only (boolean) (Requires libaom >= v2.0.0) + Use DCT only for INTER modes. Default is false. + + use-intra-default-tx-only (boolean) (Requires libaom >= v2.0.0) + Use Default-transform only for INTRA modes. Default is false. + + enable-ref-frame-mvs (boolean) (Requires libaom >= v2.0.0) + Enable temporal mv prediction. Default is true. + + enable-reduced-reference-set (boolean) (Requires libaom >= v2.0.0) + Use reduced set of single and compound references. Default is + false. + + enable-obmc (boolean) (Requires libaom >= v2.0.0) + Enable obmc. Default is true. + + enable-dual-filter (boolean) (Requires libaom >= v2.0.0) + Enable dual filter. Default is true. 
+ + enable-diff-wtd-comp (boolean) (Requires libaom >= v2.0.0) + Enable difference-weighted compound. Default is true. + + enable-dist-wtd-comp (boolean) (Requires libaom >= v2.0.0) + Enable distance-weighted compound. Default is true. + + enable-onesided-comp (boolean) (Requires libaom >= v2.0.0) + Enable one sided compound. Default is true. + + enable-interinter-wedge (boolean) (Requires libaom >= v2.0.0) + Enable interinter wedge compound. Default is true. + + enable-interintra-wedge (boolean) (Requires libaom >= v2.0.0) + Enable interintra wedge compound. Default is true. + + enable-masked-comp (boolean) (Requires libaom >= v2.0.0) + Enable masked compound. Default is true. + + enable-interintra-comp (boolean) (Requires libaom >= v2.0.0) + Enable interintra compound. Default is true. + + enable-smooth-interintra (boolean) (Requires libaom >= v2.0.0) + Enable smooth interintra mode. Default is true. + + aom-params + Set libaom options using a list of key=value pairs separated by + ":". For a list of supported options, see aomenc --help under the + section "AV1 Specific Options". + + For example to specify libaom encoding options with -aom-params: + + ffmpeg -i input -c:v libaom-av1 -b:v 500K -aom-params tune=psnr:enable-tpl-model=1 output.mp4 + + libsvtav1 + SVT-AV1 encoder wrapper. + + Requires the presence of the SVT-AV1 headers and library during + configuration. You need to explicitly configure the build with + "--enable-libsvtav1". + + Options + + profile + Set the encoding profile. + + level + Set the operating point level. + + tier + Set the operating point tier. + + rc Set the rate control mode to use. + + Possible modes: + + cqp Constant quantizer: use fixed values of qindex (dependent on + the frame type) throughout the stream. This mode is the + default. + + vbr Variable bitrate: use a target bitrate for the whole stream. + + cvbr + Constrained variable bitrate: use a target bitrate for each + GOP. 
+ + qmax + Set the maximum quantizer to use when using a bitrate mode. + + qmin + Set the minimum quantizer to use when using a bitrate mode. + + qp Set the quantizer used in cqp rate control mode (0-63). + + sc_detection + Enable scene change detection. + + la_depth + Set number of frames to look ahead (0-120). + + preset + Set the quality-speed tradeoff, in the range 0 to 8. Higher values + are faster but lower quality. Defaults to 8 (highest speed). + + tile_rows + Set log2 of the number of rows of tiles to use (0-6). + + tile_columns + Set log2 of the number of columns of tiles to use (0-4). + + libkvazaar + Kvazaar H.265/HEVC encoder. + + Requires the presence of the libkvazaar headers and library during + configuration. You need to explicitly configure the build with + --enable-libkvazaar. + + Options + + b Set target video bitrate in bit/s and enable rate control. + + kvazaar-params + Set kvazaar parameters as a list of name=value pairs separated by + commas (,). See kvazaar documentation for a list of options. + + libopenh264 + Cisco libopenh264 H.264/MPEG-4 AVC encoder wrapper. + + This encoder requires the presence of the libopenh264 headers and + library during configuration. You need to explicitly configure the + build with "--enable-libopenh264". The library is detected using pkg- + config. + + For more information about the library see . + + Options + + The following FFmpeg global options affect the configurations of the + libopenh264 encoder. + + b Set the bitrate (as a number of bits per second). + + g Set the GOP size. + + maxrate + Set the max bitrate (as a number of bits per second). + + flags +global_header + Set global header in the bitstream. + + slices + Set the number of slices, used in parallelized encoding. Default + value is 0. This is only used when slice_mode is set to fixed. + + slice_mode + Set slice mode. 
Can assume one of the following possible values: + + fixed + a fixed number of slices + + rowmb + one slice per row of macroblocks + + auto + automatic number of slices according to number of threads + + dyn dynamic slicing + + Default value is auto. + + loopfilter + Enable loop filter, if set to 1 (automatically enabled). To disable + set a value of 0. + + profile + Set profile restrictions. If set to the value of main enable CABAC + (set the "SEncParamExt.iEntropyCodingModeFlag" flag to 1). + + max_nal_size + Set maximum NAL size in bytes. + + allow_skip_frames + Allow skipping frames to hit the target bitrate if set to 1. + + libtheora + libtheora Theora encoder wrapper. + + Requires the presence of the libtheora headers and library during + configuration. You need to explicitly configure the build with + "--enable-libtheora". + + For more information about the libtheora project see + . + + Options + + The following global options are mapped to internal libtheora options + which affect the quality and the bitrate of the encoded stream. + + b Set the video bitrate in bit/s for CBR (Constant Bit Rate) mode. + In case VBR (Variable Bit Rate) mode is enabled this option is + ignored. + + flags + Used to enable constant quality mode (VBR) encoding through the + qscale flag, and to enable the "pass1" and "pass2" modes. + + g Set the GOP size. + + global_quality + Set the global quality as an integer in lambda units. + + Only relevant when VBR mode is enabled with "flags +qscale". The + value is converted to QP units by dividing it by "FF_QP2LAMBDA", + clipped in the [0 - 10] range, and then multiplied by 6.3 to get a + value in the native libtheora range [0-63]. A higher value + corresponds to a higher quality. + + q Enable VBR mode when set to a non-negative value, and set constant + quality value as a double floating point value in QP units. 
+ + The value is clipped in the [0-10] range, and then multiplied by + 6.3 to get a value in the native libtheora range [0-63]. + + This option is valid only using the ffmpeg command-line tool. For + library interface users, use global_quality. + + Examples + + o Set maximum constant quality (VBR) encoding with ffmpeg: + + ffmpeg -i INPUT -codec:v libtheora -q:v 10 OUTPUT.ogg + + o Use ffmpeg to convert a CBR 1000 kbps Theora video stream: + + ffmpeg -i INPUT -codec:v libtheora -b:v 1000k OUTPUT.ogg + + libvpx + VP8/VP9 format supported through libvpx. + + Requires the presence of the libvpx headers and library during + configuration. You need to explicitly configure the build with + "--enable-libvpx". + + Options + + The following options are supported by the libvpx wrapper. The + vpxenc-equivalent options or values are listed in parentheses for easy + migration. + + To reduce the duplication of documentation, only the private options + and some others requiring special attention are documented here. For + the documentation of the undocumented generic options, see the Codec + Options chapter. + + To get more documentation of the libvpx options, invoke the command + ffmpeg -h encoder=libvpx, ffmpeg -h encoder=libvpx-vp9 or vpxenc + --help. Further information is available in the libvpx API + documentation. + + b (target-bitrate) + Set bitrate in bits/s. Note that FFmpeg's b option is expressed in + bits/s, while vpxenc's target-bitrate is in kilobits/s. + + g (kf-max-dist) + keyint_min (kf-min-dist) + qmin (min-q) + qmax (max-q) + bufsize (buf-sz, buf-optimal-sz) + Set ratecontrol buffer size (in bits). Note vpxenc's options are + specified in milliseconds, the libvpx wrapper converts this value + as follows: "buf-sz = bufsize * 1000 / bitrate", "buf-optimal-sz = + bufsize * 1000 / bitrate * 5 / 6". + + rc_init_occupancy (buf-initial-sz) + Set number of bits which should be loaded into the rc buffer before + decoding starts. 
Note vpxenc's option is specified in milliseconds, + the libvpx wrapper converts this value as follows: + "rc_init_occupancy * 1000 / bitrate". + + undershoot-pct + Set datarate undershoot (min) percentage of the target bitrate. + + overshoot-pct + Set datarate overshoot (max) percentage of the target bitrate. + + skip_threshold (drop-frame) + qcomp (bias-pct) + maxrate (maxsection-pct) + Set GOP max bitrate in bits/s. Note vpxenc's option is specified as + a percentage of the target bitrate, the libvpx wrapper converts + this value as follows: "(maxrate * 100 / bitrate)". + + minrate (minsection-pct) + Set GOP min bitrate in bits/s. Note vpxenc's option is specified as + a percentage of the target bitrate, the libvpx wrapper converts + this value as follows: "(minrate * 100 / bitrate)". + + minrate, maxrate, b end-usage=cbr + "(minrate == maxrate == bitrate)". + + crf (end-usage=cq, cq-level) + tune (tune) + psnr (psnr) + ssim (ssim) + quality, deadline (deadline) + best + Use best quality deadline. Poorly named and quite slow, this + option should be avoided as it may give worse quality output + than good. + + good + Use good quality deadline. This is a good trade-off between + speed and quality when used with the cpu-used option. + + realtime + Use realtime quality deadline. + + speed, cpu-used (cpu-used) + Set quality/speed ratio modifier. Higher values speed up the encode + at the cost of quality. + + nr (noise-sensitivity) + static-thresh + Set a change threshold on blocks below which they will be skipped + by the encoder. + + slices (token-parts) + Note that FFmpeg's slices option gives the total number of + partitions, while vpxenc's token-parts is given as + "log2(partitions)". + + max-intra-rate + Set maximum I-frame bitrate as a percentage of the target bitrate. + A value of 0 means unlimited. + + force_key_frames + "VPX_EFLAG_FORCE_KF" + + Alternate reference frame related + auto-alt-ref + Enable use of alternate reference frames (2-pass only). 
Values + greater than 1 enable multi-layer alternate reference frames + (VP9 only). + + arnr-maxframes + Set altref noise reduction max frame count. + + arnr-type + Set altref noise reduction filter type: backward, forward, + centered. + + arnr-strength + Set altref noise reduction filter strength. + + rc-lookahead, lag-in-frames (lag-in-frames) + Set number of frames to look ahead for frametype and + ratecontrol. + + error-resilient + Enable error resiliency features. + + sharpness integer + Increase sharpness at the expense of lower PSNR. The valid range + is [0, 7]. + + ts-parameters + Sets the temporal scalability configuration using a :-separated + list of key=value pairs. For example, to specify temporal + scalability parameters with "ffmpeg": + + ffmpeg -i INPUT -c:v libvpx -ts-parameters ts_number_layers=3:\ + ts_target_bitrate=250,500,1000:ts_rate_decimator=4,2,1:\ + ts_periodicity=4:ts_layer_id=0,2,1,2:ts_layering_mode=3 OUTPUT + + Below is a brief explanation of each of the parameters, please + refer to "struct vpx_codec_enc_cfg" in "vpx/vpx_encoder.h" for more + details. + + ts_number_layers + Number of temporal coding layers. + + ts_target_bitrate + Target bitrate for each temporal layer (in kbps). (bitrate + should be inclusive of the lower temporal layer). + + ts_rate_decimator + Frame rate decimation factor for each temporal layer. + + ts_periodicity + Length of the sequence defining frame temporal layer + membership. + + ts_layer_id + Template defining the membership of frames to temporal layers. + + ts_layering_mode + (optional) Selecting the temporal structure from a set of pre- + defined temporal layering modes. Currently supports the + following options. + + 0 No temporal layering flags are provided internally, relies + on flags being passed in using "metadata" field in + "AVFrame" with following keys. + + vp8-flags + Sets the flags passed into the encoder to indicate the + referencing scheme for the current frame. 
Refer to + function "vpx_codec_encode" in "vpx/vpx_encoder.h" for + more details. + + temporal_id + Explicitly sets the temporal id of the current frame to + encode. + + 2 Two temporal layers. 0-1... + + 3 Three temporal layers. 0-2-1-2...; with single reference + frame. + + 4 Same as option "3", except there is a dependency between + the two temporal layer 2 frames within the temporal period. + + VP9-specific options + lossless + Enable lossless mode. + + tile-columns + Set number of tile columns to use. Note this is given as + "log2(tile_columns)". For example, 8 tile columns would be + requested by setting the tile-columns option to 3. + + tile-rows + Set number of tile rows to use. Note this is given as + "log2(tile_rows)". For example, 4 tile rows would be requested + by setting the tile-rows option to 2. + + frame-parallel + Enable frame parallel decodability features. + + aq-mode + Set adaptive quantization mode (0: off (default), 1: variance + 2: complexity, 3: cyclic refresh, 4: equator360). + + colorspace color-space + Set input color space. The VP9 bitstream supports signaling the + following colorspaces: + + rgb sRGB + bt709 bt709 + unspecified unknown + bt470bg bt601 + smpte170m smpte170 + smpte240m smpte240 + bt2020_ncl bt2020 + row-mt boolean + Enable row based multi-threading. + + tune-content + Set content type: default (0), screen (1), film (2). + + corpus-complexity + Corpus VBR mode is a variant of standard VBR where the + complexity distribution midpoint is passed in rather than + calculated for a specific clip or chunk. + + The valid range is [0, 10000]. 0 (default) uses standard VBR. + + enable-tpl boolean + Enable temporal dependency model. + + ref-frame-config + Using per-frame metadata, set members of the structure + "vpx_svc_ref_frame_config_t" in "vpx/vp8cx.h" to fine-control + referencing schemes and frame buffer management. Use a + :-separated list of key=value pairs. 
For example, + + av_dict_set(&av_frame->metadata, "ref-frame-config", \ + "rfc_update_buffer_slot=7:rfc_lst_fb_idx=0:rfc_gld_fb_idx=1:rfc_alt_fb_idx=2:rfc_reference_last=0:rfc_reference_golden=0:rfc_reference_alt_ref=0"); + + rfc_update_buffer_slot + Indicates the buffer slot number to update + + rfc_update_last + Indicates whether to update the LAST frame + + rfc_update_golden + Indicates whether to update GOLDEN frame + + rfc_update_alt_ref + Indicates whether to update ALT_REF frame + + rfc_lst_fb_idx + LAST frame buffer index + + rfc_gld_fb_idx + GOLDEN frame buffer index + + rfc_alt_fb_idx + ALT_REF frame buffer index + + rfc_reference_last + Indicates whether to reference LAST frame + + rfc_reference_golden + Indicates whether to reference GOLDEN frame + + rfc_reference_alt_ref + Indicates whether to reference ALT_REF frame + + rfc_reference_duration + Indicates frame duration + + For more information about libvpx see: + + libwebp + libwebp WebP Image encoder wrapper + + libwebp is Google's official encoder for WebP images. It can encode in + either lossy or lossless mode. Lossy images are essentially a wrapper + around a VP8 frame. Lossless images are a separate codec developed by + Google. + + Pixel Format + + Currently, libwebp only supports YUV420 for lossy and RGB for lossless + due to limitations of the format and libwebp. Alpha is supported for + either mode. Because of API limitations, if RGB is passed in when + encoding lossy or YUV is passed in for encoding lossless, the pixel + format will automatically be converted using functions from libwebp. + This is not ideal and is done only for convenience. + + Options + + -lossless boolean + Enables/Disables use of lossless mode. Default is 0. + + -compression_level integer + For lossy, this is a quality/speed tradeoff. Higher values give + better quality for a given size at the cost of increased encoding + time. For lossless, this is a size/speed tradeoff. 
Higher values + give smaller size at the cost of increased encoding time. More + specifically, it controls the number of extra algorithms and + compression tools used, and varies the combination of these tools. + This maps to the method option in libwebp. The valid range is 0 to + 6. Default is 4. + + -qscale float + For lossy encoding, this controls image quality, 0 to 100. For + lossless encoding, this controls the effort and time spent at + compressing more. The default value is 75. Note that for usage via + libavcodec, this option is called global_quality and must be + multiplied by FF_QP2LAMBDA. + + -preset type + Configuration preset. This does some automatic settings based on + the general type of the image. + + none + Do not use a preset. + + default + Use the encoder default. + + picture + Digital picture, like portrait, inner shot + + photo + Outdoor photograph, with natural lighting + + drawing + Hand or line drawing, with high-contrast details + + icon + Small-sized colorful images + + text + Text-like + + libx264, libx264rgb + x264 H.264/MPEG-4 AVC encoder wrapper. + + This encoder requires the presence of the libx264 headers and library + during configuration. You need to explicitly configure the build with + "--enable-libx264". + + libx264 supports an impressive number of features, including 8x8 and + 4x4 adaptive spatial transform, adaptive B-frame placement, CAVLC/CABAC + entropy coding, interlacing (MBAFF), lossless mode, psy optimizations + for detail retention (adaptive quantization, psy-RD, psy-trellis). + + Many libx264 encoder options are mapped to FFmpeg global codec options, + while unique encoder options are provided through private options. + Additionally the x264opts and x264-params private options allows one to + pass a list of key=value tuples as accepted by the libx264 + "x264_param_parse" function. + + The x264 project website is at + . 
+ + The libx264rgb encoder is the same as libx264, except it accepts packed + RGB pixel formats as input instead of YUV. + + Supported Pixel Formats + + x264 supports 8- to 10-bit color spaces. The exact bit depth is + controlled at x264's configure time. + + Options + + The following options are supported by the libx264 wrapper. The + x264-equivalent options or values are listed in parentheses for easy + migration. + + To reduce the duplication of documentation, only the private options + and some others requiring special attention are documented here. For + the documentation of the undocumented generic options, see the Codec + Options chapter. + + To get a more accurate and extensive documentation of the libx264 + options, invoke the command x264 --fullhelp or consult the libx264 + documentation. + + b (bitrate) + Set bitrate in bits/s. Note that FFmpeg's b option is expressed in + bits/s, while x264's bitrate is in kilobits/s. + + bf (bframes) + g (keyint) + qmin (qpmin) + Minimum quantizer scale. + + qmax (qpmax) + Maximum quantizer scale. + + qdiff (qpstep) + Maximum difference between quantizer scales. + + qblur (qblur) + Quantizer curve blur + + qcomp (qcomp) + Quantizer curve compression factor + + refs (ref) + Number of reference frames each P-frame can use. The range is from + 0-16. + + sc_threshold (scenecut) + Sets the threshold for the scene change detection. + + trellis (trellis) + Performs Trellis quantization to increase efficiency. Enabled by + default. + + nr (nr) + me_range (merange) + Maximum range of the motion search in pixels. + + me_method (me) + Set motion estimation method. Possible values in the decreasing + order of speed: + + dia (dia) + epzs (dia) + Diamond search with radius 1 (fastest). epzs is an alias for + dia. + + hex (hex) + Hexagonal search with radius 2. + + umh (umh) + Uneven multi-hexagon search. + + esa (esa) + Exhaustive search. + + tesa (tesa) + Hadamard exhaustive search (slowest). 
+ + forced-idr + Normally, when forcing a I-frame type, the encoder can select any + type of I-frame. This option forces it to choose an IDR-frame. + + subq (subme) + Sub-pixel motion estimation method. + + b_strategy (b-adapt) + Adaptive B-frame placement decision algorithm. Use only on first- + pass. + + keyint_min (min-keyint) + Minimum GOP size. + + coder + Set entropy encoder. Possible values: + + ac Enable CABAC. + + vlc Enable CAVLC and disable CABAC. It generates the same effect as + x264's --no-cabac option. + + cmp Set full pixel motion estimation comparison algorithm. Possible + values: + + chroma + Enable chroma in motion estimation. + + sad Ignore chroma in motion estimation. It generates the same + effect as x264's --no-chroma-me option. + + threads (threads) + Number of encoding threads. + + thread_type + Set multithreading technique. Possible values: + + slice + Slice-based multithreading. It generates the same effect as + x264's --sliced-threads option. + + frame + Frame-based multithreading. + + flags + Set encoding flags. It can be used to disable closed GOP and enable + open GOP by setting it to "-cgop". The result is similar to the + behavior of x264's --open-gop option. + + rc_init_occupancy (vbv-init) + preset (preset) + Set the encoding preset. + + tune (tune) + Set tuning of the encoding params. + + profile (profile) + Set profile restrictions. + + fastfirstpass + Enable fast settings when encoding first pass, when set to 1. When + set to 0, it has the same effect of x264's --slow-firstpass option. + + crf (crf) + Set the quality for constant quality mode. + + crf_max (crf-max) + In CRF mode, prevents VBV from lowering quality beyond this point. + + qp (qp) + Set constant quantization rate control method parameter. + + aq-mode (aq-mode) + Set AQ method. Possible values: + + none (0) + Disabled. + + variance (1) + Variance AQ (complexity mask). + + autovariance (2) + Auto-variance AQ (experimental). 
+ + aq-strength (aq-strength) + Set AQ strength, reduce blocking and blurring in flat and textured + areas. + + psy Use psychovisual optimizations when set to 1. When set to 0, it has + the same effect as x264's --no-psy option. + + psy-rd (psy-rd) + Set strength of psychovisual optimization, in psy-rd:psy-trellis + format. + + rc-lookahead (rc-lookahead) + Set number of frames to look ahead for frametype and ratecontrol. + + weightb + Enable weighted prediction for B-frames when set to 1. When set to + 0, it has the same effect as x264's --no-weightb option. + + weightp (weightp) + Set weighted prediction method for P-frames. Possible values: + + none (0) + Disabled + + simple (1) + Enable only weighted refs + + smart (2) + Enable both weighted refs and duplicates + + ssim (ssim) + Enable calculation and printing SSIM stats after the encoding. + + intra-refresh (intra-refresh) + Enable the use of Periodic Intra Refresh instead of IDR frames when + set to 1. + + avcintra-class (class) + Configure the encoder to generate AVC-Intra. Valid values are + 50,100 and 200 + + bluray-compat (bluray-compat) + Configure the encoder to be compatible with the bluray standard. + It is a shorthand for setting "bluray-compat=1 force-cfr=1". + + b-bias (b-bias) + Set the influence on how often B-frames are used. + + b-pyramid (b-pyramid) + Set method for keeping of some B-frames as references. Possible + values: + + none (none) + Disabled. + + strict (strict) + Strictly hierarchical pyramid. + + normal (normal) + Non-strict (not Blu-ray compatible). + + mixed-refs + Enable the use of one reference per partition, as opposed to one + reference per macroblock when set to 1. When set to 0, it has the + same effect as x264's --no-mixed-refs option. + + 8x8dct + Enable adaptive spatial transform (high profile 8x8 transform) when + set to 1. When set to 0, it has the same effect as x264's + --no-8x8dct option. + + fast-pskip + Enable early SKIP detection on P-frames when set to 1. 
When set to + 0, it has the same effect as x264's --no-fast-pskip option. + + aud (aud) + Enable use of access unit delimiters when set to 1. + + mbtree + Enable use macroblock tree ratecontrol when set to 1. When set to + 0, it has the same effect as x264's --no-mbtree option. + + deblock (deblock) + Set loop filter parameters, in alpha:beta form. + + cplxblur (cplxblur) + Set fluctuations reduction in QP (before curve compression). + + partitions (partitions) + Set partitions to consider as a comma-separated list of. Possible + values in the list: + + p8x8 + 8x8 P-frame partition. + + p4x4 + 4x4 P-frame partition. + + b8x8 + 4x4 B-frame partition. + + i8x8 + 8x8 I-frame partition. + + i4x4 + 4x4 I-frame partition. (Enabling p4x4 requires p8x8 to be + enabled. Enabling i8x8 requires adaptive spatial transform + (8x8dct option) to be enabled.) + + none (none) + Do not consider any partitions. + + all (all) + Consider every partition. + + direct-pred (direct) + Set direct MV prediction mode. Possible values: + + none (none) + Disable MV prediction. + + spatial (spatial) + Enable spatial predicting. + + temporal (temporal) + Enable temporal predicting. + + auto (auto) + Automatically decided. + + slice-max-size (slice-max-size) + Set the limit of the size of each slice in bytes. If not specified + but RTP payload size (ps) is specified, that is used. + + stats (stats) + Set the file name for multi-pass stats. + + nal-hrd (nal-hrd) + Set signal HRD information (requires vbv-bufsize to be set). + Possible values: + + none (none) + Disable HRD information signaling. + + vbr (vbr) + Variable bit rate. + + cbr (cbr) + Constant bit rate (not allowed in MP4 container). + + x264opts (N.A.) + Set any x264 option, see x264 --fullhelp for a list. + + Argument is a list of key=value couples separated by ":". In filter + and psy-rd options that use ":" as a separator themselves, use "," + instead. 
They accept it as well since long ago but this is kept + undocumented for some reason. + + For example to specify libx264 encoding options with ffmpeg: + + ffmpeg -i foo.mpg -c:v libx264 -x264opts keyint=123:min-keyint=20 -an out.mkv + + a53cc boolean + Import closed captions (which must be ATSC compatible format) into + output. Only the mpeg2 and h264 decoders provide these. Default is + 1 (on). + + x264-params (N.A.) + Override the x264 configuration using a :-separated list of + key=value parameters. + + This option is functionally the same as the x264opts, but is + duplicated for compatibility with the Libav fork. + + For example to specify libx264 encoding options with ffmpeg: + + ffmpeg -i INPUT -c:v libx264 -x264-params level=30:bframes=0:weightp=0:\ + cabac=0:ref=1:vbv-maxrate=768:vbv-bufsize=2000:analyse=all:me=umh:\ + no-fast-pskip=1:subq=6:8x8dct=0:trellis=0 OUTPUT + + Encoding ffpresets for common usages are provided so they can be used + with the general presets system (e.g. passing the pre option). + + libx265 + x265 H.265/HEVC encoder wrapper. + + This encoder requires the presence of the libx265 headers and library + during configuration. You need to explicitly configure the build with + --enable-libx265. + + Options + + b Sets target video bitrate. + + bf + g Set the GOP size. + + keyint_min + Minimum GOP size. + + refs + Number of reference frames each P-frame can use. The range is from + 1-16. + + preset + Set the x265 preset. + + tune + Set the x265 tune parameter. + + profile + Set profile restrictions. + + crf Set the quality for constant quality mode. + + qp Set constant quantization rate control method parameter. + + qmin + Minimum quantizer scale. + + qmax + Maximum quantizer scale. + + qdiff + Maximum difference between quantizer scales. + + qblur + Quantizer curve blur + + qcomp + Quantizer curve compression factor + + i_qfactor + b_qfactor + forced-idr + Normally, when forcing a I-frame type, the encoder can select any + type of I-frame. 
This option forces it to choose an IDR-frame. + + x265-params + Set x265 options using a list of key=value couples separated by + ":". See x265 --help for a list of options. + + For example to specify libx265 encoding options with -x265-params: + + ffmpeg -i input -c:v libx265 -x265-params crf=26:psy-rd=1 output.mp4 + + libxavs2 + xavs2 AVS2-P2/IEEE1857.4 encoder wrapper. + + This encoder requires the presence of the libxavs2 headers and library + during configuration. You need to explicitly configure the build with + --enable-libxavs2. + + The following standard libavcodec options are used: + + o b / bit_rate + + o g / gop_size + + o bf / max_b_frames + + The encoder also has its own specific options: + + Options + + lcu_row_threads + Set the number of parallel threads for rows from 1 to 8 (default + 5). + + initial_qp + Set the xavs2 quantization parameter from 1 to 63 (default 34). + This is used to set the initial qp for the first frame. + + qp Set the xavs2 quantization parameter from 1 to 63 (default 34). + This is used to set the qp value under constant-QP mode. + + max_qp + Set the max qp for rate control from 1 to 63 (default 55). + + min_qp + Set the min qp for rate control from 1 to 63 (default 20). + + speed_level + Set the Speed level from 0 to 9 (default 0). Higher is better but + slower. + + log_level + Set the log level from -1 to 3 (default 0). -1: none, 0: error, 1: + warning, 2: info, 3: debug. + + xavs2-params + Set xavs2 options using a list of key=value couples separated by + ":". + + For example to specify libxavs2 encoding options with + -xavs2-params: + + ffmpeg -i input -c:v libxavs2 -xavs2-params RdoqLevel=0 output.avs2 + + libxvid + Xvid MPEG-4 Part 2 encoder wrapper. + + This encoder requires the presence of the libxvidcore headers and + library during configuration. You need to explicitly configure the + build with "--enable-libxvid --enable-gpl". 
+ + The native "mpeg4" encoder supports the MPEG-4 Part 2 format, so users + can encode to this format without this library. + + Options + + The following options are supported by the libxvid wrapper. Some of the + following options are listed but are not documented, and correspond to + shared codec options. See the Codec Options chapter for their + documentation. The other shared options which are not listed have no + effect for the libxvid encoder. + + b + g + qmin + qmax + mpeg_quant + threads + bf + b_qfactor + b_qoffset + flags + Set specific encoding flags. Possible values: + + mv4 Use four motion vector by macroblock. + + aic Enable high quality AC prediction. + + gray + Only encode grayscale. + + gmc Enable the use of global motion compensation (GMC). + + qpel + Enable quarter-pixel motion compensation. + + cgop + Enable closed GOP. + + global_header + Place global headers in extradata instead of every keyframe. + + trellis + me_method + Set motion estimation method. Possible values in decreasing order + of speed and increasing order of quality: + + zero + Use no motion estimation (default). + + phods + x1 + log Enable advanced diamond zonal search for 16x16 blocks and half- + pixel refinement for 16x16 blocks. x1 and log are aliases for + phods. + + epzs + Enable all of the things described above, plus advanced diamond + zonal search for 8x8 blocks, half-pixel refinement for 8x8 + blocks, and motion estimation on chroma planes. + + full + Enable all of the things described above, plus extended 16x16 + and 8x8 blocks search. + + mbd Set macroblock decision algorithm. Possible values in the + increasing order of quality: + + simple + Use macroblock comparing function algorithm (default). + + bits + Enable rate distortion-based half pixel and quarter pixel + refinement for 16x16 blocks. 
+ + rd Enable all of the things described above, plus rate distortion- + based half pixel and quarter pixel refinement for 8x8 blocks, + and rate distortion-based search using square pattern. + + lumi_aq + Enable lumi masking adaptive quantization when set to 1. Default is + 0 (disabled). + + variance_aq + Enable variance adaptive quantization when set to 1. Default is 0 + (disabled). + + When combined with lumi_aq, the resulting quality will not be + better than any of the two specified individually. In other words, + the resulting quality will be the worse one of the two effects. + + ssim + Set structural similarity (SSIM) displaying method. Possible + values: + + off Disable displaying of SSIM information. + + avg Output average SSIM at the end of encoding to stdout. The + format of showing the average SSIM is: + + Average SSIM: %f + + For users who are not familiar with C, %f means a float number, + or a decimal (e.g. 0.939232). + + frame + Output both per-frame SSIM data during encoding and average + SSIM at the end of encoding to stdout. The format of per-frame + information is: + + SSIM: avg: %1.3f min: %1.3f max: %1.3f + + For users who are not familiar with C, %1.3f means a float + number rounded to 3 digits after the dot (e.g. 0.932). + + ssim_acc + Set SSIM accuracy. Valid options are integers within the range of + 0-4, while 0 gives the most accurate result and 4 computes the + fastest. + + MediaFoundation + This provides wrappers to encoders (both audio and video) in the + MediaFoundation framework. It can access both SW and HW encoders. + Video encoders can take input in either of nv12 or yuv420p form (some + encoders support both, some support only either - in practice, nv12 is + the safer choice, especially among HW encoders). + + mpeg2 + MPEG-2 video encoder. 
+ + Options + + profile + Select the mpeg2 profile to encode: + + 422 + high + ss Spatially Scalable + + snr SNR Scalable + + main + simple + level + Select the mpeg2 level to encode: + + high + high1440 + main + low + seq_disp_ext integer + Specifies if the encoder should write a sequence_display_extension + to the output. + + -1 + auto + Decide automatically to write it or not (this is the default) + by checking if the data to be written is different from the + default or unspecified values. + + 0 + never + Never write it. + + 1 + always + Always write it. + + video_format integer + Specifies the video_format written into the sequence display + extension indicating the source of the video pictures. The default + is unspecified, can be component, pal, ntsc, secam or mac. For + maximum compatibility, use component. + + a53cc boolean + Import closed captions (which must be ATSC compatible format) into + output. Default is 1 (on). + + png + PNG image encoder. + + Private options + + dpi integer + Set physical density of pixels, in dots per inch, unset by default + + dpm integer + Set physical density of pixels, in dots per meter, unset by default + + ProRes + Apple ProRes encoder. + + FFmpeg contains 2 ProRes encoders, the prores-aw and prores-ks encoder. + The used encoder can be chosen with the "-vcodec" option. + + Private Options for prores-ks + + profile integer + Select the ProRes profile to encode + + proxy + lt + standard + hq + 4444 + 4444xq + quant_mat integer + Select quantization matrix. + + auto + default + proxy + lt + standard + hq + + If set to auto, the matrix matching the profile will be picked. If + not set, the matrix providing the highest quality, default, will be + picked. + + bits_per_mb integer + How many bits to allot for coding one macroblock. Different + profiles use between 200 and 2400 bits per macroblock, the maximum + is 8000. 
+ + mbs_per_slice integer + Number of macroblocks in each slice (1-8); the default value (8) + should be good in almost all situations. + + vendor string + Override the 4-byte vendor ID. A custom vendor ID like apl0 would + claim the stream was produced by the Apple encoder. + + alpha_bits integer + Specify number of bits for alpha component. Possible values are 0, + 8 and 16. Use 0 to disable alpha plane coding. + + Speed considerations + + In the default mode of operation the encoder has to honor frame + constraints (i.e. not produce frames with size bigger than requested) + while still making output picture as good as possible. A frame + containing a lot of small details is harder to compress and the encoder + would spend more time searching for appropriate quantizers for each + slice. + + Setting a higher bits_per_mb limit will improve the speed. + + For the fastest encoding speed set the qscale parameter (4 is the + recommended value) and do not set a size constraint. + + QSV encoders + The family of Intel QuickSync Video encoders (MPEG-2, H.264, HEVC, + JPEG/MJPEG and VP9) + + The ratecontrol method is selected as follows: + + o When global_quality is specified, a quality-based mode is used. + Specifically this means either + + - CQP - constant quantizer scale, when the qscale codec flag is + also set (the -qscale ffmpeg option). + + - LA_ICQ - intelligent constant quality with lookahead, when the + look_ahead option is also set. + + - ICQ -- intelligent constant quality otherwise. + + o Otherwise, a bitrate-based mode is used. For all of those, you + should specify at least the desired average bitrate with the b + option. + + - LA - VBR with lookahead, when the look_ahead option is + specified. + + - VCM - video conferencing mode, when the vcm option is set. + + - CBR - constant bitrate, when maxrate is specified and equal to + the average bitrate. + + - VBR - variable bitrate, when maxrate is specified, but is + higher than the average bitrate. 
+ + - AVBR - average VBR mode, when maxrate is not specified. This + mode is further configured by the avbr_accuracy and + avbr_convergence options. + + Note that depending on your system, a different mode than the one you + specified may be selected by the encoder. Set the verbosity level to + verbose or higher to see the actual settings used by the QSV runtime. + + Additional libavcodec global options are mapped to MSDK options as + follows: + + o g/gop_size -> GopPicSize + + o bf/max_b_frames+1 -> GopRefDist + + o rc_init_occupancy/rc_initial_buffer_occupancy -> InitialDelayInKB + + o slices -> NumSlice + + o refs -> NumRefFrame + + o b_strategy/b_frame_strategy -> BRefType + + o cgop/CLOSED_GOP codec flag -> GopOptFlag + + o For the CQP mode, the i_qfactor/i_qoffset and b_qfactor/b_qoffset + set the difference between QPP and QPI, and QPP and QPB + respectively. + + o Setting the coder option to the value vlc will make the H.264 + encoder use CAVLC instead of CABAC. + + snow + Options + + iterative_dia_size + dia size for the iterative motion estimation + + VAAPI encoders + Wrappers for hardware encoders accessible via VAAPI. + + These encoders only accept input in VAAPI hardware surfaces. If you + have input in software frames, use the hwupload filter to upload them + to the GPU. + + The following standard libavcodec options are used: + + o g / gop_size + + o bf / max_b_frames + + o profile + + If not set, this will be determined automatically from the format + of the input frames and the profiles supported by the driver. + + o level + + o b / bit_rate + + o maxrate / rc_max_rate + + o bufsize / rc_buffer_size + + o rc_init_occupancy / rc_initial_buffer_occupancy + + o compression_level + + Speed / quality tradeoff: higher values are faster / worse quality. + + o q / global_quality + + Size / quality tradeoff: higher values are smaller / worse quality. 
+ + o qmin + + o qmax + + o i_qfactor / i_quant_factor + + o i_qoffset / i_quant_offset + + o b_qfactor / b_quant_factor + + o b_qoffset / b_quant_offset + + o slices + + All encoders support the following options: + + low_power + Some drivers/platforms offer a second encoder for some codecs + intended to use less power than the default encoder; setting this + option will attempt to use that encoder. Note that it may support + a reduced feature set, so some other options may not be available + in this mode. + + idr_interval + Set the number of normal intra frames between full-refresh (IDR) + frames in open-GOP mode. The intra frames are still IRAPs, but + will not include global headers and may have non-decodable leading + pictures. + + b_depth + Set the B-frame reference depth. When set to one (the default), + all B-frames will refer only to P- or I-frames. When set to + greater values multiple layers of B-frames will be present, frames + in each layer only referring to frames in higher layers. + + rc_mode + Set the rate control mode to use. A given driver may only support + a subset of modes. + + Possible modes: + + auto + Choose the mode automatically based on driver support and the + other options. This is the default. + + CQP Constant-quality. + + CBR Constant-bitrate. + + VBR Variable-bitrate. + + ICQ Intelligent constant-quality. + + QVBR + Quality-defined variable-bitrate. + + AVBR + Average variable bitrate. + + Each encoder also has its own specific options: + + h264_vaapi + profile sets the value of profile_idc and the + constraint_set*_flags. level sets the value of level_idc. + + coder + Set entropy encoder (default is cabac). Possible values: + + ac + cabac + Use CABAC. + + vlc + cavlc + Use CAVLC. + + aud Include access unit delimiters in the stream (not included by + default). + + sei Set SEI message types to include. 
Some combination of the + following values: + + identifier + Include a user_data_unregistered message containing + information about the encoder. + + timing + Include picture timing parameters (buffering_period and + pic_timing messages). + + recovery_point + Include recovery points where appropriate (recovery_point + messages). + + hevc_vaapi + profile and level set the values of general_profile_idc and + general_level_idc respectively. + + aud Include access unit delimiters in the stream (not included by + default). + + tier + Set general_tier_flag. This may affect the level chosen for + the stream if it is not explicitly specified. + + sei Set SEI message types to include. Some combination of the + following values: + + hdr Include HDR metadata if the input frames have it + (mastering_display_colour_volume and content_light_level + messages). + + tiles + Set the number of tiles to encode the input video with, as + columns x rows. Larger numbers allow greater parallelism in + both encoding and decoding, but may decrease coding efficiency. + + mjpeg_vaapi + Only baseline DCT encoding is supported. The encoder always uses + the standard quantisation and huffman tables - global_quality + scales the standard quantisation table (range 1-100). + + For YUV, 4:2:0, 4:2:2 and 4:4:4 subsampling modes are supported. + RGB is also supported, and will create an RGB JPEG. + + jfif + Include JFIF header in each frame (not included by default). + + huffman + Include standard huffman tables (on by default). Turning this + off will save a few hundred bytes in each output frame, but may + lose compatibility with some JPEG decoders which don't fully + handle MJPEG. + + mpeg2_vaapi + profile and level set the value of profile_and_level_indication. + + vp8_vaapi + B-frames are not supported. + + global_quality sets the q_idx used for non-key frames (range + 0-127). + + loop_filter_level + loop_filter_sharpness + Manually set the loop filter parameters. 
+ + vp9_vaapi + global_quality sets the q_idx used for P-frames (range 0-255). + + loop_filter_level + loop_filter_sharpness + Manually set the loop filter parameters. + + B-frames are supported, but the output stream is always in encode + order rather than display order. If B-frames are enabled, it may + be necessary to use the vp9_raw_reorder bitstream filter to modify + the output stream to display frames in the correct order. + + Only normal frames are produced - the vp9_superframe bitstream + filter may be required to produce a stream usable with all + decoders. + + vc2 + SMPTE VC-2 (previously BBC Dirac Pro). This codec was primarily aimed + at professional broadcasting but since it supports yuv420, yuv422 and + yuv444 at 8 (limited range or full range), 10 or 12 bits, this makes it + suitable for other tasks which require low overhead and low compression + (like screen recording). + + Options + + b Sets target video bitrate. Usually that's around 1:6 of the + uncompressed video bitrate (e.g. for 1920x1080 50fps yuv422p10 + that's around 400Mbps). Higher values (close to the uncompressed + bitrate) turn on lossless compression mode. + + field_order + Enables field coding when set (e.g. to tt - top field first) for + interlaced inputs. Should increase compression with interlaced + content as it splits the fields and encodes each separately. + + wavelet_depth + Sets the total amount of wavelet transforms to apply, between 1 and + 5 (default). Lower values reduce compression and quality. Less + capable decoders may not be able to handle values of wavelet_depth + over 3. + + wavelet_type + Sets the transform type. Currently only 5_3 (LeGall) and 9_7 + (Deslauriers-Dubuc) are implemented, with 9_7 being the one with + better compression and thus is the default. + + slice_width + slice_height + Sets the slice size for each slice. Larger values result in better + compression. 
For compatibility with other more limited decoders + use slice_width of 32 and slice_height of 8. + + tolerance + Sets the undershoot tolerance of the rate control system in + percent. This is to prevent an expensive search from being run. + + qm Sets the quantization matrix preset to use by default or when + wavelet_depth is set to 5 + + - default Uses the default quantization matrix from the + specifications, extended with values for the fifth level. This + provides a good balance between keeping detail and omitting + artifacts. + + - flat Use a completely zeroed out quantization matrix. This + increases PSNR but might reduce perception. Use in bogus + benchmarks. + + - color Reduces detail but attempts to preserve color at + extremely low bitrates. + +SUBTITLES ENCODERS + dvdsub + This codec encodes the bitmap subtitle format that is used in DVDs. + Typically they are stored in VOBSUB file pairs (*.idx + *.sub), and + they can also be used in Matroska files. + + Options + + palette + Specify the global palette used by the bitmaps. + + The format for this option is a string containing 16 24-bits + hexadecimal numbers (without 0x prefix) separated by commas, for + example "0d00ee, ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, + 0d617a, 7b7b7b, d1d1d1, 7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, + 7c127b". + + even_rows_fix + When set to 1, enable a work-around that makes the number of pixel + rows even in all subtitles. This fixes a problem with some players + that cut off the bottom row if the number is odd. The work-around + just adds a fully transparent row if needed. The overhead is low, + typically one byte per subtitle on average. + + By default, this work-around is disabled. + +BITSTREAM FILTERS + When you configure your FFmpeg build, all the supported bitstream + filters are enabled by default. You can list all available ones using + the configure option "--list-bsfs". 
+ + You can disable all the bitstream filters using the configure option + "--disable-bsfs", and selectively enable any bitstream filter using the + option "--enable-bsf=BSF", or you can disable a particular bitstream + filter using the option "--disable-bsf=BSF". + + The option "-bsfs" of the ff* tools will display the list of all the + supported bitstream filters included in your build. + + The ff* tools have a -bsf option applied per stream, taking a comma- + separated list of filters, whose parameters follow the filter name + after a '='. + + ffmpeg -i INPUT -c:v copy -bsf:v filter1[=opt1=str1:opt2=str2][,filter2] OUTPUT + + Below is a description of the currently available bitstream filters, + with their parameters, if any. + + aac_adtstoasc + Convert MPEG-2/4 AAC ADTS to an MPEG-4 Audio Specific Configuration + bitstream. + + This filter creates an MPEG-4 AudioSpecificConfig from an MPEG-2/4 ADTS + header and removes the ADTS header. + + This filter is required for example when copying an AAC stream from a + raw ADTS AAC or an MPEG-TS container to MP4A-LATM, to an FLV file, or + to MOV/MP4 files and related formats such as 3GP or M4A. Please note + that it is auto-inserted for MP4A-LATM and MOV/MP4 and related formats. + + av1_metadata + Modify metadata embedded in an AV1 stream. + + td Insert or remove temporal delimiter OBUs in all temporal units of + the stream. + + insert + Insert a TD at the beginning of every TU which does not already + have one. + + remove + Remove the TD from the beginning of every TU which has one. + + color_primaries + transfer_characteristics + matrix_coefficients + Set the color description fields in the stream (see AV1 section + 6.4.2). + + color_range + Set the color range in the stream (see AV1 section 6.4.2; note that + this cannot be set for streams using BT.709 primaries, sRGB + transfer characteristic and identity (RGB) matrix coefficients). + + tv Limited range. + + pc Full range. 
+ + chroma_sample_position + Set the chroma sample location in the stream (see AV1 section + 6.4.2). This can only be set for 4:2:0 streams. + + vertical + Left position (matching the default in MPEG-2 and H.264). + + colocated + Top-left position. + + tick_rate + Set the tick rate (num_units_in_display_tick / time_scale) in the + timing info in the sequence header. + + num_ticks_per_picture + Set the number of ticks in each picture, to indicate that the + stream has a fixed framerate. Ignored if tick_rate is not also + set. + + delete_padding + Deletes Padding OBUs. + + chomp + Remove zero padding at the end of a packet. + + dca_core + Extract the core from a DCA/DTS stream, dropping extensions such as + DTS-HD. + + dump_extra + Add extradata to the beginning of the filtered packets except when said + packets already exactly begin with the extradata that is intended to be + added. + + freq + The additional argument specifies which packets should be filtered. + It accepts the values: + + k + keyframe + add extradata to all key packets + + e + all add extradata to all packets + + If not specified it is assumed k. + + For example the following ffmpeg command forces a global header (thus + disabling individual packet headers) in the H.264 packets generated by + the "libx264" encoder, but corrects them by adding the header stored in + extradata to the key packets: + + ffmpeg -i INPUT -map 0 -flags:v +global_header -c:v libx264 -bsf:v dump_extra out.ts + + eac3_core + Extract the core from a E-AC-3 stream, dropping extra channels. + + extract_extradata + Extract the in-band extradata. + + Certain codecs allow the long-term headers (e.g. MPEG-2 sequence + headers, or H.264/HEVC (VPS/)SPS/PPS) to be transmitted either "in- + band" (i.e. as a part of the bitstream containing the coded frames) or + "out of band" (e.g. on the container level). This latter form is called + "extradata" in FFmpeg terminology. 
+ + This bitstream filter detects the in-band headers and makes them + available as extradata. + + remove + When this option is enabled, the long-term headers are removed from + the bitstream after extraction. + + filter_units + Remove units with types in or not in a given set from the stream. + + pass_types + List of unit types or ranges of unit types to pass through while + removing all others. This is specified as a '|'-separated list of + unit type values or ranges of values with '-'. + + remove_types + Identical to pass_types, except the units in the given set removed + and all others passed through. + + Extradata is unchanged by this transformation, but note that if the + stream contains inline parameter sets then the output may be unusable + if they are removed. + + For example, to remove all non-VCL NAL units from an H.264 stream: + + ffmpeg -i INPUT -c:v copy -bsf:v 'filter_units=pass_types=1-5' OUTPUT + + To remove all AUDs, SEI and filler from an H.265 stream: + + ffmpeg -i INPUT -c:v copy -bsf:v 'filter_units=remove_types=35|38-40' OUTPUT + + hapqa_extract + Extract Rgb or Alpha part of an HAPQA file, without recompression, in + order to create an HAPQ or an HAPAlphaOnly file. + + texture + Specifies the texture to keep. + + color + alpha + + Convert HAPQA to HAPQ + + ffmpeg -i hapqa_inputfile.mov -c copy -bsf:v hapqa_extract=texture=color -tag:v HapY -metadata:s:v:0 encoder="HAPQ" hapq_file.mov + + Convert HAPQA to HAPAlphaOnly + + ffmpeg -i hapqa_inputfile.mov -c copy -bsf:v hapqa_extract=texture=alpha -tag:v HapA -metadata:s:v:0 encoder="HAPAlpha Only" hapalphaonly_file.mov + + h264_metadata + Modify metadata embedded in an H.264 stream. + + aud Insert or remove AUD NAL units in all access units of the stream. + + insert + remove + sample_aspect_ratio + Set the sample aspect ratio of the stream in the VUI parameters. + + overscan_appropriate_flag + Set whether the stream is suitable for display using overscan or + not (see H.264 section E.2.1). 
+ + video_format + video_full_range_flag + Set the video format in the stream (see H.264 section E.2.1 and + table E-2). + + colour_primaries + transfer_characteristics + matrix_coefficients + Set the colour description in the stream (see H.264 section E.2.1 + and tables E-3, E-4 and E-5). + + chroma_sample_loc_type + Set the chroma sample location in the stream (see H.264 section + E.2.1 and figure E-1). + + tick_rate + Set the tick rate (num_units_in_tick / time_scale) in the VUI + parameters. This is the smallest time unit representable in the + stream, and in many cases represents the field rate of the stream + (double the frame rate). + + fixed_frame_rate_flag + Set whether the stream has fixed framerate - typically this + indicates that the framerate is exactly half the tick rate, but the + exact meaning is dependent on interlacing and the picture structure + (see H.264 section E.2.1 and table E-6). + + crop_left + crop_right + crop_top + crop_bottom + Set the frame cropping offsets in the SPS. These values will + replace the current ones if the stream is already cropped. + + These fields are set in pixels. Note that some sizes may not be + representable if the chroma is subsampled or the stream is + interlaced (see H.264 section 7.4.2.1.1). + + sei_user_data + Insert a string as SEI unregistered user data. The argument must + be of the form UUID+string, where the UUID is as hex digits + possibly separated by hyphens, and the string can be anything. + + For example, 086f3693-b7b3-4f2c-9653-21492feee5b8+hello will insert + the string ``hello'' associated with the given UUID. + + delete_filler + Deletes both filler NAL units and filler SEI messages. + + level + Set the level in the SPS. Refer to H.264 section A.3 and tables + A-1 to A-5. + + The argument must be the name of a level (for example, 4.2), a + level_idc value (for example, 42), or the special name auto + indicating that the filter should attempt to guess the level from + the input stream properties. 
+ + h264_mp4toannexb + Convert an H.264 bitstream from length prefixed mode to start code + prefixed mode (as defined in the Annex B of the ITU-T H.264 + specification). + + This is required by some streaming formats, typically the MPEG-2 + transport stream format (muxer "mpegts"). + + For example to remux an MP4 file containing an H.264 stream to mpegts + format with ffmpeg, you can use the command: + + ffmpeg -i INPUT.mp4 -codec copy -bsf:v h264_mp4toannexb OUTPUT.ts + + Please note that this filter is auto-inserted for MPEG-TS (muxer + "mpegts") and raw H.264 (muxer "h264") output formats. + + h264_redundant_pps + This applies a specific fixup to some Blu-ray streams which contain + redundant PPSs modifying irrelevant parameters of the stream which + confuse other transformations which require correct extradata. + + A new single global PPS is created, and all of the redundant PPSs + within the stream are removed. + + hevc_metadata + Modify metadata embedded in an HEVC stream. + + aud Insert or remove AUD NAL units in all access units of the stream. + + insert + remove + sample_aspect_ratio + Set the sample aspect ratio in the stream in the VUI parameters. + + video_format + video_full_range_flag + Set the video format in the stream (see H.265 section E.3.1 and + table E.2). + + colour_primaries + transfer_characteristics + matrix_coefficients + Set the colour description in the stream (see H.265 section E.3.1 + and tables E.3, E.4 and E.5). + + chroma_sample_loc_type + Set the chroma sample location in the stream (see H.265 section + E.3.1 and figure E.1). + + tick_rate + Set the tick rate in the VPS and VUI parameters (num_units_in_tick + / time_scale). Combined with num_ticks_poc_diff_one, this can set + a constant framerate in the stream. Note that it is likely to be + overridden by container parameters when the stream is in a + container. 
+ + num_ticks_poc_diff_one + Set poc_proportional_to_timing_flag in VPS and VUI and use this + value to set num_ticks_poc_diff_one_minus1 (see H.265 sections + 7.4.3.1 and E.3.1). Ignored if tick_rate is not also set. + + crop_left + crop_right + crop_top + crop_bottom + Set the conformance window cropping offsets in the SPS. These + values will replace the current ones if the stream is already + cropped. + + These fields are set in pixels. Note that some sizes may not be + representable if the chroma is subsampled (H.265 section + 7.4.3.2.1). + + level + Set the level in the VPS and SPS. See H.265 section A.4 and tables + A.6 and A.7. + + The argument must be the name of a level (for example, 5.1), a + general_level_idc value (for example, 153 for level 5.1), or the + special name auto indicating that the filter should attempt to + guess the level from the input stream properties. + + hevc_mp4toannexb + Convert an HEVC/H.265 bitstream from length prefixed mode to start code + prefixed mode (as defined in the Annex B of the ITU-T H.265 + specification). + + This is required by some streaming formats, typically the MPEG-2 + transport stream format (muxer "mpegts"). + + For example to remux an MP4 file containing an HEVC stream to mpegts + format with ffmpeg, you can use the command: + + ffmpeg -i INPUT.mp4 -codec copy -bsf:v hevc_mp4toannexb OUTPUT.ts + + Please note that this filter is auto-inserted for MPEG-TS (muxer + "mpegts") and raw HEVC/H.265 (muxer "h265" or "hevc") output formats. + + imxdump + Modifies the bitstream to fit in MOV and to be usable by the Final Cut + Pro decoder. This filter only applies to the mpeg2video codec, and is + likely not needed for Final Cut Pro 7 and newer with the appropriate + -tag:v. + + For example, to remux 30 MB/sec NTSC IMX to MOV: + + ffmpeg -i input.mxf -c copy -bsf:v imxdump -tag:v mx3n output.mov + + mjpeg2jpeg + Convert MJPEG/AVI1 packets to full JPEG/JFIF packets. 
 + 
 + MJPEG is a video codec wherein each video frame is essentially a JPEG 
 + image. The individual frames can be extracted without loss, e.g. by 
 + 
 + ffmpeg -i ../some_mjpeg.avi -c:v copy frames_%d.jpg 
 + 
 + Unfortunately, these chunks are incomplete JPEG images, because they 
 + lack the DHT segment required for decoding. Quoting from 
 + <http://www.digitalpreservation.gov/formats/fdd/fdd000063.shtml>: 
 + 
 + Avery Lee, writing in the rec.video.desktop newsgroup in 2001, 
 + commented that "MJPEG, or at least the MJPEG in AVIs having the MJPG 
 + fourcc, is restricted JPEG with a fixed -- and *omitted* -- Huffman 
 + table. The JPEG must be YCbCr colorspace, it must be 4:2:2, and it must 
 + use basic Huffman encoding, not arithmetic or progressive. . . . You 
 + can indeed extract the MJPEG frames and decode them with a regular JPEG 
 + decoder, but you have to prepend the DHT segment to them, or else the 
 + decoder won't have any idea how to decompress the data. The exact table 
 + necessary is given in the OpenDML spec." 
 + 
 + This bitstream filter patches the header of frames extracted from an 
 + MJPEG stream (carrying the AVI1 header ID and lacking a DHT segment) to 
 + produce fully qualified JPEG images. 
 + 
 + ffmpeg -i mjpeg-movie.avi -c:v copy -bsf:v mjpeg2jpeg frame_%d.jpg 
 + exiftran -i -9 frame*.jpg 
 + ffmpeg -i frame_%d.jpg -c:v copy rotated.avi 
 + 
 + mjpega_dump_header 
 + Add an MJPEG A header to the bitstream, to enable decoding by 
 + Quicktime. 
 + 
 + mov2textsub 
 + Extract a representable text file from MOV subtitles, stripping the 
 + metadata header from each subtitle packet. 
 + 
 + See also the text2movsub filter. 
 + 
 + mp3decomp 
 + Decompress non-standard compressed MP3 audio headers. 
 + 
 + mpeg2_metadata 
 + Modify metadata embedded in an MPEG-2 stream. 
 + 
 + display_aspect_ratio 
 + Set the display aspect ratio in the stream. 
 + 
 + The following fixed values are supported: 
 + 
 + 4/3 
 + 16/9 
 + 221/100 
 + 
 + Any other value will result in square pixels being signalled 
 + instead (see H.262 section 6.3.3 and table 6-3). 
 + 
 + frame_rate 
 + Set the frame rate in the stream. 
This is constructed from a table + of known values combined with a small multiplier and divisor - if + the supplied value is not exactly representable, the nearest + representable value will be used instead (see H.262 section 6.3.3 + and table 6-4). + + video_format + Set the video format in the stream (see H.262 section 6.3.6 and + table 6-6). + + colour_primaries + transfer_characteristics + matrix_coefficients + Set the colour description in the stream (see H.262 section 6.3.6 + and tables 6-7, 6-8 and 6-9). + + mpeg4_unpack_bframes + Unpack DivX-style packed B-frames. + + DivX-style packed B-frames are not valid MPEG-4 and were only a + workaround for the broken Video for Windows subsystem. They use more + space, can cause minor AV sync issues, require more CPU power to decode + (unless the player has some decoded picture queue to compensate the + 2,0,2,0 frame per packet style) and cause trouble if copied into a + standard container like mp4 or mpeg-ps/ts, because MPEG-4 decoders may + not be able to decode them, since they are not valid MPEG-4. + + For example to fix an AVI file containing an MPEG-4 stream with DivX- + style packed B-frames using ffmpeg, you can use the command: + + ffmpeg -i INPUT.avi -codec copy -bsf:v mpeg4_unpack_bframes OUTPUT.avi + + noise + Damages the contents of packets or simply drops them without damaging + the container. Can be used for fuzzing or testing error + resilience/concealment. + + Parameters: + + amount + A numeral string, whose value is related to how often output bytes + will be modified. Therefore, values below or equal to 0 are + forbidden, and the lower the more frequent bytes will be modified, + with 1 meaning every byte is modified. + + dropamount + A numeral string, whose value is related to how often packets will + be dropped. Therefore, values below or equal to 0 are forbidden, + and the lower the more frequent packets will be dropped, with 1 + meaning every packet is dropped. 
+ + The following example applies the modification to every byte but does + not drop any packets. + + ffmpeg -i INPUT -c copy -bsf noise[=1] output.mkv + + null + This bitstream filter passes the packets through unchanged. + + pcm_rechunk + Repacketize PCM audio to a fixed number of samples per packet or a + fixed packet rate per second. This is similar to the asetnsamples audio + filter but works on audio packets instead of audio frames. + + nb_out_samples, n + Set the number of samples per each output audio packet. The number + is intended as the number of samples per each channel. Default + value is 1024. + + pad, p + If set to 1, the filter will pad the last audio packet with + silence, so that it will contain the same number of samples (or + roughly the same number of samples, see frame_rate) as the previous + ones. Default value is 1. + + frame_rate, r + This option makes the filter output a fixed number of packets per + second instead of a fixed number of samples per packet. If the + audio sample rate is not divisible by the frame rate then the + number of samples will not be constant but will vary slightly so + that each packet will start as close to the frame boundary as + possible. Using this option has precedence over nb_out_samples. + + You can generate the well known 1602-1601-1602-1601-1602 pattern of + 48kHz audio for NTSC frame rate using the frame_rate option. + + ffmpeg -f lavfi -i sine=r=48000:d=1 -c pcm_s16le -bsf pcm_rechunk=r=30000/1001 -f framecrc - + + prores_metadata + Modify color property metadata embedded in prores stream. + + color_primaries + Set the color primaries. Available values are: + + auto + Keep the same color primaries property (default). + + unknown + bt709 + bt470bg + BT601 625 + + smpte170m + BT601 525 + + bt2020 + smpte431 + DCI P3 + + smpte432 + P3 D65 + + transfer_characteristics + Set the color transfer. Available values are: + + auto + Keep the same transfer characteristics property (default). 
+ + unknown + bt709 + BT 601, BT 709, BT 2020 + + smpte2084 + SMPTE ST 2084 + + arib-std-b67 + ARIB STD-B67 + + matrix_coefficients + Set the matrix coefficient. Available values are: + + auto + Keep the same colorspace property (default). + + unknown + bt709 + smpte170m + BT 601 + + bt2020nc + + Set Rec709 colorspace for each frame of the file + + ffmpeg -i INPUT -c copy -bsf:v prores_metadata=color_primaries=bt709:color_trc=bt709:colorspace=bt709 output.mov + + Set Hybrid Log-Gamma parameters for each frame of the file + + ffmpeg -i INPUT -c copy -bsf:v prores_metadata=color_primaries=bt2020:color_trc=arib-std-b67:colorspace=bt2020nc output.mov + + remove_extra + Remove extradata from packets. + + It accepts the following parameter: + + freq + Set which frame types to remove extradata from. + + k Remove extradata from non-keyframes only. + + keyframe + Remove extradata from keyframes only. + + e, all + Remove extradata from all frames. + + setts + Set PTS and DTS in packets. + + It accepts the following parameters: + + ts + pts + dts Set expressions for PTS, DTS or both. + + The expressions are evaluated through the eval API and can contain the + following constants: + + N The count of the input packet. Starting from 0. + + TS The demux timestamp in input in case of "ts" or "dts" option or + presentation timestamp in case of "pts" option. + + POS The original position in the file of the packet, or undefined if + undefined for the current packet + + DTS The demux timestamp in input. + + PTS The presentation timestamp in input. + + STARTDTS + The DTS of the first packet. + + STARTPTS + The PTS of the first packet. + + PREV_INDTS + The previous input DTS. + + PREV_INPTS + The previous input PTS. + + PREV_OUTDTS + The previous output DTS. + + PREV_OUTPTS + The previous output PTS. + + TB The timebase of stream packet belongs. + + SR The sample rate of stream packet belongs. 
+ + text2movsub + Convert text subtitles to MOV subtitles (as used by the "mov_text" + codec) with metadata headers. + + See also the mov2textsub filter. + + trace_headers + Log trace output containing all syntax elements in the coded stream + headers (everything above the level of individual coded blocks). This + can be useful for debugging low-level stream issues. + + Supports AV1, H.264, H.265, (M)JPEG, MPEG-2 and VP9, but depending on + the build only a subset of these may be available. + + truehd_core + Extract the core from a TrueHD stream, dropping ATMOS data. + + vp9_metadata + Modify metadata embedded in a VP9 stream. + + color_space + Set the color space value in the frame header. Note that any frame + set to RGB will be implicitly set to PC range and that RGB is + incompatible with profiles 0 and 2. + + unknown + bt601 + bt709 + smpte170 + smpte240 + bt2020 + rgb + color_range + Set the color range value in the frame header. Note that any value + imposed by the color space will take precedence over this value. + + tv + pc + + vp9_superframe + Merge VP9 invisible (alt-ref) frames back into VP9 superframes. This + fixes merging of split/segmented VP9 streams where the alt-ref frame + was split from its visible counterpart. + + vp9_superframe_split + Split VP9 superframes into single frames. + + vp9_raw_reorder + Given a VP9 stream with correct timestamps but possibly out of order, + insert additional show-existing-frame packets to correct the ordering. + +FORMAT OPTIONS + The libavformat library provides some generic global options, which can + be set on all the muxers and demuxers. In addition each muxer or + demuxer may support so-called private options, which are specific for + that component. + + Options may be set by specifying -option value in the FFmpeg tools, or + by setting the value explicitly in the "AVFormatContext" options or + using the libavutil/opt.h API for programmatic use. 
+ + The list of supported options follows: + + avioflags flags (input/output) + Possible values: + + direct + Reduce buffering. + + probesize integer (input) + Set probing size in bytes, i.e. the size of the data to analyze to + get stream information. A higher value will enable detecting more + information in case it is dispersed into the stream, but will + increase latency. Must be an integer not lesser than 32. It is + 5000000 by default. + + max_probe_packets integer (input) + Set the maximum number of buffered packets when probing a codec. + Default is 2500 packets. + + packetsize integer (output) + Set packet size. + + fflags flags + Set format flags. Some are implemented for a limited number of + formats. + + Possible values for input files: + + discardcorrupt + Discard corrupted packets. + + fastseek + Enable fast, but inaccurate seeks for some formats. + + genpts + Generate missing PTS if DTS is present. + + igndts + Ignore DTS if PTS is set. Inert when nofillin is set. + + ignidx + Ignore index. + + keepside (deprecated,inert) + nobuffer + Reduce the latency introduced by buffering during initial input + streams analysis. + + nofillin + Do not fill in missing values in packet fields that can be + exactly calculated. + + noparse + Disable AVParsers, this needs "+nofillin" too. + + sortdts + Try to interleave output packets by DTS. At present, available + only for AVIs with an index. + + Possible values for output files: + + autobsf + Automatically apply bitstream filters as required by the output + format. Enabled by default. + + bitexact + Only write platform-, build- and time-independent data. This + ensures that file and data checksums are reproducible and match + between platforms. Its primary use is for regression testing. + + flush_packets + Write out packets immediately. + + latm (deprecated,inert) + shortest + Stop muxing at the end of the shortest stream. 
It may be + needed to increase max_interleave_delta to avoid flushing the + longer streams before EOF. + + seek2any integer (input) + Allow seeking to non-keyframes on demuxer level when supported if + set to 1. Default is 0. + + analyzeduration integer (input) + Specify how many microseconds are analyzed to probe the input. A + higher value will enable detecting more accurate information, but + will increase latency. It defaults to 5,000,000 microseconds = 5 + seconds. + + cryptokey hexadecimal string (input) + Set decryption key. + + indexmem integer (input) + Set max memory used for timestamp index (per stream). + + rtbufsize integer (input) + Set max memory used for buffering real-time frames. + + fdebug flags (input/output) + Print specific debug info. + + Possible values: + + ts + max_delay integer (input/output) + Set maximum muxing or demuxing delay in microseconds. + + fpsprobesize integer (input) + Set number of frames used to probe fps. + + audio_preload integer (output) + Set microseconds by which audio packets should be interleaved + earlier. + + chunk_duration integer (output) + Set microseconds for each chunk. + + chunk_size integer (output) + Set size in bytes for each chunk. + + err_detect, f_err_detect flags (input) + Set error detection flags. "f_err_detect" is deprecated and should + be used only via the ffmpeg tool. + + Possible values: + + crccheck + Verify embedded CRCs. + + bitstream + Detect bitstream specification deviations. + + buffer + Detect improper bitstream length. + + explode + Abort decoding on minor error detection. + + careful + Consider things that violate the spec and have not been seen in + the wild as errors. + + compliant + Consider all spec non compliancies as errors. + + aggressive + Consider things that a sane encoder should not do as an error. + + max_interleave_delta integer (output) + Set maximum buffering duration for interleaving. The duration is + expressed in microseconds, and defaults to 10000000 (10 seconds). 
+ + To ensure all the streams are interleaved correctly, libavformat + will wait until it has at least one packet for each stream before + actually writing any packets to the output file. When some streams + are "sparse" (i.e. there are large gaps between successive + packets), this can result in excessive buffering. + + This field specifies the maximum difference between the timestamps + of the first and the last packet in the muxing queue, above which + libavformat will output a packet regardless of whether it has + queued a packet for all the streams. + + If set to 0, libavformat will continue buffering packets until it + has a packet for each stream, regardless of the maximum timestamp + difference between the buffered packets. + + use_wallclock_as_timestamps integer (input) + Use wallclock as timestamps if set to 1. Default is 0. + + avoid_negative_ts integer (output) + Possible values: + + make_non_negative + Shift timestamps to make them non-negative. Also note that + this affects only leading negative timestamps, and not non- + monotonic negative timestamps. + + make_zero + Shift timestamps so that the first timestamp is 0. + + auto (default) + Enables shifting when required by the target format. + + disabled + Disables shifting of timestamp. + + When shifting is enabled, all output timestamps are shifted by the + same amount. Audio, video, and subtitles desynching and relative + timestamp differences are preserved compared to how they would have + been without shifting. + + skip_initial_bytes integer (input) + Set number of bytes to skip before reading header and frames if set + to 1. Default is 0. + + correct_ts_overflow integer (input) + Correct single timestamp overflows if set to 1. Default is 1. + + flush_packets integer (output) + Flush the underlying I/O stream after each packet. 
Default is -1 + (auto), which means that the underlying protocol will decide, 1 + enables it, and has the effect of reducing the latency, 0 disables + it and may increase IO throughput in some cases. + + output_ts_offset offset (output) + Set the output time offset. + + offset must be a time duration specification, see the Time duration + section in the ffmpeg-utils(1) manual. + + The offset is added by the muxer to the output timestamps. + + Specifying a positive offset means that the corresponding streams + are delayed bt the time duration specified in offset. Default value + is 0 (meaning that no offset is applied). + + format_whitelist list (input) + "," separated list of allowed demuxers. By default all are allowed. + + dump_separator string (input) + Separator used to separate the fields printed on the command line + about the Stream parameters. For example, to separate the fields + with newlines and indentation: + + ffprobe -dump_separator " + " -i ~/videos/matrixbench_mpeg2.mpg + + max_streams integer (input) + Specifies the maximum number of streams. This can be used to reject + files that would require too many resources due to a large number + of streams. + + skip_estimate_duration_from_pts bool (input) + Skip estimation of input duration when calculated using PTS. At + present, applicable for MPEG-PS and MPEG-TS. + + strict, f_strict integer (input/output) + Specify how strictly to follow the standards. "f_strict" is + deprecated and should be used only via the ffmpeg tool. + + Possible values: + + very + strictly conform to an older more strict version of the spec or + reference software + + strict + strictly conform to all the things in the spec no matter what + consequences + + normal + unofficial + allow unofficial extensions + + experimental + allow non standardized experimental things, experimental + (unfinished/work in progress/not well tested) decoders and + encoders. 
Note: experimental decoders can pose a security + risk, do not use this for decoding untrusted input. + + Format stream specifiers + Format stream specifiers allow selection of one or more streams that + match specific properties. + + The exact semantics of stream specifiers is defined by the + "avformat_match_stream_specifier()" function declared in the + libavformat/avformat.h header and documented in the Stream specifiers + section in the ffmpeg(1) manual. + +DEMUXERS + Demuxers are configured elements in FFmpeg that can read the multimedia + streams from a particular type of file. + + When you configure your FFmpeg build, all the supported demuxers are + enabled by default. You can list all available ones using the configure + option "--list-demuxers". + + You can disable all the demuxers using the configure option + "--disable-demuxers", and selectively enable a single demuxer with the + option "--enable-demuxer=DEMUXER", or disable it with the option + "--disable-demuxer=DEMUXER". + + The option "-demuxers" of the ff* tools will display the list of + enabled demuxers. Use "-formats" to view a combined list of enabled + demuxers and muxers. + + The description of some of the currently available demuxers follows. + + aa + Audible Format 2, 3, and 4 demuxer. + + This demuxer is used to demux Audible Format 2, 3, and 4 (.aa) files. + + apng + Animated Portable Network Graphics demuxer. + + This demuxer is used to demux APNG files. All headers, but the PNG + signature, up to (but not including) the first fcTL chunk are + transmitted as extradata. Frames are then split as being all the + chunks between two fcTL ones, or between the last fcTL and IEND chunks. + + -ignore_loop bool + Ignore the loop variable in the file if set. + + -max_fps int + Maximum framerate in frames per second (0 for no limit). + + -default_fps int + Default framerate in frames per second when none is specified in + the file (0 meaning as fast as possible). 
+ + asf + Advanced Systems Format demuxer. + + This demuxer is used to demux ASF files and MMS network streams. + + -no_resync_search bool + Do not try to resynchronize by looking for a certain optional start + code. + + concat + Virtual concatenation script demuxer. + + This demuxer reads a list of files and other directives from a text + file and demuxes them one after the other, as if all their packets had + been muxed together. + + The timestamps in the files are adjusted so that the first file starts + at 0 and each next file starts where the previous one finishes. Note + that it is done globally and may cause gaps if all streams do not have + exactly the same length. + + All files must have the same streams (same codecs, same time base, + etc.). + + The duration of each file is used to adjust the timestamps of the next + file: if the duration is incorrect (because it was computed using the + bit-rate or because the file is truncated, for example), it can cause + artifacts. The "duration" directive can be used to override the + duration stored in each file. + + Syntax + + The script is a text file in extended-ASCII, with one directive per + line. Empty lines, leading spaces and lines starting with '#' are + ignored. The following directive is recognized: + + "file path" + Path to a file to read; special characters and spaces must be + escaped with backslash or single quotes. + + All subsequent file-related directives apply to that file. + + "ffconcat version 1.0" + Identify the script type and version. It also sets the safe option + to 1 if it was -1. + + To make FFmpeg recognize the format automatically, this directive + must appear exactly as is (no extra space or byte-order-mark) on + the very first line of the script. + + "duration dur" + Duration of the file. This information can be specified from the + file; specifying it here may be more efficient or help if the + information from the file is not available or accurate. 
+ + If the duration is set for all files, then it is possible to seek + in the whole concatenated video. + + "inpoint timestamp" + In point of the file. When the demuxer opens the file it instantly + seeks to the specified timestamp. Seeking is done so that all + streams can be presented successfully at In point. + + This directive works best with intra frame codecs, because for non- + intra frame ones you will usually get extra packets before the + actual In point and the decoded content will most likely contain + frames before In point too. + + For each file, packets before the file In point will have + timestamps less than the calculated start timestamp of the file + (negative in case of the first file), and the duration of the files + (if not specified by the "duration" directive) will be reduced + based on their specified In point. + + Because of potential packets before the specified In point, packet + timestamps may overlap between two concatenated files. + + "outpoint timestamp" + Out point of the file. When the demuxer reaches the specified + decoding timestamp in any of the streams, it handles it as an end + of file condition and skips the current and all the remaining + packets from all streams. + + Out point is exclusive, which means that the demuxer will not + output packets with a decoding timestamp greater or equal to Out + point. + + This directive works best with intra frame codecs and formats where + all streams are tightly interleaved. For non-intra frame codecs you + will usually get additional packets with presentation timestamp + after Out point therefore the decoded content will most likely + contain frames after Out point too. If your streams are not tightly + interleaved you may not get all the packets from all streams before + Out point and you may only will be able to decode the earliest + stream until Out point. 
+ + The duration of the files (if not specified by the "duration" + directive) will be reduced based on their specified Out point. + + "file_packet_metadata key=value" + Metadata of the packets of the file. The specified metadata will be + set for each file packet. You can specify this directive multiple + times to add multiple metadata entries. + + "stream" + Introduce a stream in the virtual file. All subsequent stream- + related directives apply to the last introduced stream. Some + streams properties must be set in order to allow identifying the + matching streams in the subfiles. If no streams are defined in the + script, the streams from the first file are copied. + + "exact_stream_id id" + Set the id of the stream. If this directive is given, the string + with the corresponding id in the subfiles will be used. This is + especially useful for MPEG-PS (VOB) files, where the order of the + streams is not reliable. + + Options + + This demuxer accepts the following option: + + safe + If set to 1, reject unsafe file paths. A file path is considered + safe if it does not contain a protocol specification and is + relative and all components only contain characters from the + portable character set (letters, digits, period, underscore and + hyphen) and have no period at the beginning of a component. + + If set to 0, any file name is accepted. + + The default is 1. + + -1 is equivalent to 1 if the format was automatically probed and 0 + otherwise. + + auto_convert + If set to 1, try to perform automatic conversions on packet data to + make the streams concatenable. The default is 1. + + Currently, the only conversion is adding the h264_mp4toannexb + bitstream filter to H.264 streams in MP4 format. This is necessary + in particular if there are resolution changes. 
+ + segment_time_metadata + If set to 1, every packet will contain the lavf.concat.start_time + and the lavf.concat.duration packet metadata values which are the + start_time and the duration of the respective file segments in the + concatenated output expressed in microseconds. The duration + metadata is only set if it is known based on the concat file. The + default is 0. + + Examples + + o Use absolute filenames and include some comments: + + # my first filename + file /mnt/share/file-1.wav + # my second filename including whitespace + file '/mnt/share/file 2.wav' + # my third filename including whitespace plus single quote + file '/mnt/share/file 3'\''.wav' + + o Allow for input format auto-probing, use safe filenames and set the + duration of the first file: + + ffconcat version 1.0 + + file file-1.wav + duration 20.0 + + file subdir/file-2.wav + + dash + Dynamic Adaptive Streaming over HTTP demuxer. + + This demuxer presents all AVStreams found in the manifest. By setting + the discard flags on AVStreams the caller can decide which streams to + actually receive. Each stream mirrors the "id" and "bandwidth" + properties from the "" as metadata keys named "id" and + "variant_bitrate" respectively. + + flv, live_flv + Adobe Flash Video Format demuxer. + + This demuxer is used to demux FLV files and RTMP network streams. In + case of live network streams, if you force format, you may use live_flv + option instead of flv to survive timestamp discontinuities. + + ffmpeg -f flv -i myfile.flv ... + ffmpeg -f live_flv -i rtmp:///anything/key .... + + -flv_metadata bool + Allocate the streams according to the onMetaData array content. + + -flv_ignore_prevtag bool + Ignore the size of previous tag value. + + -flv_full_metadata bool + Output all context of the onMetadata. + + gif + Animated GIF demuxer. + + It accepts the following options: + + min_delay + Set the minimum valid delay between frames in hundredths of + seconds. Range is 0 to 6000. Default value is 2. 
+ + max_gif_delay + Set the maximum valid delay between frames in hundredth of seconds. + Range is 0 to 65535. Default value is 65535 (nearly eleven + minutes), the maximum value allowed by the specification. + + default_delay + Set the default delay between frames in hundredths of seconds. + Range is 0 to 6000. Default value is 10. + + ignore_loop + GIF files can contain information to loop a certain number of times + (or infinitely). If ignore_loop is set to 1, then the loop setting + from the input will be ignored and looping will not occur. If set + to 0, then looping will occur and will cycle the number of times + according to the GIF. Default value is 1. + + For example, with the overlay filter, place an infinitely looping GIF + over another video: + + ffmpeg -i input.mp4 -ignore_loop 0 -i input.gif -filter_complex overlay=shortest=1 out.mkv + + Note that in the above example the shortest option for overlay filter + is used to end the output video at the length of the shortest input + file, which in this case is input.mp4 as the GIF in this example loops + infinitely. + + hls + HLS demuxer + + Apple HTTP Live Streaming demuxer. + + This demuxer presents all AVStreams from all variant streams. The id + field is set to the bitrate variant index number. By setting the + discard flags on AVStreams (by pressing 'a' or 'v' in ffplay), the + caller can decide which variant streams to actually receive. The total + bitrate of the variant that the stream belongs to is available in a + metadata key named "variant_bitrate". + + It accepts the following options: + + live_start_index + segment index to start live streams at (negative values are from + the end). + + allowed_extensions + ',' separated list of file extensions that hls is allowed to + access. + + max_reload + Maximum number of times a insufficient list is attempted to be + reloaded. Default value is 1000. 
+ + m3u8_hold_counters + The maximum number of times to load m3u8 when it refreshes without + new segments. Default value is 1000. + + http_persistent + Use persistent HTTP connections. Applicable only for HTTP streams. + Enabled by default. + + http_multiple + Use multiple HTTP connections for downloading HTTP segments. + Enabled by default for HTTP/1.1 servers. + + http_seekable + Use HTTP partial requests for downloading HTTP segments. 0 = + disable, 1 = enable, -1 = auto, Default is auto. + + image2 + Image file demuxer. + + This demuxer reads from a list of image files specified by a pattern. + The syntax and meaning of the pattern is specified by the option + pattern_type. + + The pattern may contain a suffix which is used to automatically + determine the format of the images contained in the files. + + The size, the pixel format, and the format of each image must be the + same for all the files in the sequence. + + This demuxer accepts the following options: + + framerate + Set the frame rate for the video stream. It defaults to 25. + + loop + If set to 1, loop over the input. Default value is 0. + + pattern_type + Select the pattern type used to interpret the provided filename. + + pattern_type accepts one of the following values. + + none + Disable pattern matching, therefore the video will only contain + the specified image. You should use this option if you do not + want to create sequences from multiple images and your + filenames may contain special pattern characters. + + sequence + Select a sequence pattern type, used to specify a sequence of + files indexed by sequential numbers. + + A sequence pattern may contain the string "%d" or "%0Nd", which + specifies the position of the characters representing a + sequential number in each filename matched by the pattern. If + the form "%d0Nd" is used, the string representing the number in + each filename is 0-padded and N is the total number of 0-padded + digits representing the number. 
The literal character '%' can + be specified in the pattern with the string "%%". + + If the sequence pattern contains "%d" or "%0Nd", the first + filename of the file list specified by the pattern must contain + a number inclusively contained between start_number and + start_number+start_number_range-1, and all the following + numbers must be sequential. + + For example the pattern "img-%03d.bmp" will match a sequence of + filenames of the form img-001.bmp, img-002.bmp, ..., + img-010.bmp, etc.; the pattern "i%%m%%g-%d.jpg" will match a + sequence of filenames of the form i%m%g-1.jpg, i%m%g-2.jpg, + ..., i%m%g-10.jpg, etc. + + Note that the pattern must not necessarily contain "%d" or + "%0Nd", for example to convert a single image file img.jpeg you + can employ the command: + + ffmpeg -i img.jpeg img.png + + glob + Select a glob wildcard pattern type. + + The pattern is interpreted like a "glob()" pattern. This is + only selectable if libavformat was compiled with globbing + support. + + glob_sequence (deprecated, will be removed) + Select a mixed glob wildcard/sequence pattern. + + If your version of libavformat was compiled with globbing + support, and the provided pattern contains at least one glob + meta character among "%*?[]{}" that is preceded by an unescaped + "%", the pattern is interpreted like a "glob()" pattern, + otherwise it is interpreted like a sequence pattern. + + All glob special characters "%*?[]{}" must be prefixed with + "%". To escape a literal "%" you shall use "%%". + + For example the pattern "foo-%*.jpeg" will match all the + filenames prefixed by "foo-" and terminating with ".jpeg", and + "foo-%?%?%?.jpeg" will match all the filenames prefixed with + "foo-", followed by a sequence of three characters, and + terminating with ".jpeg". + + This pattern type is deprecated in favor of glob and sequence. + + Default value is glob_sequence. + + pixel_format + Set the pixel format of the images to read. 
If not specified the + pixel format is guessed from the first image file in the sequence. + + start_number + Set the index of the file matched by the image file pattern to + start to read from. Default value is 0. + + start_number_range + Set the index interval range to check when looking for the first + image file in the sequence, starting from start_number. Default + value is 5. + + ts_from_file + If set to 1, will set frame timestamp to modification time of image + file. Note that monotonity of timestamps is not provided: images go + in the same order as without this option. Default value is 0. If + set to 2, will set frame timestamp to the modification time of the + image file in nanosecond precision. + + video_size + Set the video size of the images to read. If not specified the + video size is guessed from the first image file in the sequence. + + export_path_metadata + If set to 1, will add two extra fields to the metadata found in + input, making them also available for other filters (see drawtext + filter for examples). Default value is 0. The extra fields are + described below: + + lavf.image2dec.source_path + Corresponds to the full path to the input file being read. + + lavf.image2dec.source_basename + Corresponds to the name of the file being read. + + Examples + + o Use ffmpeg for creating a video from the images in the file + sequence img-001.jpeg, img-002.jpeg, ..., assuming an input frame + rate of 10 frames per second: + + ffmpeg -framerate 10 -i 'img-%03d.jpeg' out.mkv + + o As above, but start by reading from a file with index 100 in the + sequence: + + ffmpeg -framerate 10 -start_number 100 -i 'img-%03d.jpeg' out.mkv + + o Read images matching the "*.png" glob pattern , that is all the + files terminating with the ".png" suffix: + + ffmpeg -framerate 10 -pattern_type glob -i "*.png" out.mkv + + libgme + The Game Music Emu library is a collection of video game music file + emulators. + + See for more + information. 
+ + It accepts the following options: + + track_index + Set the index of which track to demux. The demuxer can only export + one track. Track indexes start at 0. Default is to pick the first + track. Number of tracks is exported as tracks metadata entry. + + sample_rate + Set the sampling rate of the exported track. Range is 1000 to + 999999. Default is 44100. + + max_size (bytes) + The demuxer buffers the entire file into memory. Adjust this value + to set the maximum buffer size, which in turn, acts as a ceiling + for the size of files that can be read. Default is 50 MiB. + + libmodplug + ModPlug based module demuxer + + See + + It will export one 2-channel 16-bit 44.1 kHz audio stream. Optionally, + a "pal8" 16-color video stream can be exported with or without printed + metadata. + + It accepts the following options: + + noise_reduction + Apply a simple low-pass filter. Can be 1 (on) or 0 (off). Default + is 0. + + reverb_depth + Set amount of reverb. Range 0-100. Default is 0. + + reverb_delay + Set delay in ms, clamped to 40-250 ms. Default is 0. + + bass_amount + Apply bass expansion a.k.a. XBass or megabass. Range is 0 (quiet) + to 100 (loud). Default is 0. + + bass_range + Set cutoff i.e. upper-bound for bass frequencies. Range is 10-100 + Hz. Default is 0. + + surround_depth + Apply a Dolby Pro-Logic surround effect. Range is 0 (quiet) to 100 + (heavy). Default is 0. + + surround_delay + Set surround delay in ms, clamped to 5-40 ms. Default is 0. + + max_size + The demuxer buffers the entire file into memory. Adjust this value + to set the maximum buffer size, which in turn, acts as a ceiling + for the size of files that can be read. Range is 0 to 100 MiB. 0 + removes buffer size limit (not recommended). Default is 5 MiB. + + video_stream_expr + String which is evaluated using the eval API to assign colors to + the generated video stream. Variables which can be used are "x", + "y", "w", "h", "t", "speed", "tempo", "order", "pattern" and "row". 
+ + video_stream + Generate video stream. Can be 1 (on) or 0 (off). Default is 0. + + video_stream_w + Set video frame width in 'chars' where one char indicates 8 pixels. + Range is 20-512. Default is 30. + + video_stream_h + Set video frame height in 'chars' where one char indicates 8 + pixels. Range is 20-512. Default is 30. + + video_stream_ptxt + Print metadata on video stream. Includes "speed", "tempo", "order", + "pattern", "row" and "ts" (time in ms). Can be 1 (on) or 0 (off). + Default is 1. + + libopenmpt + libopenmpt based module demuxer + + See <https://lib.openmpt.org/libopenmpt/> for more information. + + Some files have multiple subsongs (tracks); this can be set with the + subsong option. + + It accepts the following options: + + subsong + Set the subsong index. This can be either 'all', 'auto', or the + index of the subsong. Subsong indexes start at 0. The default is + 'auto'. + + The default value is to let libopenmpt choose. + + layout + Set the channel layout. Valid values are 1, 2, and 4 channel + layouts. The default value is STEREO. + + sample_rate + Set the sample rate for libopenmpt to output. Range is from 1000 + to INT_MAX. The default value is 48000. + + mov/mp4/3gp + Demuxer for Quicktime File Format & ISO/IEC Base Media File Format + (ISO/IEC 14496-12 or MPEG-4 Part 12, ISO/IEC 15444-12 or JPEG 2000 Part + 12). + + Registered extensions: mov, mp4, m4a, 3gp, 3g2, mj2, psp, m4b, ism, + ismv, isma, f4v + + Options + + This demuxer accepts the following options: + + enable_drefs + Enable loading of external tracks, disabled by default. Enabling + this can theoretically leak information in some use cases. + + use_absolute_path + Allows loading of external tracks via absolute paths, disabled by + default. Enabling this poses a security risk. It should only be + enabled if the source is known to be non-malicious. + + seek_streams_individually + When seeking, identify the closest point in each stream + individually and demux packets in that stream from identified + point. 
This can lead to a different sequence of packets compared to + demuxing linearly from the beginning. Default is true. + + ignore_editlist + Ignore any edit list atoms. The demuxer, by default, modifies the + stream index to reflect the timeline described by the edit list. + Default is false. + + advanced_editlist + Modify the stream index to reflect the timeline described by the + edit list. "ignore_editlist" must be set to false for this option + to be effective. If both "ignore_editlist" and this option are set + to false, then only the start of the stream index is modified to + reflect initial dwell time or starting timestamp described by the + edit list. Default is true. + + ignore_chapters + Don't parse chapters. This includes GoPro 'HiLight' tags/moments. + Note that chapters are only parsed when input is seekable. Default + is false. + + use_mfra_for + For seekable fragmented input, set fragment's starting timestamp + from media fragment random access box, if present. + + Following options are available: + + auto + Auto-detect whether to set mfra timestamps as PTS or DTS + (default) + + dts Set mfra timestamps as DTS + + pts Set mfra timestamps as PTS + + 0 Don't use mfra box to set timestamps + + export_all + Export unrecognized boxes within the udta box as metadata entries. + The first four characters of the box type are set as the key. + Default is false. + + export_xmp + Export entire contents of XMP_ box and uuid box as a string with + key "xmp". Note that if "export_all" is set and this option isn't, + the contents of XMP_ box are still exported but with key "XMP_". + Default is false. + + activation_bytes + 4-byte key required to decrypt Audible AAX and AAX+ files. See + Audible AAX subsection below. + + audible_fixed_key + Fixed key used for handling Audible AAX/AAX+ files. It has been + pre-set so should not be necessary to specify. 
+ + decryption_key + 16-byte key, in hex, to decrypt files encrypted using ISO Common + Encryption (CENC/AES-128 CTR; ISO/IEC 23001-7). + + Audible AAX + + Audible AAX files are encrypted M4B files, and they can be decrypted by + specifying a 4 byte activation secret. + + ffmpeg -activation_bytes 1CEB00DA -i test.aax -vn -c:a copy output.mp4 + + mpegts + MPEG-2 transport stream demuxer. + + This demuxer accepts the following options: + + resync_size + Set size limit for looking up a new synchronization. Default value + is 65536. + + skip_unknown_pmt + Skip PMTs for programs not defined in the PAT. Default value is 0. + + fix_teletext_pts + Override teletext packet PTS and DTS values with the timestamps + calculated from the PCR of the first program which the teletext + stream is part of and is not discarded. Default value is 1, set + this option to 0 if you want your teletext packet PTS and DTS + values untouched. + + ts_packetsize + Output option carrying the raw packet size in bytes. Show the + detected raw packet size, cannot be set by the user. + + scan_all_pmts + Scan and combine all PMTs. The value is an integer with value from + -1 to 1 (-1 means automatic setting, 1 means enabled, 0 means + disabled). Default value is -1. + + merge_pmt_versions + Re-use existing streams when a PMT's version is updated and + elementary streams move to different PIDs. Default value is 0. + + mpjpeg + MJPEG encapsulated in multi-part MIME demuxer. + + This demuxer allows reading of MJPEG, where each frame is represented + as a part of multipart/x-mixed-replace stream. + + strict_mime_boundary + Default implementation applies a relaxed standard to multi-part + MIME boundary detection, to prevent regression with numerous + existing endpoints not generating a proper MIME MJPEG stream. + Turning this option on by setting it to 1 will result in a stricter + check of the boundary value. + + rawvideo + Raw video demuxer. + + This demuxer allows one to read raw video data. 
Since there is no + header specifying the assumed video parameters, the user must specify + them in order to be able to decode the data correctly. + + This demuxer accepts the following options: + + framerate + Set input video frame rate. Default value is 25. + + pixel_format + Set the input video pixel format. Default value is "yuv420p". + + video_size + Set the input video size. This value must be specified explicitly. + + For example to read a rawvideo file input.raw with ffplay, assuming a + pixel format of "rgb24", a video size of "320x240", and a frame rate of + 10 images per second, use the command: + + ffplay -f rawvideo -pixel_format rgb24 -video_size 320x240 -framerate 10 input.raw + + sbg + SBaGen script demuxer. + + This demuxer reads the script language used by SBaGen + to generate binaural beats sessions. A SBG + script looks like that: + + -SE + a: 300-2.5/3 440+4.5/0 + b: 300-2.5/0 440+4.5/3 + off: - + NOW == a + +0:07:00 == b + +0:14:00 == a + +0:21:00 == b + +0:30:00 off + + A SBG script can mix absolute and relative timestamps. If the script + uses either only absolute timestamps (including the script start time) + or only relative ones, then its layout is fixed, and the conversion is + straightforward. On the other hand, if the script mixes both kind of + timestamps, then the NOW reference for relative timestamps will be + taken from the current time of day at the time the script is read, and + the script layout will be frozen according to that reference. That + means that if the script is directly played, the actual times will + match the absolute timestamps up to the sound controller's clock + accuracy, but if the user somehow pauses the playback or seeks, all + times will be shifted accordingly. + + tedcaptions + JSON captions used for . + + TED does not provide links to the captions, but they can be guessed + from the page. The file tools/bookmarklets.html from the FFmpeg source + tree contains a bookmarklet to expose them. 
+ + This demuxer accepts the following option: + + start_time + Set the start time of the TED talk, in milliseconds. The default is + 15000 (15s). It is used to sync the captions with the downloadable + videos, because they include a 15s intro. + + Example: convert the captions to a format most players understand: + + ffmpeg -i http://www.ted.com/talks/subtitles/id/1/lang/en talk1-en.srt + + vapoursynth + Vapoursynth wrapper. + + Due to security concerns, Vapoursynth scripts will not be autodetected + so the input format has to be forced. For ff* CLI tools, add "-f + vapoursynth" before the input "-i yourscript.vpy". + + This demuxer accepts the following option: + + max_script_size + The demuxer buffers the entire script into memory. Adjust this + value to set the maximum buffer size, which in turn, acts as a + ceiling for the size of scripts that can be read. Default is 1 + MiB. + +MUXERS + Muxers are configured elements in FFmpeg which allow writing multimedia + streams to a particular type of file. + + When you configure your FFmpeg build, all the supported muxers are + enabled by default. You can list all available muxers using the + configure option "--list-muxers". + + You can disable all the muxers with the configure option + "--disable-muxers" and selectively enable / disable single muxers with + the options "--enable-muxer=MUXER" / "--disable-muxer=MUXER". + + The option "-muxers" of the ff* tools will display the list of enabled + muxers. Use "-formats" to view a combined list of enabled demuxers and + muxers. + + A description of some of the currently available muxers follows. + + aiff + Audio Interchange File Format muxer. + + Options + + It accepts the following options: + + write_id3v2 + Enable ID3v2 tags writing when set to 1. Default is 0 (disabled). + + id3v2_version + Select ID3v2 version to write. Currently only version 3 and 4 (aka. + ID3v2.3 and ID3v2.4) are supported. The default is version 4. + + asf + Advanced Systems Format muxer. 
+ + Note that Windows Media Audio (wma) and Windows Media Video (wmv) use + this muxer too. + + Options + + It accepts the following options: + + packet_size + Set the muxer packet size. By tuning this setting you may reduce + data fragmentation or muxer overhead depending on your source. + Default value is 3200, minimum is 100, maximum is 64k. + + avi + Audio Video Interleaved muxer. + + Options + + It accepts the following options: + + reserve_index_space + Reserve the specified amount of bytes for the OpenDML master index + of each stream within the file header. By default additional master + indexes are embedded within the data packets if there is no space + left in the first master index and are linked together as a chain + of indexes. This index structure can cause problems for some use + cases, e.g. third-party software strictly relying on the OpenDML + index specification or when file seeking is slow. Reserving enough + index space in the file header avoids these problems. + + The required index space depends on the output file size and should + be about 16 bytes per gigabyte. When this option is omitted or set + to zero the necessary index space is guessed. + + write_channel_mask + Write the channel layout mask into the audio stream header. + + This option is enabled by default. Disabling the channel mask can + be useful in specific scenarios, e.g. when merging multiple audio + streams into one for compatibility with software that only supports + a single audio stream in AVI (see the "amerge" section in the + ffmpeg-filters manual). + + flipped_raw_rgb + If set to true, store positive height for raw RGB bitmaps, which + indicates bitmap is stored bottom-up. Note that this option does + not flip the bitmap which has to be done manually beforehand, e.g. + by using the vflip filter. Default is false and indicates bitmap + is stored top down. + + chromaprint + Chromaprint fingerprinter. 
+ + This muxer feeds audio data to the Chromaprint library, which generates + a fingerprint for the provided audio data. See + + + It takes a single signed native-endian 16-bit raw audio stream of at + most 2 channels. + + Options + + silence_threshold + Threshold for detecting silence. Range is from -1 to 32767, where + -1 disables silence detection. Silence detection can only be used + with version 3 of the algorithm. Silence detection must be + disabled for use with the AcoustID service. Default is -1. + + algorithm + Version of algorithm to fingerprint with. Range is 0 to 4. Version + 3 enables silence detection. Default is 1. + + fp_format + Format to output the fingerprint as. Accepts the following options: + + raw Binary raw fingerprint + + compressed + Binary compressed fingerprint + + base64 + Base64 compressed fingerprint (default) + + crc + CRC (Cyclic Redundancy Check) testing format. + + This muxer computes and prints the Adler-32 CRC of all the input audio + and video frames. By default audio frames are converted to signed + 16-bit raw audio and video frames to raw video before computing the + CRC. + + The output of the muxer consists of a single line of the form: + CRC=0xCRC, where CRC is a hexadecimal number 0-padded to 8 digits + containing the CRC for all the decoded input frames. + + See also the framecrc muxer. + + Examples + + For example to compute the CRC of the input, and store it in the file + out.crc: + + ffmpeg -i INPUT -f crc out.crc + + You can print the CRC to stdout with the command: + + ffmpeg -i INPUT -f crc - + + You can select the output format of each frame with ffmpeg by + specifying the audio and video codec and format. For example to compute + the CRC of the input audio converted to PCM unsigned 8-bit and the + input video converted to MPEG-2 video, use the command: + + ffmpeg -i INPUT -c:a pcm_u8 -c:v mpeg2video -f crc - + + flv + Adobe Flash Video Format muxer. 
+ + This muxer accepts the following options: + + flvflags flags + Possible values: + + aac_seq_header_detect + Place AAC sequence header based on audio stream data. + + no_sequence_end + Disable sequence end tag. + + no_metadata + Disable metadata tag. + + no_duration_filesize + Disable duration and filesize in metadata when they are equal + to zero at the end of stream. (Useful for non-seekable live + streams.) + + add_keyframe_index + Used to facilitate seeking; particularly for HTTP pseudo + streaming. + + dash + Dynamic Adaptive Streaming over HTTP (DASH) muxer that creates segments + and manifest files according to the MPEG-DASH standard ISO/IEC + 23009-1:2014. + + For more information see: + + o ISO DASH Specification: + <http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip> + + o WebM DASH Specification: + <https://sites.google.com/a/webmproject.org/wiki/adaptive-streaming/webm-dash-specification> + + It creates an MPD manifest file and segment files for each stream. + + The segment filename might contain pre-defined identifiers used with + SegmentTemplate as defined in section 5.3.9.4.4 of the standard. + Available identifiers are "$RepresentationID$", "$Number$", + "$Bandwidth$" and "$Time$". In addition to the standard identifiers, + an ffmpeg-specific "$ext$" identifier is also supported. When + specified ffmpeg will replace $ext$ in the file name with muxing + format's extensions such as mp4, webm, etc. + + ffmpeg -re -i <input> -map 0 -map 0 -c:a libfdk_aac -c:v libx264 \ + -b:v:0 800k -b:v:1 300k -s:v:1 320x170 -profile:v:1 baseline \ + -profile:v:0 main -bf 1 -keyint_min 120 -g 120 -sc_threshold 0 \ + -b_strategy 0 -ar:a:1 22050 -use_timeline 1 -use_template 1 \ + -window_size 5 -adaptation_sets "id=0,streams=v id=1,streams=a" \ + -f dash /path/to/out.mpd + + min_seg_duration microseconds + This is a deprecated option to set the segment length in + microseconds, use seg_duration instead. + + seg_duration duration + Set the segment length in seconds (fractional value can be set). 
+ The value is treated as average segment duration when use_template + is enabled and use_timeline is disabled and as minimum segment + duration for all the other use cases. + + frag_duration duration + Set the length in seconds of fragments within segments (fractional + value can be set). + + frag_type type + Set the type of interval for fragmentation. + + window_size size + Set the maximum number of segments kept in the manifest. + + extra_window_size size + Set the maximum number of segments kept outside of the manifest + before removing from disk. + + remove_at_exit remove + Enable (1) or disable (0) removal of all segments when finished. + + use_template template + Enable (1) or disable (0) use of SegmentTemplate instead of + SegmentList. + + use_timeline timeline + Enable (1) or disable (0) use of SegmentTimeline in + SegmentTemplate. + + single_file single_file + Enable (1) or disable (0) storing all segments in one file, + accessed using byte ranges. + + single_file_name file_name + DASH-templated name to be used for baseURL. Implies single_file set + to "1". In the template, "$ext$" is replaced with the file name + extension specific for the segment format. + + init_seg_name init_name + DASH-templated name to be used for the initialization segment. Default + is "init-stream$RepresentationID$.$ext$". "$ext$" is replaced with + the file name extension specific for the segment format. + + media_seg_name segment_name + DASH-templated name to be used for the media segments. Default is + "chunk-stream$RepresentationID$-$Number%05d$.$ext$". "$ext$" is + replaced with the file name extension specific for the segment + format. + + utc_timing_url utc_url + URL of the page that will return the UTC timestamp in ISO format. + Example: "https://time.akamai.com/?iso" + + method method + Use the given HTTP method to create output files. Generally set to + PUT or POST. + + http_user_agent user_agent + Override User-Agent field in HTTP header. Applicable only for HTTP + output. 
+ + http_persistent http_persistent + Use persistent HTTP connections. Applicable only for HTTP output. + + hls_playlist hls_playlist + Generate HLS playlist files as well. The master playlist is + generated with the filename hls_master_name. One media playlist + file is generated for each stream with filenames media_0.m3u8, + media_1.m3u8, etc. + + hls_master_name file_name + HLS master playlist name. Default is "master.m3u8". + + streaming streaming + Enable (1) or disable (0) chunk streaming mode of output. In chunk + streaming mode, each frame will be a moof fragment which forms a + chunk. + + adaptation_sets adaptation_sets + Assign streams to AdaptationSets. Syntax is "id=x,streams=a,b,c + id=y,streams=d,e" with x and y being the IDs of the adaptation sets + and a,b,c,d and e are the indices of the mapped streams. + + To map all video (or audio) streams to an AdaptationSet, "v" (or + "a") can be used as stream identifier instead of IDs. + + When no assignment is defined, this defaults to an AdaptationSet + for each stream. + + Optional syntax is + "id=x,seg_duration=x,frag_duration=x,frag_type=type,descriptor=descriptor_string,streams=a,b,c + id=y,seg_duration=y,frag_type=type,streams=d,e" and so on, + descriptor is useful to the scheme defined by ISO/IEC + 23009-1:2014/Amd.2:2015. For example, -adaptation_sets + "id=0,descriptor=,streams=v". Please note that descriptor + string should be a self-closing xml tag. seg_duration, + frag_duration and frag_type override the global option values for + each adaptation set. For example, -adaptation_sets + "id=0,seg_duration=2,frag_duration=1,frag_type=duration,streams=v + id=1,seg_duration=2,frag_type=none,streams=a" type_id marks an + adaptation set as containing streams meant to be used for Trick + Mode for the referenced adaptation set. 
+ For example, + -adaptation_sets "id=0,seg_duration=2,frag_type=none,streams=0 + id=1,seg_duration=10,frag_type=none,trick_id=0,streams=1" + + timeout timeout + Set timeout for socket I/O operations. Applicable only for HTTP + output. + + index_correction index_correction + Enable (1) or disable (0) segment index correction logic. + Applicable only when use_template is enabled and use_timeline is + disabled. + + When enabled, the logic monitors the flow of segment indexes. If a + stream's segment index value is not at the expected real time + position, then the logic corrects that index value. + + Typically this logic is needed in live streaming use cases. The + network bandwidth fluctuations are common during long run + streaming. Each fluctuation can cause the segment indexes to fall + behind the expected real time position. + + format_options options_list + Set container format (mp4/webm) options using a ":" separated list + of key=value parameters. Values containing ":" special characters + must be escaped. + + global_sidx global_sidx + Write global SIDX atom. Applicable only for single file, mp4 + output, non-streaming mode. + + dash_segment_type dash_segment_type + Possible values: + + auto + If this flag is set, the dash segment files format will be + selected based on the stream codec. This is the default mode. + + mp4 If this flag is set, the dash segment files will be in + ISOBMFF format. + + webm + If this flag is set, the dash segment files will be in WebM + format. + + ignore_io_errors ignore_io_errors + Ignore IO errors during open and write. Useful for long-duration + runs with network output. + + lhls lhls + Enable Low-latency HLS (LHLS). Adds #EXT-X-PREFETCH tag with current + segment's URI. Apple doesn't have an official spec for LHLS. + Meanwhile hls.js player folks are trying to standardize an open LHLS + spec. 
The draft spec is available in + https://github.com/video-dev/hlsjs-rfcs/blob/lhls-spec/proposals/0001-lhls.md + This option will also try to comply with the above open spec, till + Apple's spec officially supports it. Applicable only when + streaming and hls_playlist options are enabled. This is an + experimental feature. + + ldash ldash + Enable Low-latency Dash by constraining the presence and values of + some elements. + + master_m3u8_publish_rate master_m3u8_publish_rate + Publish master playlist repeatedly every after specified number of + segment intervals. + + write_prft write_prft + Write Producer Reference Time elements on supported streams. This + also enables writing prft boxes in the underlying muxer. Applicable + only when the utc_url option is enabled. It's set to auto by + default, in which case the muxer will attempt to enable it only in + modes that require it. + + mpd_profile mpd_profile + Set one or more manifest profiles. + + http_opts http_opts + A :-separated list of key=value options to pass to the underlying + HTTP protocol. Applicable only for HTTP output. + + target_latency target_latency + Set an intended target latency in seconds (fractional value can be + set) for serving. Applicable only when streaming and write_prft + options are enabled. This is an informative fields clients can use + to measure the latency of the service. + + min_playback_rate min_playback_rate + Set the minimum playback rate indicated as appropriate for the + purposes of automatically adjusting playback latency and buffer + occupancy during normal playback by clients. + + max_playback_rate max_playback_rate + Set the maximum playback rate indicated as appropriate for the + purposes of automatically adjusting playback latency and buffer + occupancy during normal playback by clients. + + update_period update_period + Set the mpd update period ,for dynamic content. + The unit is second. + + framecrc + Per-packet CRC (Cyclic Redundancy Check) testing format. 
+ + This muxer computes and prints the Adler-32 CRC for each audio and + video packet. By default audio frames are converted to signed 16-bit + raw audio and video frames to raw video before computing the CRC. + + The output of the muxer consists of a line for each audio and video + packet of the form: + + , , , , , 0x + + CRC is a hexadecimal number 0-padded to 8 digits containing the CRC of + the packet. + + Examples + + For example to compute the CRC of the audio and video frames in INPUT, + converted to raw audio and video packets, and store it in the file + out.crc: + + ffmpeg -i INPUT -f framecrc out.crc + + To print the information to stdout, use the command: + + ffmpeg -i INPUT -f framecrc - + + With ffmpeg, you can select the output format to which the audio and + video frames are encoded before computing the CRC for each packet by + specifying the audio and video codec. For example, to compute the CRC + of each decoded input audio frame converted to PCM unsigned 8-bit and + of each decoded input video frame converted to MPEG-2 video, use the + command: + + ffmpeg -i INPUT -c:a pcm_u8 -c:v mpeg2video -f framecrc - + + See also the crc muxer. + + framehash + Per-packet hash testing format. + + This muxer computes and prints a cryptographic hash for each audio and + video packet. This can be used for packet-by-packet equality checks + without having to individually do a binary comparison on each. + + By default audio frames are converted to signed 16-bit raw audio and + video frames to raw video before computing the hash, but the output of + explicit conversions to other codecs can also be used. It uses the + SHA-256 cryptographic hash function by default, but supports several + other algorithms. + + The output of the muxer consists of a line for each audio and video + packet of the form: + + , , , , , + + hash is a hexadecimal number representing the computed hash for the + packet. 
+ + hash algorithm + Use the cryptographic hash function specified by the string + algorithm. Supported values include "MD5", "murmur3", "RIPEMD128", + "RIPEMD160", "RIPEMD256", "RIPEMD320", "SHA160", "SHA224", "SHA256" + (default), "SHA512/224", "SHA512/256", "SHA384", "SHA512", "CRC32" + and "adler32". + + Examples + + To compute the SHA-256 hash of the audio and video frames in INPUT, + converted to raw audio and video packets, and store it in the file + out.sha256: + + ffmpeg -i INPUT -f framehash out.sha256 + + To print the information to stdout, using the MD5 hash function, use + the command: + + ffmpeg -i INPUT -f framehash -hash md5 - + + See also the hash muxer. + + framemd5 + Per-packet MD5 testing format. + + This is a variant of the framehash muxer. Unlike that muxer, it + defaults to using the MD5 hash function. + + Examples + + To compute the MD5 hash of the audio and video frames in INPUT, + converted to raw audio and video packets, and store it in the file + out.md5: + + ffmpeg -i INPUT -f framemd5 out.md5 + + To print the information to stdout, use the command: + + ffmpeg -i INPUT -f framemd5 - + + See also the framehash and md5 muxers. + + gif + Animated GIF muxer. + + It accepts the following options: + + loop + Set the number of times to loop the output. Use "-1" for no loop, 0 + for looping indefinitely (default). + + final_delay + Force the delay (expressed in centiseconds) after the last frame. + Each frame ends with a delay until the next frame. The default is + "-1", which is a special value to tell the muxer to re-use the + previous delay. In case of a loop, you might want to customize this + value to mark a pause for instance. 
+ + For example, to encode a gif looping 10 times, with a 5 seconds delay + between the loops: + + ffmpeg -i INPUT -loop 10 -final_delay 500 out.gif + + Note 1: if you wish to extract the frames into separate GIF files, you + need to force the image2 muxer: + + ffmpeg -i INPUT -c:v gif -f image2 "out%d.gif" + + Note 2: the GIF format has a very large time base: the delay between + two frames can therefore not be smaller than one centi second. + + hash + Hash testing format. + + This muxer computes and prints a cryptographic hash of all the input + audio and video frames. This can be used for equality checks without + having to do a complete binary comparison. + + By default audio frames are converted to signed 16-bit raw audio and + video frames to raw video before computing the hash, but the output of + explicit conversions to other codecs can also be used. Timestamps are + ignored. It uses the SHA-256 cryptographic hash function by default, + but supports several other algorithms. + + The output of the muxer consists of a single line of the form: + algo=hash, where algo is a short string representing the hash function + used, and hash is a hexadecimal number representing the computed hash. + + hash algorithm + Use the cryptographic hash function specified by the string + algorithm. Supported values include "MD5", "murmur3", "RIPEMD128", + "RIPEMD160", "RIPEMD256", "RIPEMD320", "SHA160", "SHA224", "SHA256" + (default), "SHA512/224", "SHA512/256", "SHA384", "SHA512", "CRC32" + and "adler32". + + Examples + + To compute the SHA-256 hash of the input converted to raw audio and + video, and store it in the file out.sha256: + + ffmpeg -i INPUT -f hash out.sha256 + + To print an MD5 hash to stdout use the command: + + ffmpeg -i INPUT -f hash -hash md5 - + + See also the framehash muxer. + + hls + Apple HTTP Live Streaming muxer that segments MPEG-TS according to the + HTTP Live Streaming (HLS) specification. + + It creates a playlist file, and one or more segment files. 
The output + filename specifies the playlist filename. + + By default, the muxer creates a file for each segment produced. These + files have the same name as the playlist, followed by a sequential + number and a .ts extension. + + Make sure to require a closed GOP when encoding and to set the GOP size + to fit your segment time constraint. + + For example, to convert an input file with ffmpeg: + + ffmpeg -i in.mkv -c:v h264 -flags +cgop -g 30 -hls_time 1 out.m3u8 + + This example will produce the playlist, out.m3u8, and segment files: + out0.ts, out1.ts, out2.ts, etc. + + See also the segment muxer, which provides a more generic and flexible + implementation of a segmenter, and can be used to perform HLS + segmentation. + + Options + + This muxer supports the following options: + + hls_init_time duration + Set the initial target segment length. Default value is 0. + + duration must be a time duration specification, see the Time + duration section in the ffmpeg-utils(1) manual. + + Segment will be cut on the next key frame after this time has + passed on the first m3u8 list. After the initial playlist is + filled ffmpeg will cut segments at duration equal to "hls_time" + + hls_time duration + Set the target segment length. Default value is 2. + + duration must be a time duration specification, see the Time + duration section in the ffmpeg-utils(1) manual. Segment will be + cut on the next key frame after this time has passed. + + hls_list_size size + Set the maximum number of playlist entries. If set to 0 the list + file will contain all the segments. Default value is 5. + + hls_delete_threshold size + Set the number of unreferenced segments to keep on disk before + "hls_flags delete_segments" deletes them. Increase this to allow + continue clients to download segments which were recently + referenced in the playlist. Default value is 1, meaning segments + older than "hls_list_size+1" will be deleted. 
+ + hls_ts_options options_list + Set output format options using a :-separated list of key=value + parameters. Values containing ":" special characters must be + escaped. + + hls_wrap wrap + This is a deprecated option, you can use "hls_list_size" and + "hls_flags delete_segments" instead it + + This option is useful to avoid to fill the disk with many segment + files, and limits the maximum number of segment files written to + disk to wrap. + + hls_start_number_source + Start the playlist sequence number ("#EXT-X-MEDIA-SEQUENCE") + according to the specified source. Unless "hls_flags single_file" + is set, it also specifies source of starting sequence numbers of + segment and subtitle filenames. In any case, if "hls_flags + append_list" is set and read playlist sequence number is greater + than the specified start sequence number, then that value will be + used as start value. + + It accepts the following values: + + generic (default) + Set the starting sequence numbers according to start_number + option value. + + epoch + The start number will be the seconds since epoch (1970-01-01 + 00:00:00) + + epoch_us + The start number will be the microseconds since epoch + (1970-01-01 00:00:00) + + datetime + The start number will be based on the current date/time as + YYYYmmddHHMMSS. e.g. 20161231235759. + + start_number number + Start the playlist sequence number ("#EXT-X-MEDIA-SEQUENCE") from + the specified number when hls_start_number_source value is generic. + (This is the default case.) Unless "hls_flags single_file" is set, + it also specifies starting sequence numbers of segment and subtitle + filenames. Default value is 0. + + hls_allow_cache allowcache + Explicitly set whether the client MAY (1) or MUST NOT (0) cache + media segments. + + hls_base_url baseurl + Append baseurl to every entry in the playlist. Useful to generate + playlists with absolute paths. 
+ + Note that the playlist sequence number must be unique for each + segment and it is not to be confused with the segment filename + sequence number which can be cyclic, for example if the wrap option + is specified. + + hls_segment_filename filename + Set the segment filename. Unless "hls_flags single_file" is set, + filename is used as a string format with the segment number: + + ffmpeg -i in.nut -hls_segment_filename 'file%03d.ts' out.m3u8 + + This example will produce the playlist, out.m3u8, and segment + files: file000.ts, file001.ts, file002.ts, etc. + + filename may contain full path or relative path specification, but + only the file name part without any path info will be contained in + the m3u8 segment list. Should a relative path be specified, the + path of the created segment files will be relative to the current + working directory. When strftime_mkdir is set, the whole expanded + value of filename will be written into the m3u8 segment list. + + When "var_stream_map" is set with two or more variant streams, the + filename pattern must contain the string "%v", this string + specifies the position of variant stream index in the generated + segment file names. + + ffmpeg -i in.ts -b:v:0 1000k -b:v:1 256k -b:a:0 64k -b:a:1 32k \ + -map 0:v -map 0:a -map 0:v -map 0:a -f hls -var_stream_map "v:0,a:0 v:1,a:1" \ + -hls_segment_filename 'file_%v_%03d.ts' out_%v.m3u8 + + This example will produce the playlists segment file sets: + file_0_000.ts, file_0_001.ts, file_0_002.ts, etc. and + file_1_000.ts, file_1_001.ts, file_1_002.ts, etc. + + The string "%v" may be present in the filename or in the last + directory name containing the file, but only in one of them. + (Additionally, %v may appear multiple times in the last sub- + directory or filename.) If the string %v is present in the + directory name, then sub-directories are created after expanding + the directory name pattern. 
This enables creation of segments + corresponding to different variant streams in subdirectories. + + ffmpeg -i in.ts -b:v:0 1000k -b:v:1 256k -b:a:0 64k -b:a:1 32k \ + -map 0:v -map 0:a -map 0:v -map 0:a -f hls -var_stream_map "v:0,a:0 v:1,a:1" \ + -hls_segment_filename 'vs%v/file_%03d.ts' vs%v/out.m3u8 + + This example will produce the playlists segment file sets: + vs0/file_000.ts, vs0/file_001.ts, vs0/file_002.ts, etc. and + vs1/file_000.ts, vs1/file_001.ts, vs1/file_002.ts, etc. + + use_localtime + Same as strftime option, will be deprecated. + + strftime + Use strftime() on filename to expand the segment filename with + localtime. The segment number is also available in this mode, but + to use it, you need to specify second_level_segment_index hls_flag + and %%d will be the specifier. + + ffmpeg -i in.nut -strftime 1 -hls_segment_filename 'file-%Y%m%d-%s.ts' out.m3u8 + + This example will produce the playlist, out.m3u8, and segment + files: file-20160215-1455569023.ts, file-20160215-1455569024.ts, + etc. Note: On some systems/environments, the %s specifier is not + available. See + "strftime()" documentation. + + ffmpeg -i in.nut -strftime 1 -hls_flags second_level_segment_index -hls_segment_filename 'file-%Y%m%d-%%04d.ts' out.m3u8 + + This example will produce the playlist, out.m3u8, and segment + files: file-20160215-0001.ts, file-20160215-0002.ts, etc. + + use_localtime_mkdir + Same as strftime_mkdir option, will be deprecated . + + strftime_mkdir + Used together with -strftime_mkdir, it will create all + subdirectories which is expanded in filename. + + ffmpeg -i in.nut -strftime 1 -strftime_mkdir 1 -hls_segment_filename '%Y%m%d/file-%Y%m%d-%s.ts' out.m3u8 + + This example will create a directory 201560215 (if it does not + exist), and then produce the playlist, out.m3u8, and segment files: + 20160215/file-20160215-1455569023.ts, + 20160215/file-20160215-1455569024.ts, etc. 
+ + ffmpeg -i in.nut -strftime 1 -strftime_mkdir 1 -hls_segment_filename '%Y/%m/%d/file-%Y%m%d-%s.ts' out.m3u8 + + This example will create a directory hierarchy 2016/02/15 (if any + of them do not exist), and then produce the playlist, out.m3u8, and + segment files: 2016/02/15/file-20160215-1455569023.ts, + 2016/02/15/file-20160215-1455569024.ts, etc. + + hls_key_info_file key_info_file + Use the information in key_info_file for segment encryption. The + first line of key_info_file specifies the key URI written to the + playlist. The key URL is used to access the encryption key during + playback. The second line specifies the path to the key file used + to obtain the key during the encryption process. The key file is + read as a single packed array of 16 octets in binary format. The + optional third line specifies the initialization vector (IV) as a + hexadecimal string to be used instead of the segment sequence + number (default) for encryption. Changes to key_info_file will + result in segment encryption with the new key/IV and an entry in + the playlist for the new key URI/IV if "hls_flags periodic_rekey" + is enabled. + + Key info file format: + + + + (optional) + + Example key URIs: + + http://server/file.key + /path/to/file.key + file.key + + Example key file paths: + + file.key + /path/to/file.key + + Example IV: + + 0123456789ABCDEF0123456789ABCDEF + + Key info file example: + + http://server/file.key + /path/to/file.key + 0123456789ABCDEF0123456789ABCDEF + + Example shell script: + + #!/bin/sh + BASE_URL=${1:-'.'} + openssl rand 16 > file.key + echo $BASE_URL/file.key > file.keyinfo + echo file.key >> file.keyinfo + echo $(openssl rand -hex 16) >> file.keyinfo + ffmpeg -f lavfi -re -i testsrc -c:v h264 -hls_flags delete_segments \ + -hls_key_info_file file.keyinfo out.m3u8 + + -hls_enc enc + Enable (1) or disable (0) the AES128 encryption. When enabled + every segment generated is encrypted and the encryption key is + saved as playlist name.key. 
+ + -hls_enc_key key + 16-octet key to encrypt the segments, by default it is randomly + generated. + + -hls_enc_key_url keyurl + If set, keyurl is prepended instead of baseurl to the key filename + in the playlist. + + -hls_enc_iv iv + 16-octet initialization vector for every segment instead of the + autogenerated ones. + + hls_segment_type flags + Possible values: + + mpegts + Output segment files in MPEG-2 Transport Stream format. This is + compatible with all HLS versions. + + fmp4 + Output segment files in fragmented MP4 format, similar to MPEG- + DASH. fmp4 files may be used in HLS version 7 and above. + + hls_fmp4_init_filename filename + Set filename to the fragment files header file, default filename is + init.mp4. + + Use "-strftime 1" on filename to expand the segment filename with + localtime. + + ffmpeg -i in.nut -hls_segment_type fmp4 -strftime 1 -hls_fmp4_init_filename "%s_init.mp4" out.m3u8 + + This will produce init like this 1602678741_init.mp4 + + hls_fmp4_init_resend + Resend init file after m3u8 file refresh every time, default is 0. + + When "var_stream_map" is set with two or more variant streams, the + filename pattern must contain the string "%v", this string + specifies the position of variant stream index in the generated + init file names. The string "%v" may be present in the filename or + in the last directory name containing the file. If the string is + present in the directory name, then sub-directories are created + after expanding the directory name pattern. This enables creation + of init files corresponding to different variant streams in + subdirectories. + + hls_flags flags + Possible values: + + single_file + If this flag is set, the muxer will store all segments in a + single MPEG-TS file, and will use byte ranges in the playlist. + HLS playlists generated with this way will have the version + number 4. 
For example: + + ffmpeg -i in.nut -hls_flags single_file out.m3u8 + + Will produce the playlist, out.m3u8, and a single segment file, + out.ts. + + delete_segments + Segment files removed from the playlist are deleted after a + period of time equal to the duration of the segment plus the + duration of the playlist. + + append_list + Append new segments into the end of old segment list, and + remove the "#EXT-X-ENDLIST" from the old segment list. + + round_durations + Round the duration info in the playlist file segment info to + integer values, instead of using floating point. + + discont_start + Add the "#EXT-X-DISCONTINUITY" tag to the playlist, before the + first segment's information. + + omit_endlist + Do not append the "EXT-X-ENDLIST" tag at the end of the + playlist. + + periodic_rekey + The file specified by "hls_key_info_file" will be checked + periodically and detect updates to the encryption info. Be sure + to replace this file atomically, including the file containing + the AES encryption key. + + independent_segments + Add the "#EXT-X-INDEPENDENT-SEGMENTS" to playlists that has + video segments and when all the segments of that playlist are + guaranteed to start with a Key frame. + + iframes_only + Add the "#EXT-X-I-FRAMES-ONLY" to playlists that has video + segments and can play only I-frames in the "#EXT-X-BYTERANGE" + mode. + + split_by_time + Allow segments to start on frames other than keyframes. This + improves behavior on some players when the time between + keyframes is inconsistent, but may make things worse on others, + and can cause some oddities during seeking. This flag should be + used with the "hls_time" option. + + program_date_time + Generate "EXT-X-PROGRAM-DATE-TIME" tags. + + second_level_segment_index + Makes it possible to use segment indexes as %%d in + hls_segment_filename expression besides date/time values when + strftime is on. 
To get fixed width numbers with leading
    zeroes, %%0xd format is available where x is the required
    width.

second_level_segment_size
    Makes it possible to use segment sizes (counted in bytes) as
    %%s in hls_segment_filename expression besides date/time values
    when strftime is on. To get fixed width numbers with leading
    zeroes, %%0xs format is available where x is the required
    width.

second_level_segment_duration
    Makes it possible to use segment duration (calculated in
    microseconds) as %%t in hls_segment_filename expression besides
    date/time values when strftime is on. To get fixed width
    numbers with leading zeroes, %%0xt format is available where x
    is the required width.

    ffmpeg -i sample.mpeg \
      -f hls -hls_time 3 -hls_list_size 5 \
      -hls_flags second_level_segment_index+second_level_segment_size+second_level_segment_duration \
      -strftime 1 -strftime_mkdir 1 -hls_segment_filename "segment_%Y%m%d%H%M%S_%%04d_%%08s_%%013t.ts" stream.m3u8

    This will produce segments like this:
    segment_20170102194334_0003_00122200_0000003000000.ts,
    segment_20170102194334_0004_00120072_0000003000000.ts etc.

temp_file
    Write segment data to filename.tmp and rename to filename only
    once the segment is complete. A webserver serving up segments
    can be configured to reject requests to *.tmp to prevent access
    to in-progress segments before they have been added to the m3u8
    playlist. This flag also affects how m3u8 playlist files are
    created. If this flag is set, all playlist files will be written
    into a temporary file and renamed after they are complete,
    similarly to how segments are handled. But playlists with "file"
    protocol and with type ("hls_playlist_type") other than "vod"
    are always written into a temporary file regardless of this flag.
    Master playlist files ("master_pl_name"), if any, with "file"
    protocol, are always written into a temporary file regardless of
    this flag if "master_pl_publish_rate" value is other than zero.
+ + hls_playlist_type event + Emit "#EXT-X-PLAYLIST-TYPE:EVENT" in the m3u8 header. Forces + hls_list_size to 0; the playlist can only be appended to. + + hls_playlist_type vod + Emit "#EXT-X-PLAYLIST-TYPE:VOD" in the m3u8 header. Forces + hls_list_size to 0; the playlist must not change. + + method + Use the given HTTP method to create the hls files. + + ffmpeg -re -i in.ts -f hls -method PUT http://example.com/live/out.m3u8 + + This example will upload all the mpegts segment files to the HTTP + server using the HTTP PUT method, and update the m3u8 files every + "refresh" times using the same method. Note that the HTTP server + must support the given method for uploading files. + + http_user_agent + Override User-Agent field in HTTP header. Applicable only for HTTP + output. + + var_stream_map + Map string which specifies how to group the audio, video and + subtitle streams into different variant streams. The variant stream + groups are separated by space. Expected string format is like this + "a:0,v:0 a:1,v:1 ....". Here a:, v:, s: are the keys to specify + audio, video and subtitle streams respectively. Allowed values are + 0 to 9 (limited just based on practical usage). + + When there are two or more variant streams, the output filename + pattern must contain the string "%v", this string specifies the + position of variant stream index in the output media playlist + filenames. The string "%v" may be present in the filename or in the + last directory name containing the file. If the string is present + in the directory name, then sub-directories are created after + expanding the directory name pattern. This enables creation of + variant streams in subdirectories. + + ffmpeg -re -i in.ts -b:v:0 1000k -b:v:1 256k -b:a:0 64k -b:a:1 32k \ + -map 0:v -map 0:a -map 0:v -map 0:a -f hls -var_stream_map "v:0,a:0 v:1,a:1" \ + http://example.com/live/out_%v.m3u8 + + This example creates two hls variant streams. 
The first variant + stream will contain video stream of bitrate 1000k and audio stream + of bitrate 64k and the second variant stream will contain video + stream of bitrate 256k and audio stream of bitrate 32k. Here, two + media playlist with file names out_0.m3u8 and out_1.m3u8 will be + created. If you want something meaningful text instead of indexes + in result names, you may specify names for each or some of the + variants as in the following example. + + ffmpeg -re -i in.ts -b:v:0 1000k -b:v:1 256k -b:a:0 64k -b:a:1 32k \ + -map 0:v -map 0:a -map 0:v -map 0:a -f hls -var_stream_map "v:0,a:0,name:my_hd v:1,a:1,name:my_sd" \ + http://example.com/live/out_%v.m3u8 + + This example creates two hls variant streams as in the previous + one. But here, the two media playlist with file names + out_my_hd.m3u8 and out_my_sd.m3u8 will be created. + + ffmpeg -re -i in.ts -b:v:0 1000k -b:v:1 256k -b:a:0 64k \ + -map 0:v -map 0:a -map 0:v -f hls -var_stream_map "v:0 a:0 v:1" \ + http://example.com/live/out_%v.m3u8 + + This example creates three hls variant streams. The first variant + stream will be a video only stream with video bitrate 1000k, the + second variant stream will be an audio only stream with bitrate 64k + and the third variant stream will be a video only stream with + bitrate 256k. Here, three media playlist with file names + out_0.m3u8, out_1.m3u8 and out_2.m3u8 will be created. + + ffmpeg -re -i in.ts -b:v:0 1000k -b:v:1 256k -b:a:0 64k -b:a:1 32k \ + -map 0:v -map 0:a -map 0:v -map 0:a -f hls -var_stream_map "v:0,a:0 v:1,a:1" \ + http://example.com/live/vs_%v/out.m3u8 + + This example creates the variant streams in subdirectories. Here, + the first media playlist is created at + http://example.com/live/vs_0/out.m3u8 and the second one at + http://example.com/live/vs_1/out.m3u8. 
+ + ffmpeg -re -i in.ts -b:a:0 32k -b:a:1 64k -b:v:0 1000k -b:v:1 3000k \ + -map 0:a -map 0:a -map 0:v -map 0:v -f hls \ + -var_stream_map "a:0,agroup:aud_low a:1,agroup:aud_high v:0,agroup:aud_low v:1,agroup:aud_high" \ + -master_pl_name master.m3u8 \ + http://example.com/live/out_%v.m3u8 + + This example creates two audio only and two video only variant + streams. In addition to the #EXT-X-STREAM-INF tag for each variant + stream in the master playlist, #EXT-X-MEDIA tag is also added for + the two audio only variant streams and they are mapped to the two + video only variant streams with audio group names 'aud_low' and + 'aud_high'. + + By default, a single hls variant containing all the encoded streams + is created. + + ffmpeg -re -i in.ts -b:a:0 32k -b:a:1 64k -b:v:0 1000k \ + -map 0:a -map 0:a -map 0:v -f hls \ + -var_stream_map "a:0,agroup:aud_low,default:yes a:1,agroup:aud_low v:0,agroup:aud_low" \ + -master_pl_name master.m3u8 \ + http://example.com/live/out_%v.m3u8 + + This example creates two audio only and one video only variant + streams. In addition to the #EXT-X-STREAM-INF tag for each variant + stream in the master playlist, #EXT-X-MEDIA tag is also added for + the two audio only variant streams and they are mapped to the one + video only variant streams with audio group name 'aud_low', and the + audio group have default stat is NO or YES. + + By default, a single hls variant containing all the encoded streams + is created. + + ffmpeg -re -i in.ts -b:a:0 32k -b:a:1 64k -b:v:0 1000k \ + -map 0:a -map 0:a -map 0:v -f hls \ + -var_stream_map "a:0,agroup:aud_low,default:yes,language:ENG a:1,agroup:aud_low,language:CHN v:0,agroup:aud_low" \ + -master_pl_name master.m3u8 \ + http://example.com/live/out_%v.m3u8 + + This example creates two audio only and one video only variant + streams. 
In addition to the #EXT-X-STREAM-INF tag for each variant + stream in the master playlist, #EXT-X-MEDIA tag is also added for + the two audio only variant streams and they are mapped to the one + video only variant streams with audio group name 'aud_low', and the + audio group have default stat is NO or YES, and one audio have and + language is named ENG, the other audio language is named CHN. + + By default, a single hls variant containing all the encoded streams + is created. + + ffmpeg -y -i input_with_subtitle.mkv \ + -b:v:0 5250k -c:v h264 -pix_fmt yuv420p -profile:v main -level 4.1 \ + -b:a:0 256k \ + -c:s webvtt -c:a mp2 -ar 48000 -ac 2 -map 0:v -map 0:a:0 -map 0:s:0 \ + -f hls -var_stream_map "v:0,a:0,s:0,sgroup:subtitle" \ + -master_pl_name master.m3u8 -t 300 -hls_time 10 -hls_init_time 4 -hls_list_size \ + 10 -master_pl_publish_rate 10 -hls_flags \ + delete_segments+discont_start+split_by_time ./tmp/video.m3u8 + + This example adds "#EXT-X-MEDIA" tag with "TYPE=SUBTITLES" in the + master playlist with webvtt subtitle group name 'subtitle'. Please + make sure the input file has one text subtitle stream at least. + + cc_stream_map + Map string which specifies different closed captions groups and + their attributes. The closed captions stream groups are separated + by space. Expected string format is like this "ccgroup:,instreamid:,language: ....". + 'ccgroup' and 'instreamid' are mandatory attributes. 'language' is + an optional attribute. The closed captions groups configured using + this option are mapped to different variant streams by providing + the same 'ccgroup' name in the "var_stream_map" string. If + "var_stream_map" is not set, then the first available ccgroup in + "cc_stream_map" is mapped to the output variant stream. The + examples for these two use cases are given below. 
+ + ffmpeg -re -i in.ts -b:v 1000k -b:a 64k -a53cc 1 -f hls \ + -cc_stream_map "ccgroup:cc,instreamid:CC1,language:en" \ + -master_pl_name master.m3u8 \ + http://example.com/live/out.m3u8 + + This example adds "#EXT-X-MEDIA" tag with "TYPE=CLOSED-CAPTIONS" in + the master playlist with group name 'cc', language 'en' (english) + and INSTREAM-ID 'CC1'. Also, it adds "CLOSED-CAPTIONS" attribute + with group name 'cc' for the output variant stream. + + ffmpeg -re -i in.ts -b:v:0 1000k -b:v:1 256k -b:a:0 64k -b:a:1 32k \ + -a53cc:0 1 -a53cc:1 1\ + -map 0:v -map 0:a -map 0:v -map 0:a -f hls \ + -cc_stream_map "ccgroup:cc,instreamid:CC1,language:en ccgroup:cc,instreamid:CC2,language:sp" \ + -var_stream_map "v:0,a:0,ccgroup:cc v:1,a:1,ccgroup:cc" \ + -master_pl_name master.m3u8 \ + http://example.com/live/out_%v.m3u8 + + This example adds two "#EXT-X-MEDIA" tags with + "TYPE=CLOSED-CAPTIONS" in the master playlist for the INSTREAM-IDs + 'CC1' and 'CC2'. Also, it adds "CLOSED-CAPTIONS" attribute with + group name 'cc' for the two output variant streams. + + master_pl_name + Create HLS master playlist with the given name. + + ffmpeg -re -i in.ts -f hls -master_pl_name master.m3u8 http://example.com/live/out.m3u8 + + This example creates HLS master playlist with name master.m3u8 and + it is published at http://example.com/live/ + + master_pl_publish_rate + Publish master play list repeatedly every after specified number of + segment intervals. + + ffmpeg -re -i in.ts -f hls -master_pl_name master.m3u8 \ + -hls_time 2 -master_pl_publish_rate 30 http://example.com/live/out.m3u8 + + This example creates HLS master playlist with name master.m3u8 and + keep publishing it repeatedly every after 30 segments i.e. every + after 60s. + + http_persistent + Use persistent HTTP connections. Applicable only for HTTP output. + + timeout + Set timeout for socket I/O operations. Applicable only for HTTP + output. + + -ignore_io_errors + Ignore IO errors during open, write and delete. 
Useful for long- + duration runs with network output. + + headers + Set custom HTTP headers, can override built in default headers. + Applicable only for HTTP output. + + ico + ICO file muxer. + + Microsoft's icon file format (ICO) has some strict limitations that + should be noted: + + o Size cannot exceed 256 pixels in any dimension + + o Only BMP and PNG images can be stored + + o If a BMP image is used, it must be one of the following pixel + formats: + + BMP Bit Depth FFmpeg Pixel Format + 1bit pal8 + 4bit pal8 + 8bit pal8 + 16bit rgb555le + 24bit bgr24 + 32bit bgra + + o If a BMP image is used, it must use the BITMAPINFOHEADER DIB header + + o If a PNG image is used, it must use the rgba pixel format + + image2 + Image file muxer. + + The image file muxer writes video frames to image files. + + The output filenames are specified by a pattern, which can be used to + produce sequentially numbered series of files. The pattern may contain + the string "%d" or "%0Nd", this string specifies the position of the + characters representing a numbering in the filenames. If the form + "%0Nd" is used, the string representing the number in each filename is + 0-padded to N digits. The literal character '%' can be specified in the + pattern with the string "%%". + + If the pattern contains "%d" or "%0Nd", the first filename of the file + list specified will contain the number 1, all the following numbers + will be sequential. + + The pattern may contain a suffix which is used to automatically + determine the format of the image files to write. + + For example the pattern "img-%03d.bmp" will specify a sequence of + filenames of the form img-001.bmp, img-002.bmp, ..., img-010.bmp, etc. + The pattern "img%%-%d.jpg" will specify a sequence of filenames of the + form img%-1.jpg, img%-2.jpg, ..., img%-10.jpg, etc. + + The image muxer supports the .Y.U.V image file format. 
This format is
special in that each image frame consists of three files, one for
each of the YUV420P components.
+ + For example to generate image files from the "strftime()" + "%Y-%m-%d_%H-%M-%S" pattern, the following ffmpeg command can be used: + + ffmpeg -f v4l2 -r 1 -i /dev/video0 -f image2 -strftime 1 "%Y-%m-%d_%H-%M-%S.jpg" + + You can set the file name with current frame's PTS: + + ffmpeg -f v4l2 -r 1 -i /dev/video0 -copyts -f image2 -frame_pts true %d.jpg" + + A more complex example is to publish contents of your desktop directly + to a WebDAV server every second: + + ffmpeg -f x11grab -framerate 1 -i :0.0 -q:v 6 -update 1 -protocol_opts method=PUT http://example.com/desktop.jpg + + matroska + Matroska container muxer. + + This muxer implements the matroska and webm container specs. + + Metadata + + The recognized metadata settings in this muxer are: + + title + Set title name provided to a single track. This gets mapped to the + FileDescription element for a stream written as attachment. + + language + Specify the language of the track in the Matroska languages form. + + The language can be either the 3 letters bibliographic ISO-639-2 + (ISO 639-2/B) form (like "fre" for French), or a language code + mixed with a country code for specialities in languages (like "fre- + ca" for Canadian French). + + stereo_mode + Set stereo 3D video layout of two views in a single video track. 
+ + The following values are recognized: + + mono + video is not stereo + + left_right + Both views are arranged side by side, Left-eye view is on the + left + + bottom_top + Both views are arranged in top-bottom orientation, Left-eye + view is at bottom + + top_bottom + Both views are arranged in top-bottom orientation, Left-eye + view is on top + + checkerboard_rl + Each view is arranged in a checkerboard interleaved pattern, + Left-eye view being first + + checkerboard_lr + Each view is arranged in a checkerboard interleaved pattern, + Right-eye view being first + + row_interleaved_rl + Each view is constituted by a row based interleaving, Right-eye + view is first row + + row_interleaved_lr + Each view is constituted by a row based interleaving, Left-eye + view is first row + + col_interleaved_rl + Both views are arranged in a column based interleaving manner, + Right-eye view is first column + + col_interleaved_lr + Both views are arranged in a column based interleaving manner, + Left-eye view is first column + + anaglyph_cyan_red + All frames are in anaglyph format viewable through red-cyan + filters + + right_left + Both views are arranged side by side, Right-eye view is on the + left + + anaglyph_green_magenta + All frames are in anaglyph format viewable through green- + magenta filters + + block_lr + Both eyes laced in one Block, Left-eye view is first + + block_rl + Both eyes laced in one Block, Right-eye view is first + + For example a 3D WebM clip can be created using the following command + line: + + ffmpeg -i sample_left_right_clip.mpg -an -c:v libvpx -metadata stereo_mode=left_right -y stereo_clip.webm + + Options + + This muxer supports the following options: + + reserve_index_space + By default, this muxer writes the index for seeking (called cues in + Matroska terms) at the end of the file, because it cannot know in + advance how much space to leave for the index at the beginning of + the file. However for some use cases -- e.g. 
streaming where + seeking is possible but slow -- it is useful to put the index at + the beginning of the file. + + If this option is set to a non-zero value, the muxer will reserve a + given amount of space in the file header and then try to write the + cues there when the muxing finishes. If the reserved space does not + suffice, no Cues will be written, the file will be finalized and + writing the trailer will return an error. A safe size for most use + cases should be about 50kB per hour of video. + + Note that cues are only written if the output is seekable and this + option will have no effect if it is not. + + default_mode + This option controls how the FlagDefault of the output tracks will + be set. It influences which tracks players should play by default. + The default mode is infer. + + infer + In this mode, for each type of track (audio, video or + subtitle), if there is a track with disposition default of this + type, then the first such track (i.e. the one with the lowest + index) will be marked as default; if no such track exists, the + first track of this type will be marked as default instead (if + existing). This ensures that the default flag is set in a + sensible way even if the input originated from containers that + lack the concept of default tracks. + + infer_no_subs + This mode is the same as infer except that if no subtitle track + with disposition default exists, no subtitle track will be + marked as default. + + passthrough + In this mode the FlagDefault is set if and only if the + AV_DISPOSITION_DEFAULT flag is set in the disposition of the + corresponding stream. + + flipped_raw_rgb + If set to true, store positive height for raw RGB bitmaps, which + indicates bitmap is stored bottom-up. Note that this option does + not flip the bitmap which has to be done manually beforehand, e.g. + by using the vflip filter. Default is false and indicates bitmap + is stored top down. + + md5 + MD5 testing format. 
+ + This is a variant of the hash muxer. Unlike that muxer, it defaults to + using the MD5 hash function. + + Examples + + To compute the MD5 hash of the input converted to raw audio and video, + and store it in the file out.md5: + + ffmpeg -i INPUT -f md5 out.md5 + + You can print the MD5 to stdout with the command: + + ffmpeg -i INPUT -f md5 - + + See also the hash and framemd5 muxers. + + mov, mp4, ismv + MOV/MP4/ISMV (Smooth Streaming) muxer. + + The mov/mp4/ismv muxer supports fragmentation. Normally, a MOV/MP4 file + has all the metadata about all packets stored in one location (written + at the end of the file, it can be moved to the start for better + playback by adding faststart to the movflags, or using the qt-faststart + tool). A fragmented file consists of a number of fragments, where + packets and metadata about these packets are stored together. Writing a + fragmented file has the advantage that the file is decodable even if + the writing is interrupted (while a normal MOV/MP4 is undecodable if it + is not properly finished), and it requires less memory when writing + very long files (since writing normal MOV/MP4 files stores info about + every single packet in memory until the file is closed). The downside + is that it is less compatible with other applications. + + Options + + Fragmentation is enabled by setting one of the AVOptions that define + how to cut the file into fragments: + + -moov_size bytes + Reserves space for the moov atom at the beginning of the file + instead of placing the moov atom at the end. If the space reserved + is insufficient, muxing will fail. + + -movflags frag_keyframe + Start a new fragment at each video keyframe. + + -frag_duration duration + Create fragments that are duration microseconds long. + + -frag_size size + Create fragments that contain up to size bytes of payload data. 
+ + -movflags frag_custom + Allow the caller to manually choose when to cut fragments, by + calling "av_write_frame(ctx, NULL)" to write a fragment with the + packets written so far. (This is only useful with other + applications integrating libavformat, not from ffmpeg.) + + -min_frag_duration duration + Don't create fragments that are shorter than duration microseconds + long. + + If more than one condition is specified, fragments are cut when one of + the specified conditions is fulfilled. The exception to this is + "-min_frag_duration", which has to be fulfilled for any of the other + conditions to apply. + + Additionally, the way the output file is written can be adjusted + through a few other options: + + -movflags empty_moov + Write an initial moov atom directly at the start of the file, + without describing any samples in it. Generally, an mdat/moov pair + is written at the start of the file, as a normal MOV/MP4 file, + containing only a short portion of the file. With this option set, + there is no initial mdat atom, and the moov atom only describes the + tracks but has a zero duration. + + This option is implicitly set when writing ismv (Smooth Streaming) + files. + + -movflags separate_moof + Write a separate moof (movie fragment) atom for each track. + Normally, packets for all tracks are written in a moof atom (which + is slightly more efficient), but with this option set, the muxer + writes one moof/mdat pair for each track, making it easier to + separate tracks. + + This option is implicitly set when writing ismv (Smooth Streaming) + files. + + -movflags skip_sidx + Skip writing of sidx atom. When bitrate overhead due to sidx atom + is high, this option could be used for cases where sidx atom is not + mandatory. When global_sidx flag is enabled, this option will be + ignored. + + -movflags faststart + Run a second pass moving the index (moov atom) to the beginning of + the file. 
This operation can take a while, and will not work in + various situations such as fragmented output, thus it is not + enabled by default. + + -movflags rtphint + Add RTP hinting tracks to the output file. + + -movflags disable_chpl + Disable Nero chapter markers (chpl atom). Normally, both Nero + chapters and a QuickTime chapter track are written to the file. + With this option set, only the QuickTime chapter track will be + written. Nero chapters can cause failures when the file is + reprocessed with certain tagging programs, like mp3Tag 2.61a and + iTunes 11.3, most likely other versions are affected as well. + + -movflags omit_tfhd_offset + Do not write any absolute base_data_offset in tfhd atoms. This + avoids tying fragments to absolute byte positions in the + file/streams. + + -movflags default_base_moof + Similarly to the omit_tfhd_offset, this flag avoids writing the + absolute base_data_offset field in tfhd atoms, but does so by using + the new default-base-is-moof flag instead. This flag is new from + 14496-12:2012. This may make the fragments easier to parse in + certain circumstances (avoiding basing track fragment location + calculations on the implicit end of the previous track fragment). + + -write_tmcd + Specify "on" to force writing a timecode track, "off" to disable it + and "auto" to write a timecode track only for mov and mp4 output + (default). + + -movflags negative_cts_offsets + Enables utilization of version 1 of the CTTS box, in which the CTS + offsets can be negative. This enables the initial sample to have + DTS/CTS of zero, and reduces the need for edit lists for some cases + such as video tracks with B-frames. Additionally, eases conformance + with the DASH-IF interoperability guidelines. + + This option is implicitly set when writing ismv (Smooth Streaming) + files. + + -write_prft + Write producer time reference box (PRFT) with a specified time + source for the NTP field in the PRFT box. 
Set value as wallclock to + specify timesource as wallclock time and pts to specify timesource + as input packets' PTS values. + + Setting value to pts is applicable only for a live encoding use + case, where PTS values are set as as wallclock time at the source. + For example, an encoding use case with decklink capture source + where video_pts and audio_pts are set to abs_wallclock. + + Example + + Smooth Streaming content can be pushed in real time to a publishing + point on IIS with this muxer. Example: + + ffmpeg -re <> -movflags isml+frag_keyframe -f ismv http://server/publishingpoint.isml/Streams(Encoder1) + + mp3 + The MP3 muxer writes a raw MP3 stream with the following optional + features: + + o An ID3v2 metadata header at the beginning (enabled by default). + Versions 2.3 and 2.4 are supported, the "id3v2_version" private + option controls which one is used (3 or 4). Setting "id3v2_version" + to 0 disables the ID3v2 header completely. + + The muxer supports writing attached pictures (APIC frames) to the + ID3v2 header. The pictures are supplied to the muxer in form of a + video stream with a single packet. There can be any number of those + streams, each will correspond to a single APIC frame. The stream + metadata tags title and comment map to APIC description and picture + type respectively. See for + allowed picture types. + + Note that the APIC frames must be written at the beginning, so the + muxer will buffer the audio frames until it gets all the pictures. + It is therefore advised to provide the pictures as soon as possible + to avoid excessive buffering. + + o A Xing/LAME frame right after the ID3v2 header (if present). It is + enabled by default, but will be written only if the output is + seekable. The "write_xing" private option can be used to disable + it. The frame contains various information that may be useful to + the decoder, like the audio duration or encoder delay. + + o A legacy ID3v1 tag at the end of the file (disabled by default). 
It + may be enabled with the "write_id3v1" private option, but as its + capabilities are very limited, its usage is not recommended. + + Examples: + + Write an mp3 with an ID3v2.3 header and an ID3v1 footer: + + ffmpeg -i INPUT -id3v2_version 3 -write_id3v1 1 out.mp3 + + To attach a picture to an mp3 file select both the audio and the + picture stream with "map": + + ffmpeg -i input.mp3 -i cover.png -c copy -map 0 -map 1 + -metadata:s:v title="Album cover" -metadata:s:v comment="Cover (Front)" out.mp3 + + Write a "clean" MP3 without any extra features: + + ffmpeg -i input.wav -write_xing 0 -id3v2_version 0 out.mp3 + + mpegts + MPEG transport stream muxer. + + This muxer implements ISO 13818-1 and part of ETSI EN 300 468. + + The recognized metadata settings in mpegts muxer are "service_provider" + and "service_name". If they are not set the default for + "service_provider" is FFmpeg and the default for "service_name" is + Service01. + + Options + + The muxer options are: + + mpegts_transport_stream_id integer + Set the transport_stream_id. This identifies a transponder in DVB. + Default is 0x0001. + + mpegts_original_network_id integer + Set the original_network_id. This is unique identifier of a network + in DVB. Its main use is in the unique identification of a service + through the path Original_Network_ID, Transport_Stream_ID. Default + is 0x0001. + + mpegts_service_id integer + Set the service_id, also known as program in DVB. Default is + 0x0001. + + mpegts_service_type integer + Set the program service_type. Default is "digital_tv". Accepts the + following options: + + hex_value + Any hexadecimal value between 0x01 and 0xff as defined in ETSI + 300 468. + + digital_tv + Digital TV service. + + digital_radio + Digital Radio service. + + teletext + Teletext service. + + advanced_codec_digital_radio + Advanced Codec Digital Radio service. + + mpeg2_digital_hdtv + MPEG2 Digital HDTV service. + + advanced_codec_digital_sdtv + Advanced Codec Digital SDTV service. 
+ + advanced_codec_digital_hdtv + Advanced Codec Digital HDTV service. + + mpegts_pmt_start_pid integer + Set the first PID for PMTs. Default is 0x1000, minimum is 0x0020, + maximum is 0x1ffa. This option has no effect in m2ts mode where the + PMT PID is fixed 0x0100. + + mpegts_start_pid integer + Set the first PID for elementary streams. Default is 0x0100, + minimum is 0x0020, maximum is 0x1ffa. This option has no effect in + m2ts mode where the elementary stream PIDs are fixed. + + mpegts_m2ts_mode boolean + Enable m2ts mode if set to 1. Default value is "-1" which disables + m2ts mode. + + muxrate integer + Set a constant muxrate. Default is VBR. + + pes_payload_size integer + Set minimum PES packet payload in bytes. Default is 2930. + + mpegts_flags flags + Set mpegts flags. Accepts the following options: + + resend_headers + Reemit PAT/PMT before writing the next packet. + + latm + Use LATM packetization for AAC. + + pat_pmt_at_frames + Reemit PAT and PMT at each video frame. + + system_b + Conform to System B (DVB) instead of System A (ATSC). + + initial_discontinuity + Mark the initial packet of each stream as discontinuity. + + mpegts_copyts boolean + Preserve original timestamps, if value is set to 1. Default value + is "-1", which results in shifting timestamps so that they start + from 0. + + omit_video_pes_length boolean + Omit the PES packet length for video packets. Default is 1 (true). + + pcr_period integer + Override the default PCR retransmission time in milliseconds. + Default is "-1" which means that the PCR interval will be + determined automatically: 20 ms is used for CBR streams, the + highest multiple of the frame duration which is less than 100 ms is + used for VBR streams. + + pat_period duration + Maximum time in seconds between PAT/PMT tables. Default is 0.1. + + sdt_period duration + Maximum time in seconds between SDT tables. Default is 0.5. 
+ + tables_version integer + Set PAT, PMT and SDT version (default 0, valid values are from 0 to + 31, inclusively). This option allows updating stream structure so + that standard consumer may detect the change. To do so, reopen + output "AVFormatContext" (in case of API usage) or restart ffmpeg + instance, cyclically changing tables_version value: + + ffmpeg -i source1.ts -codec copy -f mpegts -tables_version 0 udp://1.1.1.1:1111 + ffmpeg -i source2.ts -codec copy -f mpegts -tables_version 1 udp://1.1.1.1:1111 + ... + ffmpeg -i source3.ts -codec copy -f mpegts -tables_version 31 udp://1.1.1.1:1111 + ffmpeg -i source1.ts -codec copy -f mpegts -tables_version 0 udp://1.1.1.1:1111 + ffmpeg -i source2.ts -codec copy -f mpegts -tables_version 1 udp://1.1.1.1:1111 + ... + + Example + + ffmpeg -i file.mpg -c copy \ + -mpegts_original_network_id 0x1122 \ + -mpegts_transport_stream_id 0x3344 \ + -mpegts_service_id 0x5566 \ + -mpegts_pmt_start_pid 0x1500 \ + -mpegts_start_pid 0x150 \ + -metadata service_provider="Some provider" \ + -metadata service_name="Some Channel" \ + out.ts + + mxf, mxf_d10, mxf_opatom + MXF muxer. + + Options + + The muxer options are: + + store_user_comments bool + Set if user comments should be stored if available or never. IRT + D-10 does not allow user comments. The default is thus to write + them for mxf and mxf_opatom but not for mxf_d10 + + null + Null muxer. + + This muxer does not generate any output file, it is mainly useful for + testing or benchmarking purposes. + + For example to benchmark decoding with ffmpeg you can use the command: + + ffmpeg -benchmark -i INPUT -f null out.null + + Note that the above command does not read or write the out.null file, + but specifying the output file is required by the ffmpeg syntax. + + Alternatively you can write the command as: + + ffmpeg -benchmark -i INPUT -f null - + + nut + -syncpoints flags + Change the syncpoint usage in nut: + + default use the normal low-overhead seeking aids. 
+ none do not use the syncpoints at all, reducing the overhead but + making the stream non-seekable; + Use of this option is not recommended, as the resulting files are very damage + sensitive and seeking is not possible. Also in general the overhead from + syncpoints is negligible. Note, -C 0 can be used to disable + all growing data tables, allowing to mux endless streams with limited memory + and without these disadvantages. + + timestamped extend the syncpoint with a wallclock field. + + The none and timestamped flags are experimental. + + -write_index bool + Write index at the end, the default is to write an index. + + ffmpeg -i INPUT -f_strict experimental -syncpoints none - | processor + + ogg + Ogg container muxer. + + -page_duration duration + Preferred page duration, in microseconds. The muxer will attempt to + create pages that are approximately duration microseconds long. + This allows the user to compromise between seek granularity and + container overhead. The default is 1 second. A value of 0 will fill + all segments, making pages as large as possible. A value of 1 will + effectively use 1 packet-per-page in most situations, giving a + small seek granularity at the cost of additional container + overhead. + + -serial_offset value + Serial value from which to set the streams serial number. Setting + it to different and sufficiently large values ensures that the + produced ogg files can be safely chained. + + segment, stream_segment, ssegment + Basic stream segmenter. + + This muxer outputs streams to a number of separate files of nearly + fixed duration. Output filename pattern can be set in a fashion similar + to image2, or by using a "strftime" template if the strftime option is + enabled. + + "stream_segment" is a variant of the muxer used to write to streaming + output formats, i.e. which do not require global headers, and is + recommended for outputting e.g. to MPEG transport stream segments. + "ssegment" is a shorter alias for "stream_segment". 
+ + Every segment starts with a keyframe of the selected reference stream, + which is set through the reference_stream option. + + Note that if you want accurate splitting for a video file, you need to + make the input key frames correspond to the exact splitting times + expected by the segmenter, or the segment muxer will start the new + segment with the key frame found next after the specified start time. + + The segment muxer works best with a single constant frame rate video. + + Optionally it can generate a list of the created segments, by setting + the option segment_list. The list type is specified by the + segment_list_type option. The entry filenames in the segment list are + set by default to the basename of the corresponding segment files. + + See also the hls muxer, which provides a more specific implementation + for HLS segmentation. + + Options + + The segment muxer supports the following options: + + increment_tc 1|0 + if set to 1, increment timecode between each segment If this is + selected, the input need to have a timecode in the first video + stream. Default value is 0. + + reference_stream specifier + Set the reference stream, as specified by the string specifier. If + specifier is set to "auto", the reference is chosen automatically. + Otherwise it must be a stream specifier (see the ``Stream + specifiers'' chapter in the ffmpeg manual) which specifies the + reference stream. The default value is "auto". + + segment_format format + Override the inner container format, by default it is guessed by + the filename extension. + + segment_format_options options_list + Set output format options using a :-separated list of key=value + parameters. Values containing the ":" special character must be + escaped. + + segment_list name + Generate also a listfile named name. If not specified no listfile + is generated. + + segment_list_flags flags + Set flags affecting the segment list generation. 
+ + It currently supports the following flags: + + cache + Allow caching (only affects M3U8 list files). + + live + Allow live-friendly file generation. + + segment_list_size size + Update the list file so that it contains at most size segments. If + 0 the list file will contain all the segments. Default value is 0. + + segment_list_entry_prefix prefix + Prepend prefix to each entry. Useful to generate absolute paths. + By default no prefix is applied. + + segment_list_type type + Select the listing format. + + The following values are recognized: + + flat + Generate a flat list for the created segments, one segment per + line. + + csv, ext + Generate a list for the created segments, one segment per line, + each line matching the format (comma-separated values): + + ,, + + segment_filename is the name of the output file generated by + the muxer according to the provided pattern. CSV escaping + (according to RFC4180) is applied if required. + + segment_start_time and segment_end_time specify the segment + start and end time expressed in seconds. + + A list file with the suffix ".csv" or ".ext" will auto-select + this format. + + ext is deprecated in favor or csv. + + ffconcat + Generate an ffconcat file for the created segments. The + resulting file can be read using the FFmpeg concat demuxer. + + A list file with the suffix ".ffcat" or ".ffconcat" will auto- + select this format. + + m3u8 + Generate an extended M3U8 file, version 3, compliant with + . + + A list file with the suffix ".m3u8" will auto-select this + format. + + If not specified the type is guessed from the list file name + suffix. + + segment_time time + Set segment duration to time, the value must be a duration + specification. Default value is "2". See also the segment_times + option. + + Note that splitting may not be accurate, unless you force the + reference stream key-frames at the given time. See the introductory + notice and the examples below. 
+ + segment_atclocktime 1|0 + If set to "1" split at regular clock time intervals starting from + 00:00 o'clock. The time value specified in segment_time is used for + setting the length of the splitting interval. + + For example with segment_time set to "900" this makes it possible + to create files at 12:00 o'clock, 12:15, 12:30, etc. + + Default value is "0". + + segment_clocktime_offset duration + Delay the segment splitting times with the specified duration when + using segment_atclocktime. + + For example with segment_time set to "900" and + segment_clocktime_offset set to "300" this makes it possible to + create files at 12:05, 12:20, 12:35, etc. + + Default value is "0". + + segment_clocktime_wrap_duration duration + Force the segmenter to only start a new segment if a packet reaches + the muxer within the specified duration after the segmenting clock + time. This way you can make the segmenter more resilient to + backward local time jumps, such as leap seconds or transition to + standard time from daylight savings time. + + Default is the maximum possible duration which means starting a new + segment regardless of the elapsed time since the last clock time. + + segment_time_delta delta + Specify the accuracy time when selecting the start time for a + segment, expressed as a duration specification. Default value is + "0". + + When delta is specified a key-frame will start a new segment if its + PTS satisfies the relation: + + PTS >= start_time - time_delta + + This option is useful when splitting video content, which is always + split at GOP boundaries, in case a key frame is found just before + the specified split time. + + In particular may be used in combination with the ffmpeg option + force_key_frames. The key frame times specified by force_key_frames + may not be set accurately because of rounding issues, with the + consequence that a key frame time may result set just before the + specified time. 
For constant frame rate videos a value of + 1/(2*frame_rate) should address the worst case mismatch between the + specified time and the time set by force_key_frames. + + segment_times times + Specify a list of split points. times contains a list of comma + separated duration specifications, in increasing order. See also + the segment_time option. + + segment_frames frames + Specify a list of split video frame numbers. frames contains a list + of comma separated integer numbers, in increasing order. + + This option specifies to start a new segment whenever a reference + stream key frame is found and the sequential number (starting from + 0) of the frame is greater or equal to the next value in the list. + + segment_wrap limit + Wrap around segment index once it reaches limit. + + segment_start_number number + Set the sequence number of the first segment. Defaults to 0. + + strftime 1|0 + Use the "strftime" function to define the name of the new segments + to write. If this is selected, the output segment name must contain + a "strftime" function template. Default value is 0. + + break_non_keyframes 1|0 + If enabled, allow segments to start on frames other than keyframes. + This improves behavior on some players when the time between + keyframes is inconsistent, but may make things worse on others, and + can cause some oddities during seeking. Defaults to 0. + + reset_timestamps 1|0 + Reset timestamps at the beginning of each segment, so that each + segment will start with near-zero timestamps. It is meant to ease + the playback of the generated segments. May not work with some + combinations of muxers/codecs. It is set to 0 by default. + + initial_offset offset + Specify timestamp offset to apply to the output packet timestamps. + The argument must be a time duration specification, and defaults to + 0. + + write_empty_segments 1|0 + If enabled, write an empty segment if there are no packets during + the period a segment would usually span. 
Otherwise, the segment + will be filled with the next packet written. Defaults to 0. + + Make sure to require a closed GOP when encoding and to set the GOP size + to fit your segment time constraint. + + Examples + + o Remux the content of file in.mkv to a list of segments out-000.nut, + out-001.nut, etc., and write the list of generated segments to + out.list: + + ffmpeg -i in.mkv -codec hevc -flags +cgop -g 60 -map 0 -f segment -segment_list out.list out%03d.nut + + o Segment input and set output format options for the output + segments: + + ffmpeg -i in.mkv -f segment -segment_time 10 -segment_format_options movflags=+faststart out%03d.mp4 + + o Segment the input file according to the split points specified by + the segment_times option: + + ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_list out.csv -segment_times 1,2,3,5,8,13,21 out%03d.nut + + o Use the ffmpeg force_key_frames option to force key frames in the + input at the specified location, together with the segment option + segment_time_delta to account for possible roundings operated when + setting key frame times. + + ffmpeg -i in.mkv -force_key_frames 1,2,3,5,8,13,21 -codec:v mpeg4 -codec:a pcm_s16le -map 0 \ + -f segment -segment_list out.csv -segment_times 1,2,3,5,8,13,21 -segment_time_delta 0.05 out%03d.nut + + In order to force key frames on the input file, transcoding is + required. 
+ + o Segment the input file by splitting the input file according to the + frame numbers sequence specified with the segment_frames option: + + ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_list out.csv -segment_frames 100,200,300,500,800 out%03d.nut + + o Convert the in.mkv to TS segments using the "libx264" and "aac" + encoders: + + ffmpeg -i in.mkv -map 0 -codec:v libx264 -codec:a aac -f ssegment -segment_list out.list out%03d.ts + + o Segment the input file, and create an M3U8 live playlist (can be + used as live HLS source): + + ffmpeg -re -i in.mkv -codec copy -map 0 -f segment -segment_list playlist.m3u8 \ + -segment_list_flags +live -segment_time 10 out%03d.mkv + + smoothstreaming + Smooth Streaming muxer generates a set of files (Manifest, chunks) + suitable for serving with conventional web server. + + window_size + Specify the number of fragments kept in the manifest. Default 0 + (keep all). + + extra_window_size + Specify the number of fragments kept outside of the manifest before + removing from disk. Default 5. + + lookahead_count + Specify the number of lookahead fragments. Default 2. + + min_frag_duration + Specify the minimum fragment duration (in microseconds). Default + 5000000. + + remove_at_exit + Specify whether to remove all fragments when finished. Default 0 + (do not remove). + + streamhash + Per stream hash testing format. + + This muxer computes and prints a cryptographic hash of all the input + frames, on a per-stream basis. This can be used for equality checks + without having to do a complete binary comparison. + + By default audio frames are converted to signed 16-bit raw audio and + video frames to raw video before computing the hash, but the output of + explicit conversions to other codecs can also be used. Timestamps are + ignored. It uses the SHA-256 cryptographic hash function by default, + but supports several other algorithms. 
+ + The output of the muxer consists of one line per stream of the form: + streamindex,streamtype,algo=hash, where streamindex is the index of the + mapped stream, streamtype is a single character indicating the type of + stream, algo is a short string representing the hash function used, and + hash is a hexadecimal number representing the computed hash. + + hash algorithm + Use the cryptographic hash function specified by the string + algorithm. Supported values include "MD5", "murmur3", "RIPEMD128", + "RIPEMD160", "RIPEMD256", "RIPEMD320", "SHA160", "SHA224", "SHA256" + (default), "SHA512/224", "SHA512/256", "SHA384", "SHA512", "CRC32" + and "adler32". + + Examples + + To compute the SHA-256 hash of the input converted to raw audio and + video, and store it in the file out.sha256: + + ffmpeg -i INPUT -f streamhash out.sha256 + + To print an MD5 hash to stdout use the command: + + ffmpeg -i INPUT -f streamhash -hash md5 - + + See also the hash and framehash muxers. + + fifo + The fifo pseudo-muxer allows the separation of encoding and muxing by + using first-in-first-out queue and running the actual muxer in a + separate thread. This is especially useful in combination with the tee + muxer and can be used to send data to several destinations with + different reliability/writing speed/latency. + + API users should be aware that callback functions (interrupt_callback, + io_open and io_close) used within its AVFormatContext must be thread- + safe. + + The behavior of the fifo muxer if the queue fills up or if the output + fails is selectable, + + o output can be transparently restarted with configurable delay + between retries based on real time or time of the processed stream. + + o encoding can be blocked during temporary failure, or continue + transparently dropping packets in case fifo queue fills up. + + fifo_format + Specify the format name. Useful if it cannot be guessed from the + output name suffix. 
+ + queue_size + Specify size of the queue (number of packets). Default value is 60. + + format_opts + Specify format options for the underlying muxer. Muxer options can + be specified as a list of key=value pairs separated by ':'. + + drop_pkts_on_overflow bool + If set to 1 (true), in case the fifo queue fills up, packets will + be dropped rather than blocking the encoder. This makes it possible + to continue streaming without delaying the input, at the cost of + omitting part of the stream. By default this option is set to 0 + (false), so in such cases the encoder will be blocked until the + muxer processes some of the packets and none of them is lost. + + attempt_recovery bool + If failure occurs, attempt to recover the output. This is + especially useful when used with network output, since it makes it + possible to restart streaming transparently. By default this + option is set to 0 (false). + + max_recovery_attempts + Sets maximum number of successive unsuccessful recovery attempts + after which the output fails permanently. By default this option is + set to 0 (unlimited). + + recovery_wait_time duration + Waiting time before the next recovery attempt after previous + unsuccessful recovery attempt. Default value is 5 seconds. + + recovery_wait_streamtime bool + If set to 0 (false), the real time is used when waiting for the + recovery attempt (i.e. the recovery will be attempted after at + least recovery_wait_time seconds). If set to 1 (true), the time of + the processed stream is taken into account instead (i.e. the + recovery will be attempted after at least recovery_wait_time + seconds of the stream is omitted). By default, this option is set + to 0 (false). + + recover_any_error bool + If set to 1 (true), recovery will be attempted regardless of type + of the error causing the failure. By default this option is set to + 0 (false) and in case of certain (usually permanent) errors the + recovery is not attempted even when attempt_recovery is set to 1. 
+ + restart_with_keyframe bool + Specify whether to wait for the keyframe after recovering from + queue overflow or failure. This option is set to 0 (false) by + default. + + timeshift duration + Buffer the specified amount of packets and delay writing the + output. Note that queue_size must be big enough to store the + packets for timeshift. At the end of the input the fifo buffer is + flushed at realtime speed. + + Examples + + o Stream something to rtmp server, continue processing the stream at + real-time rate even in case of temporary failure (network outage) + and attempt to recover streaming every second indefinitely. + + ffmpeg -re -i ... -c:v libx264 -c:a aac -f fifo -fifo_format flv -map 0:v -map 0:a + -drop_pkts_on_overflow 1 -attempt_recovery 1 -recovery_wait_time 1 rtmp://example.com/live/stream_name + + tee + The tee muxer can be used to write the same data to several outputs, + such as files or streams. It can be used, for example, to stream a + video over a network and save it to disk at the same time. + + It is different from specifying several outputs to the ffmpeg command- + line tool. With the tee muxer, the audio and video data will be encoded + only once. With conventional multiple outputs, multiple encoding + operations in parallel are initiated, which can be a very expensive + process. The tee muxer is not useful when using the libavformat API + directly because it is then possible to feed the same packets to + several muxers directly. + + Since the tee muxer does not represent any particular output format, + ffmpeg cannot auto-select output streams. So all streams intended for + output must be specified using "-map". See the examples below. + + Some encoders may need different options depending on the output + format; the auto-detection of this can not work with the tee muxer, so + they need to be explicitly specified. The main example is the + global_header flag. 
The slave outputs are specified in the file name given to the muxer,
    separated by '|'. If any of the slave names contains the '|' separator,
    leading or trailing spaces, or any special character, those must be
    escaped (see the "Quoting and escaping" section in the ffmpeg-utils(1)
    manual).
+ + fifo_options + This allows to override tee muxer fifo_options for individual slave + muxer. See fifo. + + select + Select the streams that should be mapped to the slave output, + specified by a stream specifier. If not specified, this defaults to + all the mapped streams. This will cause that output operation to + fail if the output format does not accept all mapped streams. + + You may use multiple stream specifiers separated by commas (",") + e.g.: "a:0,v" + + onfail + Specify behaviour on output failure. This can be set to either + "abort" (which is default) or "ignore". "abort" will cause whole + process to fail in case of failure on this slave output. "ignore" + will ignore failure on this output, so other outputs will continue + without being affected. + + Examples + + o Encode something and both archive it in a WebM file and stream it + as MPEG-TS over UDP: + + ffmpeg -i ... -c:v libx264 -c:a mp2 -f tee -map 0:v -map 0:a + "archive-20121107.mkv|[f=mpegts]udp://10.0.1.255:1234/" + + o As above, but continue streaming even if output to local file fails + (for example local drive fills up): + + ffmpeg -i ... -c:v libx264 -c:a mp2 -f tee -map 0:v -map 0:a + "[onfail=ignore]archive-20121107.mkv|[f=mpegts]udp://10.0.1.255:1234/" + + o Use ffmpeg to encode the input, and send the output to three + different destinations. The "dump_extra" bitstream filter is used + to add extradata information to all the output video keyframes + packets, as requested by the MPEG-TS format. The select option is + applied to out.aac in order to make it contain only audio packets. + + ffmpeg -i ... -map 0 -flags +global_header -c:v libx264 -c:a aac + -f tee "[bsfs/v=dump_extra=freq=keyframe]out.ts|[movflags=+faststart]out.mp4|[select=a]out.aac" + + o As above, but select only stream "a:1" for the audio output. Note + that a second level escaping must be performed, as ":" is a special + character used to separate options. + + ffmpeg -i ... 
-map 0 -flags +global_header -c:v libx264 -c:a aac + -f tee "[bsfs/v=dump_extra=freq=keyframe]out.ts|[movflags=+faststart]out.mp4|[select=\'a:1\']out.aac" + + webm_dash_manifest + WebM DASH Manifest muxer. + + This muxer implements the WebM DASH Manifest specification to generate + the DASH manifest XML. It also supports manifest generation for DASH + live streams. + + For more information see: + + o WebM DASH Specification: + + + o ISO DASH Specification: + + + Options + + This muxer supports the following options: + + adaptation_sets + This option has the following syntax: "id=x,streams=a,b,c + id=y,streams=d,e" where x and y are the unique identifiers of the + adaptation sets and a,b,c,d and e are the indices of the + corresponding audio and video streams. Any number of adaptation + sets can be added using this option. + + live + Set this to 1 to create a live stream DASH Manifest. Default: 0. + + chunk_start_index + Start index of the first chunk. This will go in the startNumber + attribute of the SegmentTemplate element in the manifest. Default: + 0. + + chunk_duration_ms + Duration of each chunk in milliseconds. This will go in the + duration attribute of the SegmentTemplate element in the manifest. + Default: 1000. + + utc_timing_url + URL of the page that will return the UTC timestamp in ISO format. + This will go in the value attribute of the UTCTiming element in the + manifest. Default: None. + + time_shift_buffer_depth + Smallest time (in seconds) shifting buffer for which any + Representation is guaranteed to be available. This will go in the + timeShiftBufferDepth attribute of the MPD element. Default: 60. + + minimum_update_period + Minimum update period (in seconds) of the manifest. This will go in + the minimumUpdatePeriod attribute of the MPD element. Default: 0. 
+ + Example + + ffmpeg -f webm_dash_manifest -i video1.webm \ + -f webm_dash_manifest -i video2.webm \ + -f webm_dash_manifest -i audio1.webm \ + -f webm_dash_manifest -i audio2.webm \ + -map 0 -map 1 -map 2 -map 3 \ + -c copy \ + -f webm_dash_manifest \ + -adaptation_sets "id=0,streams=0,1 id=1,streams=2,3" \ + manifest.xml + + webm_chunk + WebM Live Chunk Muxer. + + This muxer writes out WebM headers and chunks as separate files which + can be consumed by clients that support WebM Live streams via DASH. + + Options + + This muxer supports the following options: + + chunk_start_index + Index of the first chunk (defaults to 0). + + header + Filename of the header where the initialization data will be + written. + + audio_chunk_duration + Duration of each audio chunk in milliseconds (defaults to 5000). + + Example + + ffmpeg -f v4l2 -i /dev/video0 \ + -f alsa -i hw:0 \ + -map 0:0 \ + -c:v libvpx-vp9 \ + -s 640x360 -keyint_min 30 -g 30 \ + -f webm_chunk \ + -header webm_live_video_360.hdr \ + -chunk_start_index 1 \ + webm_live_video_360_%d.chk \ + -map 1:0 \ + -c:a libvorbis \ + -b:a 128k \ + -f webm_chunk \ + -header webm_live_audio_128.hdr \ + -chunk_start_index 1 \ + -audio_chunk_duration 1000 \ + webm_live_audio_128_%d.chk + +METADATA + FFmpeg is able to dump metadata from media files into a simple + UTF-8-encoded INI-like text file and then load it back using the + metadata muxer/demuxer. + + The file format is as follows: + + 1. A file consists of a header and a number of metadata tags divided + into sections, each on its own line. + + 2. The header is a ;FFMETADATA string, followed by a version number + (now 1). + + 3. Metadata tags are of the form key=value + + 4. Immediately after header follows global metadata + + 5. After global metadata there may be sections with + per-stream/per-chapter metadata. + + 6. A section starts with the section name in uppercase (i.e. STREAM or + CHAPTER) in brackets ([, ]) and ends with next section or end of + file. + + 7. 
At the beginning of a chapter section there may be an optional + timebase to be used for start/end values. It must be in form + TIMEBASE=num/den, where num and den are integers. If the timebase + is missing then start/end times are assumed to be in nanoseconds. + + Next a chapter section must contain chapter start and end times in + form START=num, END=num, where num is a positive integer. + + 8. Empty lines and lines starting with ; or # are ignored. + + 9. Metadata keys or values containing special characters (=, ;, #, \ + and a newline) must be escaped with a backslash \. + + 10. Note that whitespace in metadata (e.g. foo = bar) is considered to + be a part of the tag (in the example above key is foo , value is + bar). + + A ffmetadata file might look like this: + + ;FFMETADATA1 + title=bike\\shed + ;this is a comment + artist=FFmpeg troll team + + [CHAPTER] + TIMEBASE=1/1000 + START=0 + #chapter ends at 0:01:00 + END=60000 + title=chapter \#1 + [STREAM] + title=multi\ + line + + By using the ffmetadata muxer and demuxer it is possible to extract + metadata from an input file to an ffmetadata file, and then transcode + the file into an output file with the edited ffmetadata file. + + Extracting an ffmetadata file with ffmpeg goes as follows: + + ffmpeg -i INPUT -f ffmetadata FFMETADATAFILE + + Reinserting edited metadata information from the FFMETADATAFILE file + can be done as: + + ffmpeg -i INPUT -i FFMETADATAFILE -map_metadata 1 -codec copy OUTPUT + +PROTOCOL OPTIONS + The libavformat library provides some generic global options, which can + be set on all the protocols. In addition each protocol may support so- + called private options, which are specific for that component. + + Options may be set by specifying -option value in the FFmpeg tools, or + by setting the value explicitly in the "AVFormatContext" options or + using the libavutil/opt.h API for programmatic use. 
+
+       The list of supported options follows:
+
+       protocol_whitelist list (input)
+           Set a ","-separated list of allowed protocols. "ALL" matches all
+           protocols. Protocols prefixed by "-" are disabled. All protocols
+           are allowed by default but protocols used by another protocol
+           (nested protocols) are restricted to a per protocol subset.
+
+PROTOCOLS
+       Protocols are configured elements in FFmpeg that enable access to
+       resources that require specific protocols.
+
+       When you configure your FFmpeg build, all the supported protocols are
+       enabled by default. You can list all available ones using the configure
+       option "--list-protocols".
+
+       You can disable all the protocols using the configure option
+       "--disable-protocols", and selectively enable a protocol using the
+       option "--enable-protocol=PROTOCOL", or you can disable a particular
+       protocol using the option "--disable-protocol=PROTOCOL".
+
+       The option "-protocols" of the ff* tools will display the list of
+       supported protocols.
+
+       All protocols accept the following options:
+
+       rw_timeout
+           Maximum time to wait for (network) read/write operations to
+           complete, in microseconds.
+
+       A description of the currently available protocols follows.
+
+   amqp
+       Advanced Message Queueing Protocol (AMQP) version 0-9-1 is a broker
+       based publish-subscribe communication protocol.
+
+       FFmpeg must be compiled with --enable-librabbitmq to support AMQP. A
+       separate AMQP broker must also be run. An example open-source AMQP
+       broker is RabbitMQ.
+
+       After starting the broker, an FFmpeg client may stream data to the
+       broker using the command:
+
+               ffmpeg -re -i input -f mpegts amqp://[[user]:[password]@]hostname[:port][/vhost]
+
+       Where hostname and port (default is 5672) is the address of the broker.
+       The client may also set a user/password for authentication. The default
+       for both fields is "guest". Name of virtual host on broker can be set
+       with vhost. The default value is "/".
+
+       Multiple subscribers may stream from the broker using the command:
+
+               ffplay amqp://[[user]:[password]@]hostname[:port][/vhost]
+
+       In RabbitMQ all data published to the broker flows through a specific
+       exchange, and each subscribing client has an assigned queue/buffer.
+       When a packet arrives at an exchange, it may be copied to a client's
+       queue depending on the exchange and routing_key fields.
+
+       The following options are supported:
+
+       exchange
+           Sets the exchange to use on the broker. RabbitMQ has several
+           predefined exchanges: "amq.direct" is the default exchange, where
+           the publisher and subscriber must have a matching routing_key;
+           "amq.fanout" is the same as a broadcast operation (i.e. the data is
+           forwarded to all queues on the fanout exchange independent of the
+           routing_key); and "amq.topic" is similar to "amq.direct", but
+           allows for more complex pattern matching (refer to the RabbitMQ
+           documentation).
+
+       routing_key
+           Sets the routing key. The default value is "amqp". The routing key
+           is used on the "amq.direct" and "amq.topic" exchanges to decide
+           whether packets are written to the queue of a subscriber.
+
+       pkt_size
+           Maximum size of each packet sent/received to the broker. Default is
+           131072. Minimum is 4096 and max is any large value (representable
+           by an int). When receiving packets, this sets an internal buffer
+           size in FFmpeg. It should be equal to or greater than the size of
+           the published packets to the broker. Otherwise the received message
+           may be truncated causing decoding errors.
+
+       connection_timeout
+           The timeout in seconds during the initial connection to the broker.
+           The default value is rw_timeout, or 5 seconds if rw_timeout is not
+           set.
+
+       delivery_mode mode
+           Sets the delivery mode of each message sent to broker. The
+           following values are accepted:
+
+           persistent
+               Delivery mode set to "persistent" (2). This is the default
+               value. Messages may be written to the broker's disk depending
+               on its setup.
+
+           non-persistent
+               Delivery mode set to "non-persistent" (1). Messages will stay
+               in broker's memory unless the broker is under memory pressure.
+
+   async
+       Asynchronous data filling wrapper for input stream.
+
+       Fill data in a background thread, to decouple I/O operation from demux
+       thread.
+
+               async:<URL>
+               async:http://host/resource
+               async:cache:http://host/resource
+
+   bluray
+       Read BluRay playlist.
+
+       The accepted options are:
+
+       angle
+           BluRay angle
+
+       chapter
+           Start chapter (1...N)
+
+       playlist
+           Playlist to read (BDMV/PLAYLIST/?????.mpls)
+
+       Examples:
+
+       Read longest playlist from BluRay mounted to /mnt/bluray:
+
+               bluray:/mnt/bluray
+
+       Read angle 2 of playlist 4 from BluRay mounted to /mnt/bluray, start
+       from chapter 2:
+
+               -playlist 4 -angle 2 -chapter 2 bluray:/mnt/bluray
+
+   cache
+       Caching wrapper for input stream.
+
+       Cache the input stream to temporary file. It brings seeking capability
+       to live streams.
+
+       The accepted options are:
+
+       read_ahead_limit
+           Amount in bytes that may be read ahead when seeking isn't
+           supported. Range is -1 to INT_MAX. -1 for unlimited. Default is
+           65536.
+
+       URL Syntax is
+
+               cache:<URL>
+
+   concat
+       Physical concatenation protocol.
+
+       Read and seek from many resources in sequence as if they were a unique
+       resource.
+
+       A URL accepted by this protocol has the syntax:
+
+               concat:<URL1>|<URL2>|...|<URLN>
+
+       where URL1, URL2, ..., URLN are the urls of the resource to be
+       concatenated, each one possibly specifying a distinct protocol.
+
+       For example to read a sequence of files split1.mpeg, split2.mpeg,
+       split3.mpeg with ffplay use the command:
+
+               ffplay concat:split1.mpeg\|split2.mpeg\|split3.mpeg
+
+       Note that you may need to escape the character "|" which is special for
+       many shells.
+
+   crypto
+       AES-encrypted stream reading protocol.
+
+       The accepted options are:
+
+       key Set the AES decryption key binary block from given hexadecimal
+           representation.
+
+       iv Set the AES decryption initialization vector binary block from
+           given hexadecimal representation.
+
+       Accepted URL formats:
+
+               crypto:<URL>
+               crypto+<URL>
+
+   data
+       Data in-line in the URI. See
+       <http://en.wikipedia.org/wiki/Data_URI_scheme>.
+
+       For example, to convert a GIF file given inline with ffmpeg:
+
+               ffmpeg -i "data:image/gif;base64,R0lGODdhCAAIAMIEAAAAAAAA//8AAP//AP///////////////ywAAAAACAAIAAADF0gEDLojDgdGiJdJqUX02iB4E8Q9jUMkADs=" smiley.png
+
+   file
+       File access protocol.
+
+       Read from or write to a file.
+
+       A file URL can have the form:
+
+               file:<filename>
+
+       where filename is the path of the file to read.
+
+       An URL that does not have a protocol prefix will be assumed to be a
+       file URL. Depending on the build, an URL that looks like a Windows path
+       with the drive letter at the beginning will also be assumed to be a
+       file URL (usually not the case in builds for unix-like systems).
+
+       For example to read from a file input.mpeg with ffmpeg use the command:
+
+               ffmpeg -i file:input.mpeg output.mpeg
+
+       This protocol accepts the following options:
+
+       truncate
+           Truncate existing files on write, if set to 1. A value of 0
+           prevents truncating. Default value is 1.
+
+       blocksize
+           Set I/O operation maximum block size, in bytes. Default value is
+           "INT_MAX", which results in not limiting the requested block size.
+           Setting this value reasonably low improves user termination request
+           reaction time, which is valuable for files on slow medium.
+
+       follow
+           If set to 1, the protocol will retry reading at the end of the
+           file, allowing reading files that still are being written. In order
+           for this to terminate, you either need to use the rw_timeout
+           option, or use the interrupt callback (for API users).
+
+       seekable
+           Controls if seekability is advertised on the file. 0 means non-
+           seekable, -1 means auto (seekable for normal files, non-seekable
+           for named pipes).
+ + Many demuxers handle seekable and non-seekable resources + differently, overriding this might speed up opening certain files + at the cost of losing some features (e.g. accurate seeking). + + ftp + FTP (File Transfer Protocol). + + Read from or write to remote resources using FTP protocol. + + Following syntax is required. + + ftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg + + This protocol accepts the following options. + + timeout + Set timeout in microseconds of socket I/O operations used by the + underlying low level operation. By default it is set to -1, which + means that the timeout is not specified. + + ftp-user + Set a user to be used for authenticating to the FTP server. This is + overridden by the user in the FTP URL. + + ftp-password + Set a password to be used for authenticating to the FTP server. + This is overridden by the password in the FTP URL, or by ftp- + anonymous-password if no user is set. + + ftp-anonymous-password + Password used when login as anonymous user. Typically an e-mail + address should be used. + + ftp-write-seekable + Control seekability of connection during encoding. If set to 1 the + resource is supposed to be seekable, if set to 0 it is assumed not + to be seekable. Default value is 0. + + NOTE: Protocol can be used as output, but it is recommended to not do + it, unless special care is taken (tests, customized server + configuration etc.). Different FTP servers behave in different way + during seek operation. ff* tools may produce incomplete content due to + server limitations. + + gopher + Gopher protocol. + + gophers + Gophers protocol. + + The Gopher protocol with TLS encapsulation. + + hls + Read Apple HTTP Live Streaming compliant segmented stream as a uniform + one. The M3U8 playlists describing the segments can be remote HTTP + resources or local files, accessed using the standard file protocol. 
+ The nested protocol is declared by specifying "+proto" after the hls + URI scheme name, where proto is either "file" or "http". + + hls+http://host/path/to/remote/resource.m3u8 + hls+file://path/to/local/resource.m3u8 + + Using this protocol is discouraged - the hls demuxer should work just + as well (if not, please report the issues) and is more complete. To + use the hls demuxer instead, simply use the direct URLs to the m3u8 + files. + + http + HTTP (Hyper Text Transfer Protocol). + + This protocol accepts the following options: + + seekable + Control seekability of connection. If set to 1 the resource is + supposed to be seekable, if set to 0 it is assumed not to be + seekable, if set to -1 it will try to autodetect if it is seekable. + Default value is -1. + + chunked_post + If set to 1 use chunked Transfer-Encoding for posts, default is 1. + + content_type + Set a specific content type for the POST messages or for listen + mode. + + http_proxy + set HTTP proxy to tunnel through e.g. http://example.com:1234 + + headers + Set custom HTTP headers, can override built in default headers. The + value must be a string encoding the headers. + + multiple_requests + Use persistent connections if set to 1, default is 0. + + post_data + Set custom HTTP post data. + + referer + Set the Referer header. Include 'Referer: URL' header in HTTP + request. + + user_agent + Override the User-Agent header. If not specified the protocol will + use a string describing the libavformat build. ("Lavf/") + + user-agent + This is a deprecated option, you can use user_agent instead it. + + reconnect_at_eof + If set then eof is treated like an error and causes reconnection, + this is useful for live / endless streams. + + reconnect_streamed + If set then even streamed/non seekable streams will be reconnected + on errors. + + reconnect_on_network_error + Reconnect automatically in case of TCP/TLS errors during connect. 
+ + reconnect_on_http_error + A comma separated list of HTTP status codes to reconnect on. The + list can include specific status codes (e.g. '503') or the strings + '4xx' / '5xx'. + + reconnect_delay_max + Sets the maximum delay in seconds after which to give up + reconnecting + + mime_type + Export the MIME type. + + http_version + Exports the HTTP response version number. Usually "1.0" or "1.1". + + icy If set to 1 request ICY (SHOUTcast) metadata from the server. If + the server supports this, the metadata has to be retrieved by the + application by reading the icy_metadata_headers and + icy_metadata_packet options. The default is 1. + + icy_metadata_headers + If the server supports ICY metadata, this contains the ICY-specific + HTTP reply headers, separated by newline characters. + + icy_metadata_packet + If the server supports ICY metadata, and icy was set to 1, this + contains the last non-empty metadata packet sent by the server. It + should be polled in regular intervals by applications interested in + mid-stream metadata updates. + + cookies + Set the cookies to be sent in future requests. The format of each + cookie is the same as the value of a Set-Cookie HTTP response + field. Multiple cookies can be delimited by a newline character. + + offset + Set initial byte offset. + + end_offset + Try to limit the request to bytes preceding this offset. + + method + When used as a client option it sets the HTTP method for the + request. + + When used as a server option it sets the HTTP method that is going + to be expected from the client(s). If the expected and the + received HTTP method do not match the client will be given a Bad + Request response. When unset the HTTP method is not checked for + now. This will be replaced by autodetection in the future. + + listen + If set to 1 enables experimental HTTP server. This can be used to + send data when used as an output option, or read data from a client + with HTTP POST when used as an input option. 
If set to 2 enables + experimental multi-client HTTP server. This is not yet implemented + in ffmpeg.c and thus must not be used as a command line option. + + # Server side (sending): + ffmpeg -i somefile.ogg -c copy -listen 1 -f ogg http://: + + # Client side (receiving): + ffmpeg -i http://: -c copy somefile.ogg + + # Client can also be done with wget: + wget http://: -O somefile.ogg + + # Server side (receiving): + ffmpeg -listen 1 -i http://: -c copy somefile.ogg + + # Client side (sending): + ffmpeg -i somefile.ogg -chunked_post 0 -c copy -f ogg http://: + + # Client can also be done with wget: + wget --post-file=somefile.ogg http://: + + send_expect_100 + Send an Expect: 100-continue header for POST. If set to 1 it will + send, if set to 0 it won't, if set to -1 it will try to send if it + is applicable. Default value is -1. + + auth_type + Set HTTP authentication type. No option for Digest, since this + method requires getting nonce parameters from the server first and + can't be used straight away like Basic. + + none + Choose the HTTP authentication type automatically. This is the + default. + + basic + Choose the HTTP basic authentication. + + Basic authentication sends a Base64-encoded string that + contains a user name and password for the client. Base64 is not + a form of encryption and should be considered the same as + sending the user name and password in clear text (Base64 is a + reversible encoding). If a resource needs to be protected, + strongly consider using an authentication scheme other than + basic authentication. HTTPS/TLS should be used with basic + authentication. Without these additional security + enhancements, basic authentication should not be used to + protect sensitive or valuable information. + + HTTP Cookies + + Some HTTP requests will be denied unless cookie values are passed in + with the request. The cookies option allows these cookies to be + specified. 
At the very least, each cookie must specify a value along
+       with a path and domain. HTTP requests that match both the domain and
+       path will automatically include the cookie value in the HTTP Cookie
+       header field. Multiple cookies can be delimited by a newline.
+
+       The required syntax to play a stream specifying a cookie is:
+
+               ffplay -cookies "nlqptid=nltid=tsn; path=/; domain=somedomain.com;" http://somedomain.com/somestream.m3u8
+
+   Icecast
+       Icecast protocol (stream to Icecast servers)
+
+       This protocol accepts the following options:
+
+       ice_genre
+           Set the stream genre.
+
+       ice_name
+           Set the stream name.
+
+       ice_description
+           Set the stream description.
+
+       ice_url
+           Set the stream website URL.
+
+       ice_public
+           Set if the stream should be public. The default is 0 (not public).
+
+       user_agent
+           Override the User-Agent header. If not specified a string of the
+           form "Lavf/<version>" will be used.
+
+       password
+           Set the Icecast mountpoint password.
+
+       content_type
+           Set the stream content type. This must be set if it is different
+           from audio/mpeg.
+
+       legacy_icecast
+           This enables support for Icecast versions < 2.4.0, that do not
+           support the HTTP PUT method but the SOURCE method.
+
+       tls Establish a TLS (HTTPS) connection to Icecast.
+
+               icecast://[<username>[:<password>]@]<server>:<port>/<mountpoint>
+
+   mmst
+       MMS (Microsoft Media Server) protocol over TCP.
+
+   mmsh
+       MMS (Microsoft Media Server) protocol over HTTP.
+
+       The required syntax is:
+
+               mmsh://<server>[:<port>][/<app>][/<playpath>]
+
+   md5
+       MD5 output protocol.
+
+       Computes the MD5 hash of the data to be written, and on close writes
+       this to the designated output or stdout if none is specified. It can be
+       used to test muxers without writing an actual file.
+
+       Some examples follow.
+
+               # Write the MD5 hash of the encoded AVI file to the file output.avi.md5.
+               ffmpeg -i input.flv -f avi -y md5:output.avi.md5
+
+               # Write the MD5 hash of the encoded AVI file to stdout.
+               ffmpeg -i input.flv -f avi -y md5:
+
+       Note that some formats (typically MOV) require the output protocol to
+       be seekable, so they will fail with the MD5 output protocol.
+
+   pipe
+       UNIX pipe access protocol.
+
+       Read and write from UNIX pipes.
+
+       The accepted syntax is:
+
+               pipe:[<number>]
+
+       number is the number corresponding to the file descriptor of the pipe
+       (e.g. 0 for stdin, 1 for stdout, 2 for stderr). If number is not
+       specified, by default the stdout file descriptor will be used for
+       writing, stdin for reading.
+
+       For example to read from stdin with ffmpeg:
+
+               cat test.wav | ffmpeg -i pipe:0
+               # ...this is the same as...
+               cat test.wav | ffmpeg -i pipe:
+
+       For writing to stdout with ffmpeg:
+
+               ffmpeg -i test.wav -f avi pipe:1 | cat > test.avi
+               # ...this is the same as...
+               ffmpeg -i test.wav -f avi pipe: | cat > test.avi
+
+       This protocol accepts the following options:
+
+       blocksize
+           Set I/O operation maximum block size, in bytes. Default value is
+           "INT_MAX", which results in not limiting the requested block size.
+           Setting this value reasonably low improves user termination request
+           reaction time, which is valuable if data transmission is slow.
+
+       Note that some formats (typically MOV), require the output protocol to
+       be seekable, so they will fail with the pipe output protocol.
+
+   prompeg
+       Pro-MPEG Code of Practice #3 Release 2 FEC protocol.
+
+       The Pro-MPEG CoP#3 FEC is a 2D parity-check forward error correction
+       mechanism for MPEG-2 Transport Streams sent over RTP.
+
+       This protocol must be used in conjunction with the "rtp_mpegts" muxer
+       and the "rtp" protocol.
+
+       The required syntax is:
+
+               -f rtp_mpegts -fec prompeg=