Download: https://www.pan38.com/share.php?code=pvvmX (extraction code: 8888)
A Python-based system that animates a single face photo, with a complete implementation of blink and mouth-opening motions. It is built on OpenCV, dlib, and standard image-warping techniques.
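For quick reference (not part of the original script), the blink and mouth-opening code below indexes into dlib's standard 68-point landmark layout; the ranges it relies on are:
# dlib 68-point landmark layout, as used by the animation code below
RIGHT_EYE = list(range(36, 42))  # 36/39 eye corners, 37/38 upper lid, 40/41 lower lid
LEFT_EYE = list(range(42, 48))   # 42/45 eye corners, 43/44 upper lid, 46/47 lower lid
OUTER_LIP = list(range(48, 60))  # 48/54 mouth corners
INNER_LIP = list(range(60, 68))  # 60/64 inner mouth corners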
import cv2
import dlib
import numpy as np
import argparse
class FaceAnimator:
    def __init__(self):
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
        # Normalized intensity curves for one blink / one mouth-open cycle
        self.blink_sequence = [0.1, 0.3, 0.7, 0.9, 0.7, 0.3, 0.1]
        self.mouth_open_sequence = [0.1, 0.3, 0.5, 0.7, 0.9, 0.7, 0.5, 0.3, 0.1]
    def load_image(self, image_path):
        image = cv2.imread(image_path)
        if image is None:
            raise ValueError(f"Failed to load image: {image_path}")
        return image
    def get_landmarks(self, image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        faces = self.detector(gray)
        if len(faces) == 0:
            raise ValueError("No face detected")
        landmarks = self.predictor(gray, faces[0])
        return np.array([(p.x, p.y) for p in landmarks.parts()])
    def create_triangles(self, points):
        # Delaunay triangulation of the landmarks via OpenCV's Subdiv2D.
        # The bounding rectangle is derived from the points themselves (plus a
        # margin) rather than a hard-coded 1000x1000 area, so larger images work.
        rect = (0, 0, int(np.max(points[:, 0])) + 10, int(np.max(points[:, 1])) + 10)
        subdiv = cv2.Subdiv2D(rect)
        for p in points:
            subdiv.insert((int(p[0]), int(p[1])))
        triangle_list = subdiv.getTriangleList()
        triangles = []
        for t in triangle_list:
            pt1 = (t[0], t[1])
            pt2 = (t[2], t[3])
            pt3 = (t[4], t[5])
            triangles.append((pt1, pt2, pt3))
        return triangles
    def apply_affine_transform(self, src, src_tri, dst_tri, size):
        warp_mat = cv2.getAffineTransform(np.float32(src_tri), np.float32(dst_tri))
        dst = cv2.warpAffine(src, warp_mat, (size[0], size[1]), None,
                             flags=cv2.INTER_LINEAR,
                             borderMode=cv2.BORDER_REFLECT_101)
        return dst
    def warp_triangle(self, img1, img2, t1, t2):
        # Bounding rectangles of the source and destination triangles
        r1 = cv2.boundingRect(np.float32([t1]))
        r2 = cv2.boundingRect(np.float32([t2]))
        t1_rect = []
        t2_rect = []
        t2_rect_int = []
        for i in range(0, 3):
            t1_rect.append(((t1[i][0] - r1[0]), (t1[i][1] - r1[1])))
            t2_rect.append(((t2[i][0] - r2[0]), (t2[i][1] - r2[1])))
            t2_rect_int.append((int(t2[i][0] - r2[0]), int(t2[i][1] - r2[1])))
        # Mask of the destination triangle inside its bounding rectangle
        mask = np.zeros((r2[3], r2[2], 3), dtype=np.float32)
        cv2.fillConvexPoly(mask, np.int32(t2_rect_int), (1.0, 1.0, 1.0), 16, 0)
        img1_rect = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
        size = (r2[2], r2[3])
        warp_image = self.apply_affine_transform(img1_rect, t1_rect, t2_rect, size)
        warp_image = warp_image * mask
        # Blend the warped patch into the destination image, inside the triangle only
        img2_rect = img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]]
        img2_rect = img2_rect * (1 - mask)
        img2_rect = img2_rect + warp_image
        img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = img2_rect
    def generate_blink_frame(self, img, landmarks, intensity):
        new_landmarks = landmarks.copy()
        # Upper-eyelid points of both eyes (37/38 right eye, 43/44 left eye)
        upper_lids = [37, 38, 43, 44]
        # Lower-eyelid points of both eyes (40/41 right eye, 46/47 left eye)
        lower_lids = [40, 41, 46, 47]
        # Pull the upper lids down ...
        for idx in upper_lids:
            new_landmarks[idx][1] += intensity * 5
        # ... and the lower lids slightly up to close the eyes
        for idx in lower_lids:
            new_landmarks[idx][1] -= intensity * 3
        return self.warp_image(img, landmarks, new_landmarks)
    def generate_mouth_open_frame(self, img, landmarks, intensity):
        new_landmarks = landmarks.copy()
        # Upper-lip points (outer 49-53, inner 61-63); the mouth corners stay fixed
        upper_lip = [49, 50, 51, 52, 53, 61, 62, 63]
        # Lower-lip points (outer 55-59, inner 65-67)
        lower_lip = [55, 56, 57, 58, 59, 65, 66, 67]
        # Move the upper lip up ...
        for idx in upper_lip:
            new_landmarks[idx][1] -= intensity * 2
        # ... and the lower lip down to open the mouth
        for idx in lower_lip:
            new_landmarks[idx][1] += intensity * 3
        return self.warp_image(img, landmarks, new_landmarks)
    def warp_image(self, img, src_points, dst_points):
        triangles = self.create_triangles(src_points)
        # Start from a copy of the input so the background outside the face is kept
        warped_img = img.copy()
        for triangle in triangles:
            src_tri = []
            dst_tri = []
            for p in triangle:
                # Map each triangle vertex back to its landmark index
                src_idx = np.where((src_points == p).all(axis=1))[0]
                if len(src_idx) > 0:
                    src_tri.append(src_points[src_idx[0]])
                    dst_tri.append(dst_points[src_idx[0]])
            if len(src_tri) == 3:
                self.warp_triangle(img, warped_img, src_tri, dst_tri)
        return warped_img
    def generate_animation(self, input_image, output_path, animation_type="blink"):
        img = self.load_image(input_image)
        landmarks = self.get_landmarks(img)
        if animation_type == "blink":
            sequence = self.blink_sequence
            generator = self.generate_blink_frame
        elif animation_type == "mouth":
            sequence = self.mouth_open_sequence
            generator = self.generate_mouth_open_frame
        else:
            raise ValueError("Unsupported animation type")
        frames = []
        for intensity in sequence:
            frame = generator(img, landmarks, intensity)
            frames.append(frame)
        # Append the reversed frames (minus the endpoints) so the animation loops back smoothly
        frames.extend(frames[::-1][1:-1])
        # Save as a GIF ...
        if output_path.endswith('.gif'):
            import imageio
            with imageio.get_writer(output_path, mode='I', duration=0.1) as writer:
                for frame in frames:
                    writer.append_data(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        # ... or as a numbered frame sequence
        else:
            for i, frame in enumerate(frames):
                cv2.imwrite(f"{output_path}_{i}.jpg", frame)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Face animation generator")
    parser.add_argument("-i", "--input", required=True, help="Path to the input image")
    parser.add_argument("-o", "--output", required=True, help="Output path")
    parser.add_argument("-t", "--type", choices=["blink", "mouth"], default="blink",
                        help="Animation type: blink or mouth")
    args = parser.parse_args()
    animator = FaceAnimator()
    animator.generate_animation(args.input, args.output, args.type)
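The script pulls in the following third-party packages. A sketch of a matching requirements.txt for the pip install -r requirements.txt step in the usage notes below (unpinned; exact versions are up to you, and building dlib usually requires CMake and a C++ compiler):
opencv-python
dlib
numpy
imageio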
Face Animation Generator
Features
- Generate a blink animation from a single photo
- Generate a mouth-opening animation from a single photo
- Output as a GIF or as a sequence of frames
Usage
- Install the dependencies:
pip install -r requirements.txt
- Download the dlib model file shape_predictor_68_face_landmarks.dat (see the download sketch after this list)
- Run the program:
python face_animator.py -i input.jpg -o output.gif -t blink
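If you would rather fetch the landmark model from Python, here is a minimal sketch; it assumes the model is still hosted at the usual dlib.net address and unpacks the .bz2 archive next to the script.
import bz2
import urllib.request
MODEL_URL = "http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2"
urllib.request.urlretrieve(MODEL_URL, "shape_predictor_68_face_landmarks.dat.bz2")
# Decompress the archive to shape_predictor_68_face_landmarks.dat
with bz2.open("shape_predictor_68_face_landmarks.dat.bz2", "rb") as src, \
        open("shape_predictor_68_face_landmarks.dat", "wb") as dst:
    dst.write(src.read())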
Parameters
- -i/--input: path to the input image
- -o/--output: output path (.gif or a filename prefix)
- -t/--type: animation type (blink or mouth)
Example
Generate a blink animation:
python face_animator.py -i photo.jpg -o blink.gif -t blink
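The class can also be used directly from Python instead of the command line. A minimal sketch, assuming the code above is saved as face_animator.py and photo.jpg is your input image:
from face_animator import FaceAnimator
animator = FaceAnimator()
# Blink animation written as a GIF
animator.generate_animation("photo.jpg", "blink.gif", animation_type="blink")
# Mouth-opening animation written as a numbered JPEG sequence (frames_0.jpg, frames_1.jpg, ...)
animator.generate_animation("photo.jpg", "frames", animation_type="mouth")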