Download link: https://www.pan38.com/dow/share.php?code=JCnzE  Extraction code: 2811
Code notes:
auto_live_stream.py implements looped video streaming, pushing to live platforms over the RTMP protocol.
ai_virtual_host.py implements a BERT-based interactive AI virtual host.
Dependencies include opencv-python, ffmpeg, transformers and a few other libraries.
For actual use you need to request the RTMP ingest URL (stream key) from each live platform.
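A quick environment check before running either script (a minimal sketch; the package list below is inferred from the imports used in the two scripts, and ffmpeg must also be installed as a system binary on PATH):

import importlib.util
import shutil

# Packages inferred from the imports used in the two scripts below
REQUIRED = ["cv2", "numpy", "torch", "transformers", "speech_recognition", "pyttsx3"]

missing = [name for name in REQUIRED if importlib.util.find_spec(name) is None]
if missing:
    print("Missing Python packages:", ", ".join(missing))
if shutil.which("ffmpeg") is None:
    print("ffmpeg binary not found on PATH")
if not missing and shutil.which("ffmpeg"):
    print("Environment looks OK")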
# auto_live_stream.py
import cv2
import numpy as np
import subprocess
import time
from datetime import datetime
import os
class AutoLiveStream:
    def __init__(self, video_path, rtmp_url):
        self.video_path = video_path
        self.rtmp_url = rtmp_url
        # Defaults; overwritten by get_video_info() before streaming starts
        self.fps = 30
        self.width = 720
        self.height = 1280
        self.running = False
        self.process = None

    def get_video_info(self):
        # Read fps and resolution from the source video
        cap = cv2.VideoCapture(self.video_path)
        self.fps = int(cap.get(cv2.CAP_PROP_FPS))
        self.width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        cap.release()

    def generate_overlay(self, frame):
        # Draw the current timestamp onto a frame (used for the local preview)
        now = datetime.now()
        timestamp = now.strftime("%Y-%m-%d %H:%M:%S")
        cv2.putText(frame, timestamp, (50, 100),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        return frame
    def start_stream(self):
        self.get_video_info()
        command = [
            'ffmpeg',
            '-re',
            '-stream_loop', '-1',  # loop the input indefinitely, matching the looped-streaming behaviour described above
            '-i', self.video_path,
            '-vf', f'fps={self.fps},scale={self.width}:{self.height}',
            '-c:v', 'libx264',
            '-preset', 'ultrafast',
            '-tune', 'zerolatency',
            '-pix_fmt', 'yuv420p',
            '-c:a', 'aac',  # most RTMP platforms expect AAC audio
            '-f', 'flv',
            self.rtmp_url
        ]
        self.process = subprocess.Popen(command)
        self.running = True
        print(f"Streaming started: {self.rtmp_url}")

    def stop_stream(self):
        if self.running:
            self.process.terminate()
            self.running = False
            print("Streaming stopped")
    def run_loop(self):
        # Local preview only: the overlay drawn here is NOT part of the pushed stream,
        # because ffmpeg reads the source file directly (see the sketch after this script).
        cap = cv2.VideoCapture(self.video_path)
        while self.running:
            ret, frame = cap.read()
            if not ret:
                # Rewind to the first frame so the preview loops as well
                cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
                continue
            frame = self.generate_overlay(frame)
            cv2.imshow('Live Preview', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        cap.release()
        cv2.destroyAllWindows()
if __name__ == "__main__":
    # Configuration
    VIDEO_FILE = "demo.mp4"
    RTMP_URL = "rtmp://your-stream-server/app/stream-key"
    streamer = AutoLiveStream(VIDEO_FILE, RTMP_URL)
    try:
        streamer.start_stream()
        streamer.run_loop()
    except KeyboardInterrupt:
        pass
    finally:
        streamer.stop_stream()
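As noted in run_loop, the timestamp overlay above only appears in the local preview window, because ffmpeg reads the video file directly. If you want the overlay in the actual stream, one option is to pipe the processed frames into ffmpeg over stdin as raw video. The following is a minimal sketch, not part of the original script; the helper name stream_with_overlay is hypothetical and audio handling is omitted.

import cv2
import subprocess

def stream_with_overlay(video_path, rtmp_url, overlay_fn, fps=30, width=720, height=1280):
    # Push frames processed by overlay_fn to rtmp_url (hypothetical helper, no audio track)
    command = [
        'ffmpeg',
        '-f', 'rawvideo',        # frames arrive as raw BGR bytes on stdin
        '-pix_fmt', 'bgr24',
        '-s', f'{width}x{height}',
        '-r', str(fps),
        '-i', '-',               # read video from stdin
        '-c:v', 'libx264',
        '-preset', 'ultrafast',
        '-pix_fmt', 'yuv420p',
        '-f', 'flv',
        rtmp_url,
    ]
    proc = subprocess.Popen(command, stdin=subprocess.PIPE)
    cap = cv2.VideoCapture(video_path)
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                cap.set(cv2.CAP_PROP_POS_FRAMES, 0)  # rewind to loop the file
                continue
            frame = cv2.resize(frame, (width, height))
            frame = overlay_fn(frame)
            proc.stdin.write(frame.tobytes())
    finally:
        cap.release()
        proc.stdin.close()
        proc.wait()

Usage would be roughly stream_with_overlay(VIDEO_FILE, RTMP_URL, streamer.generate_overlay); note that this variant drops the source audio.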
# ai_virtual_host.py
import time
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
import speech_recognition as sr
import pyttsx3
import threading
class AIVirtualHost:
    def __init__(self):
        # Note: bert-base-chinese is an encoder-only checkpoint, so AutoModelForSeq2SeqLM
        # cannot load it for generation. Substitute a Chinese seq2seq / text-generation
        # checkpoint here for real replies (see the sketch after this script).
        self.model_name = "bert-base-chinese"
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        self.model = AutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        self.recognizer = sr.Recognizer()
        self.engine = pyttsx3.init()
        self.is_running = False

    def listen(self):
        # Capture one utterance from the microphone and transcribe it with Google STT
        with sr.Microphone() as source:
            print("AI host is listening...")
            audio = self.recognizer.listen(source)
            try:
                text = self.recognizer.recognize_google(audio, language="zh-CN")
                return text
            except Exception as e:
                print(f"Recognition error: {e}")
                return None

    def generate_response(self, input_text):
        inputs = self.tokenizer(input_text, return_tensors="pt")
        outputs = self.model.generate(**inputs)
        return self.tokenizer.decode(outputs[0], skip_special_tokens=True)

    def speak(self, text):
        # Text-to-speech via the local pyttsx3 engine
        self.engine.say(text)
        self.engine.runAndWait()
    def run_interactive(self):
        self.is_running = True
        while self.is_running:
            user_input = self.listen()
            if user_input:
                response = self.generate_response(user_input)
                print(f"AI host reply: {response}")
                self.speak(response)

    def start(self):
        # Daemon thread so the process can exit even while listen() is blocking
        thread = threading.Thread(target=self.run_interactive, daemon=True)
        thread.start()

    def stop(self):
        self.is_running = False
if __name__ == "__main__":
    host = AIVirtualHost()
    print("AI virtual host started, press Ctrl+C to exit")
    try:
        host.start()
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        host.stop()
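Because bert-base-chinese is encoder-only, generate_response above will not produce usable replies as written. Below is a minimal sketch of a drop-in replacement using the transformers text2text-generation pipeline; the checkpoint name is a placeholder, not a real model, and should be replaced with whatever Chinese seq2seq or chat model you actually use.

from transformers import pipeline

# "your-chinese-seq2seq-model" is a placeholder checkpoint name; substitute a
# generative Chinese model available to you.
generator = pipeline("text2text-generation", model="your-chinese-seq2seq-model")

def generate_response(input_text, max_new_tokens=64):
    # Return the model's reply text for one user utterance
    result = generator(input_text, max_new_tokens=max_new_tokens)
    return result[0]["generated_text"]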