提交 8007b763 编写于 作者: 三月三net

Python超人-宇宙模拟器

上级 c48d4d47
from ursina import Entity, time, application, Path
import os, shutil
import builtins
import numpy as np
class VideoRecorder(Entity):
    """Record the ursina/panda3d window to numbered PNG frames in a temp
    folder, then assemble them into an MP4 or GIF.

    Frames are produced either by panda3d's ``base.movie`` (started in
    ``__init__`` when a ``base`` instance exists) or by calling
    :meth:`screenshot` once per rendered frame.
    """

    def __init__(self, temp_dir="video_temp", file_name="test", asset_folder=None, save_as_dir=None):
        """
        :param temp_dir: folder (under *asset_folder*) that holds the PNG frames
        :param file_name: prefix for frame files and the output video/gif name
        :param asset_folder: base folder; defaults to ``application.asset_folder``
        :param save_as_dir: where the final video/gif is written (defaulted lazily)
        """
        self.temp_dir = temp_dir
        self.file_name = file_name
        self.save_as_dir = save_as_dir
        if asset_folder is None:
            asset_folder = application.asset_folder
        self.file_path = Path(asset_folder) / self.temp_dir
        self.duration = 1.0  # seconds recorded by base.movie
        self.fps = 30        # capture frames per second
        self.sd = 6          # zero-padding width of the frame counter
        self.t = 0           # time accumulated since the last screenshot
        self.i = 0           # frame index
        # NOTE(review): Entity.__init__ is never called (no super().__init__());
        # presumably intentional so the recorder can exist without a scene — confirm.
        # Only start panda3d's movie recorder when a ShowBase instance exists.
        if getattr(builtins, 'base', None) is not None:
            if self.file_path.exists():
                shutil.rmtree(self.file_path)  # start from an empty frame folder
            self.file_path.mkdir()
            base.movie(namePrefix=f'\\{self.temp_dir}\\{self.file_name}', duration=self.duration, fps=self.fps,
                       format='png',
                       sd=self.sd)

    def screenshot(self):
        """Capture one frame if at least ``1 / fps`` seconds have elapsed."""
        self.t += time.dt
        if self.t >= 1 / self.fps:
            base.screenshot(
                namePrefix=f'\\{self.temp_dir}\\{self.file_name}_' + str(self.i).zfill(self.sd) + '.png',
                defaultFilename=0,
            )
            self.t = 0
            self.i += 1

    def save_as_video(self):
        """Join the captured PNG frames into ``<save_as_dir>/<file_name>.mp4``."""
        import imageio
        if not os.path.exists(self.file_path):
            return
        if self.save_as_dir is None:
            self.save_as_dir = "."
        writer = imageio.get_writer(Path(f'{self.save_as_dir}/{self.file_name}.mp4'), fps=self.fps)
        # BUGFIX: os.listdir returns entries in arbitrary order; sort so the
        # zero-padded frame numbers play back in capture order.
        for filename in sorted(os.listdir(self.file_path)):
            im = imageio.imread(self.file_path / filename)
            if len(im.shape) == 2:
                # 2-D image: a transparent/empty capture with no color data — skip it.
                continue
            writer.append_data(im)
        writer.close()
        print('Saved VIDEO to:', Path(f'{self.save_as_dir}/{self.file_name}.mp4'))

    def save_as_gif(self):
        """Join the captured PNG frames into ``<save_as_dir>/<file_name>.gif``."""
        import imageio
        images = []
        if not os.path.exists(self.file_path):
            return
        if self.save_as_dir is None:
            self.save_as_dir = self.file_path.parent
        # BUGFIX: sort for deterministic frame order (see save_as_video).
        for filename in sorted(os.listdir(self.file_path)):
            images.append(imageio.imread(self.file_path / filename))
        imageio.mimsave(Path(f'{self.save_as_dir}/{self.file_name}.gif'), images)
        # shutil.rmtree(self.file_path)  # delete temp folder
        print('Saved GIF to:', Path(f'{self.save_as_dir}/{self.file_name}.gif'))
if __name__ == '__main__':
    # Convert frames previously captured into the given asset folder to an mp4.
    recorder = VideoRecorder(asset_folder="G:\\works\\gitcode\\universe_sim\\sim_scenes\\science")
    # recorder.save_as_gif()
    recorder.save_as_video()
# from ursina import *
# import os, shutil
# import numpy as np
#
# # import imageio # gets imported in convert_to_gif
# # from panda3d.core import PNMImage
# # pip install -i http://pypi.douban.com/simple/ --trusted-host=pypi.douban.com imageio-ffmpeg
# import shutil
#
#
# class VideoRecorder(Entity):
# def __init__(self, duration=50, name='untitled_video', **kwargs):
# os.environ["PATH"] = os.environ["PATH"] + ";" + "F:\\Tools\\ffmpeg"
# super().__init__()
# self.recording = False
# self.file_path = Path(application.asset_folder) / 'video_temp'
#
# if os.path.exists(self.file_path):
# shutil.rmtree(self.file_path)
#
# os.mkdir(self.file_path)
#
# # if os.path.exists(self.file_path):
# # os.rmdir(self.file_path)
# #
# # os.mkdir(self.file_path)
#
# self.i = 0
# self.duration = duration
# self.fps = 50
# self.video_name = name
# self.t = 0
#
# for key, value in kwargs.items():
# setattr(self, key, value)
#
# self.max_frames = int(self.duration * self.fps)
# self.frames = []
#
# def start_recording(self):
# print('start recording,', self.duration, self.file_path)
# window.fps_counter.enabled = False
# window.exit_button.visible = False
# self.frames = []
# self.max_frames = self.duration * self.fps
# if not self.file_path.exists():
# self.file_path.mkdir()
# base.movie(namePrefix=f'\\video_temp\\{self.video_name}', duration=self.duration, fps=self.fps, format='png', sd=4)
#
# self.recording = True
# invoke(self.stop_recording, delay=self.duration)
#
# def stop_recording(self):
# self.recording = False
# window.fps_counter.enabled = True
# window.exit_button.visible = True
# print('stop recording')
# # self.convert_to_gif()
# self.convert_to_vid()
#
# def update(self):
# if not self.recording:
# return
#
# self.t += time.dt
# if self.t >= 1 / 30:
# base.screenshot(
# namePrefix='\\video_temp\\' + self.video_name + '_' + str(self.i).zfill(4) + '.png',
# defaultFilename=0,
# )
# self.t = 0
# self.i += 1
#
# def convert_to_gif(self):
# import imageio
# images = []
# if not os.path.exists(self.file_path):
# return
#
# for filename in os.listdir(self.file_path):
# images.append(imageio.imread(self.file_path / filename))
#
# imageio.mimsave(Path(f'{self.file_path.parent}/{self.video_name}.gif'), images)
# shutil.rmtree(self.file_path) # delete temp folder
# print('saved gif to:', Path(f'{self.file_path.parent}/{self.video_name}.gif'))
#
# def convert_to_vid(self):
# import imageio
# images = []
# if not os.path.exists(self.file_path):
# return
#
# writer = imageio.get_writer('test.mp4', fps=self.fps)
# for file in os.listdir(self.file_path):
# im = imageio.imread(self.file_path / file)
# writer.append_data(im)
# writer.close()
# print('Video saved!!')
#
#
# class VideoRecorderUI(WindowPanel):
# def __init__(self, **kwargs):
# self.duration_label = Text('duration:')
# self.duration_field = InputField(default_value='5')
# self.fps_label = Text('fps:')
# self.fps_field = InputField(default_value='30')
# self.name_label = Text('name:')
# self.name_field = InputField(default_value='untitled_video')
#
# self.start_button = Button(text='Start Recording [Shift+F12]', color=color.azure, on_click=self.start_recording)
#
# super().__init__(
# title='Video Recorder [F12]',
# content=(
# self.duration_label,
# self.duration_field,
# self.fps_label,
# self.fps_field,
# self.name_label,
# self.name_field,
# Space(1),
# self.start_button,
# ),
# )
# self.y = .5
# self.scale *= .75
# self.visible = False
#
# def input(self, key):
# if key == 'f12':
# self.visible = not self.visible
#
# if held_keys['shift'] and key == 'f12':
# self.start_button.on_click()
#
# def start_recording(self):
# print(self.name_field)
# if self.name_field.text == '':
# self.name_field.blink(color.color(0, 1, 1, .5), .5)
# print('enter name')
# return
#
# # self.start_button.color=color.lime
# self.visible = False
# application.video_recorder.duration = float(self.duration_field.text)
# application.video_recorder.video_name = self.name_field.text
# application.video_recorder.frame_skip = 60 // int(self.fps_field.text)
# application.video_recorder.recording = True
#
#
# if __name__ == '__main__':
# app = Ursina()
# # window.size = (1600/3,900/3)
# # cube = primitives.RedCube()
# # cube.animate_x(5, duration=5, curve=curve.linear)
# # cube.animate_x(0, duration=5, curve=curve.linear, delay=5)
# # vr = VideoRecorder()
# # invoke(setattr, vr, 'recording', True, delay=1)
# # invoke(os._exit, 0, delay=6)
# # vr.recording = True
# window.size *= .5
# from ursina.prefabs.first_person_controller import FirstPersonController
# from ursina.shaders import lit_with_shadows_shader
#
# random.seed(0)
# Entity.default_shader = lit_with_shadows_shader
#
# ground = Entity(model='plane', collider='box', scale=64, texture='grass', texture_scale=(4, 4))
#
# editor_camera = EditorCamera(enabled=False, ignore_paused=True)
# player = FirstPersonController(model='cube', z=-10, color=color.orange, origin_y=-.5, speed=8)
# player.collider = BoxCollider(player, Vec3(0, 1, 0), Vec3(1, 2, 1))
#
# gun = Entity(model='cube', parent=camera, position=(.5, -.25, .25), scale=(.3, .2, 1), origin_z=-.5,
# color=color.red, on_cooldown=False)
#
# shootables_parent = Entity()
# mouse.traverse_target = shootables_parent
#
# for i in range(16):
# Entity(model='cube', origin_y=-.5, scale=2, texture='brick', texture_scale=(1, 2),
# x=random.uniform(-8, 8),
# z=random.uniform(-8, 8) + 8,
# collider='box',
# scale_y=random.uniform(2, 3),
# color=color.hsv(0, 0, random.uniform(.9, 1))
# )
#
# sun = DirectionalLight()
# sun.look_at(Vec3(1, -1, -1))
# Sky()
#
# vr = VideoRecorder(duration=10)
#
#
# def input(key):
# if key == '5':
# vr.start_recording()
# if key == '6':
# vr.stop_recording()
#
#
# app.run()
......@@ -16,7 +16,6 @@ from simulators.ursina.entities.body_timer import TimeData
from simulators.ursina.entities.entity_utils import get_value_direction_vectors
from simulators.ursina.ursina_event import UrsinaEvent
camera_follow_light = None # 摄像机固定,不会跟随光
camera_follow_light = 'ForwardView' # 摄像机跟随光,方向是向前看
# 实例化一个初始化对象(订阅事件,记录到达每个行星所需要的时间)
......@@ -111,7 +110,7 @@ init.body_arrived = body_arrived
# 使用 ursina 查看的运行效果
# 常用快捷键: P:运行和暂停 O:重新开始 I:显示天体轨迹
# position = 左-右+、上+下-、前+后-
ursina_run(bodies, 60,
ursina_run(bodies, 5,
position=init.camera_position,
# show_trail=init.show_trail,
show_timer=True,
......
......@@ -36,6 +36,10 @@ class UrsinaSimulator(Simulator):
"""
def __init__(self, bodies_sys: System):
# window.borderless = False
window.title = 'universe_sim' # '宇宙模拟器'
icon = find_file("images/icon.ico")
window.icon = icon
self.app = Ursina()
# import os
# os.environ['CUDA_VISIBLE_DEVICES'] = '1' # 选择第二个GPU
......@@ -263,9 +267,6 @@ class UrsinaSimulator(Simulator):
# rotation=(0, 0, 0))
def run(self, dt, **kwargs):
window.title = '宇宙模拟器'
# 默认非近距离查看
view_closely = False
if "view_closely" in kwargs:
......
import cv2
from PIL import ImageGrab
import numpy as np
import argparse
import time
global img
global point1, point2
def on_mouse(event, x, y, flags, param):
    """Mouse callback for ROI selection: record the press corner in
    ``point1``, the release corner in ``point2``, and draw feedback
    (circle / rubber-band rectangle) on a copy of the module image."""
    global img, point1, point2
    canvas = img.copy()
    if event == cv2.EVENT_LBUTTONDOWN:
        # Left button pressed: remember the first corner.
        point1 = (x, y)
        cv2.circle(canvas, point1, 10, (0, 255, 0), thickness=2)
    elif event == cv2.EVENT_MOUSEMOVE and (flags & cv2.EVENT_FLAG_LBUTTON):
        # Dragging with the left button held: rubber-band preview.
        cv2.rectangle(canvas, point1, (x, y), (255, 0, 0), thickness=2)
    elif event == cv2.EVENT_LBUTTONUP:
        # Left button released: remember the opposite corner.
        point2 = (x, y)
        cv2.rectangle(canvas, point1, point2, (0, 0, 255), thickness=2)
    else:
        return
    cv2.imshow('image', canvas)
def select_roi(frame):
    """Display *frame* fullscreen and let the user drag a rectangle with
    the left mouse button; any key press closes the window.

    Returns the two corner points ``(point1, point2)`` set by ``on_mouse``.
    """
    global img, point1, point2
    img = cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2BGR)
    window = 'image'
    cv2.namedWindow(window, cv2.WINDOW_NORMAL)
    cv2.setWindowProperty(window, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    cv2.setMouseCallback(window, on_mouse)
    cv2.imshow(window, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return point1, point2
if __name__ == '__main__':
    # CLI screen recorder: capture the full screen or a dragged region to mp4.
    parser = argparse.ArgumentParser()
    parser.add_argument('--fps', type=int, default=50, help='frame per second')
    parser.add_argument('--total_time', type=int, default=15, help='video total time')
    parser.add_argument('--savename', type=str, default='video.mp4', help='save file name')
    parser.add_argument('--screen_type', default=1, type=int, choices=[0, 1], help='1: full screen, 0: region screen')
    args = parser.parse_args()
    print('等到3秒,请切换到录屏的页面')
    if args.screen_type == 0:
        print('Press Esc to close window')
    time.sleep(3)
    curScreen = ImageGrab.grab()  # one full-screen grab used only to size the writer
    if args.screen_type:
        # NOTE(review): PIL's .size is (width, height), so this assignment swaps
        # the names; the later (height, width) tuple handed to VideoWriter is
        # therefore (actual_width, actual_height) again — confusing but
        # self-consistent. Confirm before "fixing" either line alone.
        height, width = curScreen.size
        min_x, min_y, max_x, max_y = 0, 0, width, height
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        video = cv2.VideoWriter(args.savename, fourcc, args.fps, (height, width))
    else:
        # Region capture: user drags a rectangle on a fullscreen preview.
        point1, point2 = select_roi(curScreen)
        min_x = min(point1[0], point2[0])
        min_y = min(point1[1], point2[1])
        max_x = max(point1[0], point2[0])
        max_y = max(point1[1], point2[1])
        height, width = max_y - min_y, max_x - min_x
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        video = cv2.VideoWriter(args.savename, fourcc, args.fps, (width, height))
    imageNum = 0
    while True:
        imageNum += 1
        captureImage = ImageGrab.grab()  # grab the current screen contents
        frame = cv2.cvtColor(np.array(captureImage), cv2.COLOR_RGB2BGR)
        if args.screen_type == 0:
            frame = frame[min_y:max_y, min_x:max_x, :]
        if imageNum < args.fps * args.total_time:
            video.write(frame)
        # Exit: 'q' pressed (requires an open HighGUI window) or frame budget hit.
        # NOTE(review): waitKey(50) also throttles this loop to ~20 iterations/s,
        # so the real capture rate cannot reach --fps=50 — confirm if intended.
        if cv2.waitKey(50) == ord('q') or imageNum > args.fps * args.total_time:
            break
    video.release()
    cv2.destroyAllWindows()
\ No newline at end of file
......@@ -4,71 +4,33 @@ import numpy as np
import argparse
import time
import os
global img
global point1, point2
import win32gui
import win32ui
import win32con
import win32api
#
# def on_mouse(event, x, y, flags, param):
# global img, point1, point2
# img2 = img.copy()
# if event == cv2.EVENT_LBUTTONDOWN: # 左键点击
# point1 = (x, y)
# cv2.circle(img2, point1, 10, (0, 255, 0), thickness=2)
# cv2.imshow('image', img2)
# elif event == cv2.EVENT_MOUSEMOVE and (flags & cv2.EVENT_FLAG_LBUTTON): # 按住左键拖曳
# cv2.rectangle(img2, point1, (x, y), (255, 0, 0), thickness=2)
# cv2.imshow('image', img2)
# elif event == cv2.EVENT_LBUTTONUP: # 左键释放
# point2 = (x, y)
# cv2.rectangle(img2, point1, point2, (0, 0, 255), thickness=2)
# cv2.imshow('image', img2)
#
#
# def select_roi(frame):
# global img, point1, point2
# img = cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2BGR)
# winname = 'image'
# cv2.namedWindow(winname, cv2.WINDOW_NORMAL)
# cv2.setWindowProperty(winname, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
# cv2.setMouseCallback(winname, on_mouse)
# cv2.imshow(winname, img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# return point1, point2
FFMPEG_PATH = "F:\\Tools\\ffmpeg"
FFMPEG_PATH= "F:\\Tools\\ffmpeg"
def crop(mp4_file):
# "ffmpeg -i 1年等于11个月.mp4 -vf crop=1724:972:194:108 1年等于11个月_crop.mp4 -y"
# "ffmpeg -i input.mp4 -vf crop=1724:972:194:108 output.mp4 -y"
cmd = 'SET PATH=%PATH%;"' + FFMPEG_PATH + '" & '
cmd = cmd + 'ffmpeg -i "' + mp4_file + '" -vf crop=1724:972:194:108 "' + mp4_file + '_crop.mp4" -y'
val = os.system(cmd)
if val == 0:
print("success")
print("裁剪视频成功")
else:
print("fail..")
print("裁剪视频失败")
window_name="earth"
window_name="fiction"
window_name="funny"
window_name="science"
window_name="solar_system"
window_name="tri_bodies"
window_name="wonders"
def get_window_img_dc(window_name="science"):
def get_window_img_dc(window_name="universe_sim"):
# 获取桌面
# hdesktop = win32gui.GetDesktopWindow()
handle = win32gui.FindWindow(None, window_name)
return handle
if __name__ == '__main__':
def record():
parser = argparse.ArgumentParser()
parser.add_argument('--fps', type=int, default=30, help='frame per second')
parser.add_argument('--total_time', type=int, default=10000000, help='video total time')
......@@ -76,15 +38,11 @@ if __name__ == '__main__':
parser.add_argument('--screen_type', default=0, type=int, choices=[0, 1], help='1: full screen, 0: region screen')
args = parser.parse_args()
print('等到3秒,请切换到录屏的页面')
if args.screen_type == 0:
print('Press Esc to close window')
last_time = time.time() * 1000
time.sleep(3)
curScreen = ImageGrab.grab() # 获取屏幕对象
if args.screen_type:
curScreen = ImageGrab.grab() # 获取屏幕对象
height, width = curScreen.size
min_x, min_y, max_x, max_y = 0, 0, width, height
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
......@@ -102,12 +60,13 @@ if __name__ == '__main__':
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
video = cv2.VideoWriter(args.savename, fourcc, args.fps, (height, width))
wait_ms = 1000 / args.fps
# wait_ms = 1000 / args.fps
imageNum = 0
print("查找窗口...")
print("查找模拟器窗口")
while True:
handle = get_window_img_dc()
if handle > 0:
print(handle)
break
time.sleep(0.001)
print("开始捕捉...")
......@@ -115,15 +74,16 @@ if __name__ == '__main__':
while True:
handle = get_window_img_dc()
if handle == 0:
print("模拟器窗口关闭")
break
current_time = time.time() * 1000
next_frame_time = last_time + wait_ms
if current_time < next_frame_time:
time.sleep((next_frame_time - current_time) / 1000)
print((next_frame_time - current_time) / 1000)
last_time = time.time() * 1000
# current_time = time.time() * 1000
# next_frame_time = last_time + wait_ms
# if current_time < next_frame_time:
# time.sleep((next_frame_time - current_time) / 1000)
# print((next_frame_time - current_time) / 1000)
#
# last_time = time.time() * 1000
imageNum += 1
captureImage = ImageGrab.grab() # 抓取屏幕
frame = cv2.cvtColor(np.array(captureImage), cv2.COLOR_RGB2BGR)
......@@ -141,8 +101,12 @@ if __name__ == '__main__':
print("退出...")
break
print("保存中...")
print("视频保存")
video.release()
cv2.destroyAllWindows()
# crop('video.mp4')
print("完成")
if __name__ == '__main__':
record()
import numpy as np
import win32gui
import win32ui
import win32con
import win32api
import cv2
from PIL import ImageGrab
import time
from dataclasses import dataclass
@dataclass
class Args:
    """Plain option holder mirroring the argparse flags of the CLI variant.

    Defaults are placeholders; callers assign real values after construction.
    """
    savename: str = ''
    fps: int = 0
    total_time: int = 0
def get_window_img_dc(window_name):
    """Build a win32ui device context for the window titled *window_name*.

    :param window_name: exact window title passed to ``FindWindow``
    :return: ``(img_dc, width, height)`` — a win32ui DC wrapping the window's
        GDI DC, and the VIRTUAL SCREEN width/height in pixels (not the
        window's own size).
    """
    # NOTE(review): FindWindow returns 0 when the title is not found, and 0 is
    # then passed to GetWindowDC, which yields the whole-screen DC — confirm
    # whether that fallback is intended.
    handle = win32gui.FindWindow(None, window_name)
    # Virtual-screen resolution (spans all monitors). The original also read
    # SM_X/YVIRTUALSCREEN into unused locals; those dead reads are removed.
    width = win32api.GetSystemMetrics(win32con.SM_CXVIRTUALSCREEN)
    height = win32api.GetSystemMetrics(win32con.SM_CYVIRTUALSCREEN)
    # Wrap the raw GDI window DC in a win32ui DC object.
    desktop_dc = win32gui.GetWindowDC(handle)
    img_dc = win32ui.CreateDCFromHandle(desktop_dc)
    return img_dc, width, height


# Module-level DC used by screenshot(); bound once at import time.
window_img_dc, width, height = get_window_img_dc("solar_system")
def screenshot():
    """Capture the window bound to the module-level ``window_img_dc``.

    :return: ``(height, width, 3)`` uint8 array (alpha channel stripped).
    """
    # In-memory DC + bitmap that the window contents are BitBlt'ed into.
    mem_dc = window_img_dc.CreateCompatibleDC()
    bitmap = win32ui.CreateBitmap()
    bitmap.CreateCompatibleBitmap(window_img_dc, width, height)
    mem_dc.SelectObject(bitmap)
    # Copy the window pixels into the memory DC.
    mem_dc.BitBlt((0, 0), (width, height), window_img_dc, (0, 0), win32con.SRCCOPY)
    # bitmap.SaveBitmapFile(mem_dc, 'screenshot.bmp')  # dump to file if needed
    raw_bytes = bitmap.GetBitmapBits(True)
    # BUGFIX: np.fromstring is deprecated (removed for binary input in modern
    # NumPy); np.frombuffer is the supported equivalent.
    img = np.frombuffer(raw_bytes, dtype='uint8')
    img.shape = (height, width, 4)
    # Release the GDI resources.
    mem_dc.DeleteDC()
    win32gui.DeleteObject(bitmap.GetHandle())
    img = img[:, :, 0:3]  # drop the alpha channel
    return img
def show_image(img):
    """Save *img* (a numpy image array) to ``./1.jpg`` and open it in the
    default image viewer; also prints the PIL type and (width, height) size."""
    from PIL import Image
    picture = Image.fromarray(img)
    print(type(picture))   # PIL image class
    print(picture.size)    # PIL reports (width, height)
    picture.save(r"./1.jpg")
    picture.show()
if __name__ == '__main__':
    # Record the window bound at import time (see get_window_img_dc call)
    # to video.mp4 until Esc is pressed or the frame budget is reached.
    img = screenshot()            # first frame, also used to size the writer
    height, width, _ = img.shape  # ndarray shape is (rows, cols, channels)
    args = Args()
    args.savename = "video.mp4"
    args.fps = 30
    args.total_time = 10
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    # BUGFIX: VideoWriter's frameSize parameter is (width, height); the
    # original passed (height, width), so any non-square frame was silently
    # rejected by write(). Dead locals wait_ms/last_time (leftovers of the
    # commented-out pacing code) are removed.
    video = cv2.VideoWriter(args.savename, fourcc, args.fps, (width, height))
    imageNum = 0
    while True:
        if imageNum == 0:
            captureImage = img          # reuse the sizing frame as frame 0
        else:
            captureImage = screenshot()
        imageNum += 1
        frame = cv2.cvtColor(np.array(captureImage), cv2.COLOR_RGB2BGR)
        if imageNum < args.fps * args.total_time:
            video.write(frame)
        # Esc stops early; otherwise stop once the frame budget is exceeded.
        k = cv2.waitKey(1)
        if k == 27 or imageNum > args.fps * args.total_time:
            print("退出...")
            break
    print("保存中...")
    video.release()
    cv2.destroyAllWindows()
    print("完成")
文件已添加
import sys
import time
from PIL import ImageGrab
import cv2
from pathlib import Path
import numpy as np
from numba import jit
# pip install pynput -i https://pypi.douban.com/simple/
from pynput import keyboard
from threading import Thread
@jit(nopython=True)
def average_n(x, y):
    """Element-wise blend of two frames, ``(x + 2*y) // 3``, cast back to
    x's dtype. Used to synthesize in-between frames when upsampling fps."""
    return ((x + y + y) // 3).astype(x.dtype)
class ScreenshotVideo(Thread):
    """Screen-recorder thread: grabs full-screen frames in a tight loop,
    then post-processes them into a video file at the target fps.
    Hotkeys (pynput): 't' stops and saves, 'k' aborts and deletes."""

    def __init__(self, width, high, path='', fps=15):
        """Initialise recorder state.

        :param width: frame width in pixels
        :param high: frame height in pixels
        :param path: output video path (may be bound later via __call__)
        :param fps: target playback fps
        """
        super().__init__()
        self.save_file = path
        self.best_fps = fps   # measured achievable capture fps (see video_best_fps)
        self.fps = fps        # desired output fps
        self.width = width
        self.high = high
        self.spend_time = 1   # seconds spent capturing, measured at runtime
        self.flag = False     # True -> stop capturing and save
        self.kill = False     # True -> stop capturing and delete the file
        self.video = None     # cv2.VideoWriter, created lazily

    def __call__(self, path):
        """Rebind the output path so the same object can record again."""
        self.save_file = Path(path)
        self.video = self.init_videowriter(self.save_file)

    @staticmethod
    def screenshot():
        """Grab the whole screen and return it as an np.array."""
        return np.array(ImageGrab.grab())

    @staticmethod
    def get_fourcc(name):
        """Map a file suffix to its fourcc codec string (None if unknown)."""
        fourcc_maps = {'.avi': 'I420',
                       '.m4v': 'mp4v',
                       '.mp4': 'avc1',
                       '.ogv': 'THEO',
                       '.flv': 'FLV1',
                       }
        return fourcc_maps.get(name)

    def init_videowriter(self, path):
        """Create a cv2.VideoWriter for *path* using its suffix's codec.

        Raises if no path has been set yet.
        """
        if not path:
            raise Exception('视频路径未设置,请设置\nvideo = ScreenshotVideo(fps,width,high)\nvideo = video(video_path)')
        path = Path(path) if isinstance(path, str) else path
        # NOTE(review): get_fourcc returns None for an unknown suffix, which
        # makes the * unpacking below raise — confirm suffixes are restricted.
        fourcc = cv2.VideoWriter_fourcc(*self.get_fourcc(path.suffix))
        return cv2.VideoWriter(path.as_posix(), fourcc, self.fps, (self.width, self.high))

    def video_record_doing(self, img):
        """Swap the frame's channel order (BGR<->RGB) and write it out."""
        im_cv = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.video.write(im_cv)

    def video_record_end(self):
        """Finish recording; delete the output file if the session was killed."""
        self.video.release()
        cv2.destroyAllWindows()
        if self.save_file and self.kill:
            Path(self.save_file).unlink()

    def video_best_fps(self, path):
        """Estimate this machine's achievable capture fps from a probe clip."""
        video = cv2.VideoCapture(path)
        fps = video.get(cv2.CAP_PROP_FPS)
        count = video.get(cv2.CAP_PROP_FRAME_COUNT)
        # Scale nominal fps by (clip duration / measured capture time).
        self.best_fps = int(fps * ((int(count) / fps) / self.spend_time))
        video.release()

    def pre_video_record(self):
        """Record a short probe clip to measure the best achievable fps,
        then delete the probe file."""
        self.video = self.init_videowriter('test.mp4')
        start_time = time.time()
        for _ in range(10):
            im = self.screenshot()
            self.video_record_doing(im)
        self.spend_time = round(time.time() - start_time, 4)
        self.video_record_end()
        time.sleep(2)
        self.video_best_fps('test.mp4')
        Path('test.mp4').unlink()

    def insert_frame_array(self, frame_list):
        """Interpolate extra frames (numpy averaging via average_n) so the
        captured sequence approaches the target fps."""
        fps_n = round(self.fps / self.best_fps)
        if fps_n <= 0:
            return frame_list
        times = int(np.log2(fps_n))  # number of doubling passes
        for _ in range(times):
            # Blend each frame with its predecessor, then interleave the
            # blended frames with the originals (doubles the frame count).
            frame_list2 = map(average_n, [frame_list[0]] + frame_list[:-1], frame_list)
            frame_list = [[x, y] for x, y in zip(frame_list2, frame_list)]
            frame_list = [j for i in frame_list for j in i]
        return frame_list

    def frame2video_run(self):
        """Capture frames until stopped, then assemble them into the video
        with opencv."""
        self.video = self.init_videowriter(self.save_file)
        start_time = time.time()
        frame_list = []
        while True:
            frame_list.append(self.screenshot())
            if self.flag:
                break
        self.spend_time = round(time.time() - start_time, 4)
        if not self.kill:  # unless killed, post-process and write every frame
            frame_list = self.insert_frame_array(frame_list)
            print("frame_list =", len(frame_list))
            for im in frame_list:
                self.video_record_doing(im)
        self.video_record_end()

    def hotkey(self):
        """Block on the pynput keyboard listener until it exits."""
        with keyboard.Listener(on_press=self.on_press) as listener:
            listener.join()

    def on_press(self, key):
        # Special keys have no .char attribute; the except swallows that.
        try:
            if key.char == 't':  # stop recording and save the video
                self.flag = True
                print("停止中...")
            elif key.char == 'k':  # abort recording and delete the file
                self.flag = True
                self.kill = True
        except Exception as e:
            print(e)

    def run(self):
        # Thread entry point: start the hotkey listener as a daemon thread,
        # then run the capture loop on this thread.
        Thread(target=self.hotkey, daemon=True).start()
        self.frame2video_run()
# BUGFIX: the recording session previously ran as bare module-level code, so
# merely importing this module would start a screen recording. Guard it.
if __name__ == '__main__':
    screen = ImageGrab.grab()
    width, high = screen.size
    video = ScreenshotVideo(width, high, fps=60)
    video.pre_video_record()  # probe run to measure the best achievable fps
    video('test1.mp4')        # bind the real output path
    video.run()
    print("结束...")
\ No newline at end of file
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册