Commit c48d4d47 authored by 三月三net

Python超人-宇宙模拟器

Parent 504dc7e4
@@ -26,9 +26,9 @@ init = SpeedOfLightInit(camera_follow_light)
# Create the solar-system bodies (mass ignored, gravity disabled, all initial velocities set to 0)
bodies = create_solar_system_bodies(ignore_mass=True, init_velocity=[0, 0, 0])
# camera_pos = "left"
camera_pos = "left"
camera_pos = "right"
camera_l2r = 0.01 * AU
camera_l2r = 0.1 * AU
if camera_pos == "right":  # right-eye camera
init.light_init_position[0] += camera_l2r
@@ -59,7 +59,9 @@ def on_reset():
init.arrived_info = "距离[太阳中心]:${distance}\n\n光速飞船速度:${speed}\n\n"
def on_timer_changed(time_data: TimeData):
init.text_panel.parent.enabled = False
velocity, _ = get_value_direction_vectors(light_ship.velocity)
distance = round(init.light_ship.position[2] / AU, 4)
text = init.arrived_info.replace("${distance}", "%.4f AU" % distance)
@@ -86,6 +88,8 @@ def body_arrived(body):
light_ship.acceleration = [0, -50, 200]
elif body.name == "海王星": # 到达海王星,加速前进,并进行攀升
light_ship.acceleration = [-3, 48, 300]
elif body.name == "冥王星":
exit(0)
# print(body)
# def body_arrived(body):
@@ -109,7 +113,7 @@ init.body_arrived = body_arrived
# position = x: left - / right +, y: up + / down -, z: forward + / backward -
ursina_run(bodies, 60,
position=init.camera_position,
show_trail=init.show_trail,
# show_trail=init.show_trail,
show_timer=True,
view_closely=init.view_closely,
bg_music="sounds/interstellar.mp3")
@@ -3,45 +3,76 @@ from PIL import ImageGrab
import numpy as np
import argparse
import time
import os
global img
global point1, point2
import win32gui
import win32ui
import win32con
import win32api
#
# def on_mouse(event, x, y, flags, param):
# global img, point1, point2
# img2 = img.copy()
# if event == cv2.EVENT_LBUTTONDOWN:  # left mouse button pressed
# point1 = (x, y)
# cv2.circle(img2, point1, 10, (0, 255, 0), thickness=2)
# cv2.imshow('image', img2)
# elif event == cv2.EVENT_MOUSEMOVE and (flags & cv2.EVENT_FLAG_LBUTTON):  # dragging with the left button held down
# cv2.rectangle(img2, point1, (x, y), (255, 0, 0), thickness=2)
# cv2.imshow('image', img2)
# elif event == cv2.EVENT_LBUTTONUP:  # left mouse button released
# point2 = (x, y)
# cv2.rectangle(img2, point1, point2, (0, 0, 255), thickness=2)
# cv2.imshow('image', img2)
#
#
# def select_roi(frame):
# global img, point1, point2
# img = cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2BGR)
# winname = 'image'
# cv2.namedWindow(winname, cv2.WINDOW_NORMAL)
# cv2.setWindowProperty(winname, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
# cv2.setMouseCallback(winname, on_mouse)
# cv2.imshow(winname, img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# return point1, point2
def on_mouse(event, x, y, flags, param):
global img, point1, point2
img2 = img.copy()
    if event == cv2.EVENT_LBUTTONDOWN:  # left mouse button pressed
point1 = (x, y)
cv2.circle(img2, point1, 10, (0, 255, 0), thickness=2)
cv2.imshow('image', img2)
    elif event == cv2.EVENT_MOUSEMOVE and (flags & cv2.EVENT_FLAG_LBUTTON):  # dragging with the left button held down
cv2.rectangle(img2, point1, (x, y), (255, 0, 0), thickness=2)
cv2.imshow('image', img2)
    elif event == cv2.EVENT_LBUTTONUP:  # left mouse button released
point2 = (x, y)
cv2.rectangle(img2, point1, point2, (0, 0, 255), thickness=2)
cv2.imshow('image', img2)
def select_roi(frame):
global img, point1, point2
img = cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2BGR)
winname = 'image'
cv2.namedWindow(winname, cv2.WINDOW_NORMAL)
cv2.setWindowProperty(winname, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
cv2.setMouseCallback(winname, on_mouse)
cv2.imshow(winname, img)
cv2.waitKey(0)
cv2.destroyAllWindows()
return point1, point2
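# Usage note: pass a PIL screenshot, e.g. select_roi(ImageGrab.grab()) (mirroring the
# commented-out call further below), drag a rectangle with the left mouse button,
# then press any key; the two corner points of the selection are returned.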
FFMPEG_PATH = "F:\\Tools\\ffmpeg"
def crop(mp4_file):
# "ffmpeg -i 1年等于11个月.mp4 -vf crop=1724:972:194:108 1年等于11个月_crop.mp4 -y"
cmd = 'SET PATH=%PATH%;"' + FFMPEG_PATH + '" & '
cmd = cmd + 'ffmpeg -i "' + mp4_file + '" -vf crop=1724:972:194:108 "' + mp4_file + '_crop.mp4" -y'
val = os.system(cmd)
if val == 0:
print("success")
else:
print("fail..")
window_name="earth"
window_name="fiction"
window_name="funny"
window_name="science"
window_name="solar_system"
window_name="tri_bodies"
window_name="wonders"
def get_window_img_dc(window_name="science"):
    # get the desktop (unused alternative, see the commented-out line below)
# hdesktop = win32gui.GetDesktopWindow()
handle = win32gui.FindWindow(None, window_name)
return handle
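# win32gui.FindWindow returns 0 when no top-level window carries exactly this title,
# which is what the polling loops in __main__ below rely on: the first loop waits for
# the simulator window to appear, and the capture loop stops once it has been closed.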
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    parser.add_argument('--fps', type=int, default=30, help='frames per second')
parser.add_argument('--total_time', type=int, default=10, help='video total time')
parser.add_argument('--savename', type=str, default='video.mp4', help='save file name')
parser.add_argument('--total_time', type=int, default=10000000, help='video total time')
parser.add_argument('--savename', type=str, default='video_right.mp4', help='save file name')
parser.add_argument('--screen_type', default=0, type=int, choices=[0, 1], help='1: full screen, 0: region screen')
args = parser.parse_args()
@@ -73,8 +104,19 @@ if __name__ == '__main__':
wait_ms = 1000 / args.fps
imageNum = 0
print("查找窗口...")
while True:
handle = get_window_img_dc()
if handle > 0:
break
time.sleep(0.001)
print("开始捕捉...")
while True:
handle = get_window_img_dc()
if handle == 0:
break
current_time = time.time() * 1000
next_frame_time = last_time + wait_ms
if current_time < next_frame_time:
@@ -102,4 +144,5 @@ if __name__ == '__main__':
print("保存中...")
video.release()
cv2.destroyAllWindows()
# crop('video.mp4')
print("完成")
import numpy as np
import win32gui
import win32ui
import win32con
import win32api
import cv2
from PIL import ImageGrab
import time
from dataclasses import dataclass
@dataclass
class Args:
savename: str = ''
fps: int = 0
total_time: int = 0
def get_window_img_dc(window_name):
    # get the desktop (unused alternative, see the commented-out line below)
# hdesktop = win32gui.GetDesktopWindow()
handle = win32gui.FindWindow(None, window_name)
    # adapt to the full virtual-screen resolution
width = win32api.GetSystemMetrics(win32con.SM_CXVIRTUALSCREEN)
height = win32api.GetSystemMetrics(win32con.SM_CYVIRTUALSCREEN)
left = win32api.GetSystemMetrics(win32con.SM_XVIRTUALSCREEN)
top = win32api.GetSystemMetrics(win32con.SM_YVIRTUALSCREEN)
    # create a device context (DC) from the window handle
desktop_dc = win32gui.GetWindowDC(handle)
img_dc = win32ui.CreateDCFromHandle(desktop_dc)
return img_dc, width, height
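# Note: width and height come from the virtual-screen metrics above rather than from
# the window rectangle, so every captured frame is virtual-screen sized regardless of
# how large the "solar_system" window actually is.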
window_img_dc, width, height = get_window_img_dc("solar_system")
def screenshot():
    # create a memory device context (DC)
mem_dc = window_img_dc.CreateCompatibleDC()
    # create a bitmap object compatible with the window DC
screenshot = win32ui.CreateBitmap()
screenshot.CreateCompatibleBitmap(window_img_dc, width, height)
mem_dc.SelectObject(screenshot)
    # BitBlt the window contents into the memory DC
mem_dc.BitBlt((0, 0), (width, height), window_img_dc, (0, 0), win32con.SRCCOPY)
    # optionally, save the screenshot to a file:
# screenshot.SaveBitmapFile(mem_dc, 'screenshot.bmp')
signedIntsArray = screenshot.GetBitmapBits(True)
    # convert the raw BGRA bytes into a numpy array
    # (np.frombuffer replaces the deprecated np.fromstring; .copy() keeps the array writable)
    img = np.frombuffer(signedIntsArray, dtype='uint8').copy()
img.shape = (height, width, 4)
    # release the GDI resources
mem_dc.DeleteDC()
win32gui.DeleteObject(screenshot.GetHandle())
    img = img[:, :, 0:3]  # drop the alpha channel, keeping the BGR data
return img
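# Minimal sanity check (a sketch, not part of the original script): grab one frame and
# confirm its shape and dtype; GDI bitmaps are BGRA, so after dropping alpha the three
# remaining channels are in BGR order.
# frame = screenshot()
# assert frame.shape == (height, width, 3) and frame.dtype == np.uint8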
def show_image(img):
# img = Image.open(r".\image.jpg")
    # img = img.convert("RGBA")  # convert to access pixel information
# pixdata = img.load()
# cv.imshow("name",img)
from PIL import Image
image = Image.fromarray(img)
    print(type(image))  # <class 'PIL.Image.Image'> for an image built with fromarray
    print(image.size)  # e.g. (822, 694); note that PIL reports (width, height), i.e. columns before rows
image.save(r"./1.jpg")
image.show()
if __name__ == '__main__':
# parser.add_argument('--fps', type=int, default=30, help='frame per second')
# parser.add_argument('--total_time', type=int, default=10, help='video total time')
# parser.add_argument('--savename', type=str, default='video.mp4', help='save file name')
# parser.add_argument('--screen_type', default=0, type=int, choices=[0, 1], help='1: full screen, 0: region screen')
img = screenshot()
height, width, _ = img.shape
args = Args()
args.savename = "video.mp4"
args.fps = 30
args.total_time = 10
# left, top, right, bottom = 194, 108, 1724, 972
# bbox = (left, top, right, bottom)
    # # curScreen = ImageGrab.grab(bbox)  # grab the screen region as an image object
# # point1, point2 = select_roi(curScreen)
# # print(point1, point2) # (184, 71) (1719, 932)
# point1, point2 = (194, 108), (1724, 972)
# print(point1, point2) # (184, 71) (1719, 932)
# min_x = min(point1[0], point2[0])
# min_y = min(point1[1], point2[1])
# max_x = max(point1[0], point2[0])
# max_y = max(point1[1], point2[1])
# width, height = max_y - min_y, max_x - min_x
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    video = cv2.VideoWriter(args.savename, fourcc, args.fps, (width, height))  # frameSize must be (width, height), not (height, width)
wait_ms = 1000 / args.fps
imageNum = 0
while True:
# current_time = time.time() * 1000
# next_frame_time = last_time + wait_ms
# if current_time < next_frame_time:
# time.sleep((next_frame_time - current_time) / 1000)
# print((next_frame_time - current_time) / 1000)
last_time = time.time() * 1000
if imageNum == 0:
captureImage = img
else:
captureImage = screenshot()
imageNum += 1
frame = cv2.cvtColor(np.array(captureImage), cv2.COLOR_RGB2BGR)
# if args.screen_type == 0:
# frame = frame[min_y:max_y, min_x:max_x, :]
# print(imageNum, args.fps, args.total_time)
if imageNum < args.fps * args.total_time:
video.write(frame)
        # exit condition
# if cv2.waitKey(50) == ord('q') or imageNum > args.fps * args.total_time:
#
k = cv2.waitKey(1)
# print(k)
if k == 27 or imageNum > args.fps * args.total_time: # Esc key to stop
print("退出...")
break
print("保存中...")
video.release()
cv2.destroyAllWindows()
print("完成")